1/*
2 * PowerPC emulation helpers for qemu.
3 *
4 * Copyright (c) 2003-2007 Jocelyn Mayer
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20#include "exec.h"
21#include "host-utils.h"
22#include "helper.h"
23
24#include "helper_regs.h"
25#include "op_helper.h"
26
27#define MEMSUFFIX _raw
28#include "op_helper.h"
29#include "op_helper_mem.h"
30#if !defined(CONFIG_USER_ONLY)
31#define MEMSUFFIX _user
32#include "op_helper.h"
33#include "op_helper_mem.h"
34#define MEMSUFFIX _kernel
35#include "op_helper.h"
36#include "op_helper_mem.h"
37#define MEMSUFFIX _hypv
38#include "op_helper.h"
39#include "op_helper_mem.h"
40#endif
41
42//#define DEBUG_OP
43//#define DEBUG_EXCEPTIONS
44//#define DEBUG_SOFTWARE_TLB
45
46/*****************************************************************************/
47/* Exceptions processing helpers */
48
49void helper_raise_exception_err (uint32_t exception, uint32_t error_code)
50{
51 raise_exception_err(env, exception, error_code);
52}
53
54void helper_raise_debug (void)
55{
56 raise_exception(env, EXCP_DEBUG);
57}
58
59/*****************************************************************************/
60/* Registers load and stores */
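/* The condition register is modelled as eight 4-bit fields, env->crf[0..7],
 * with CR0 in crf[0]. helper_load_cr packs them back into the architected
 * 32-bit layout, CR0 in the most significant nibble; for example crf[0] = 0x8
 * with all other fields clear yields 0x80000000. helper_store_cr is the
 * inverse, guarded by the CRM-style field mask (mask bit 7 selects CR0).
 */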
61target_ulong helper_load_cr (void)
62{
63 return (env->crf[0] << 28) |
64 (env->crf[1] << 24) |
65 (env->crf[2] << 20) |
66 (env->crf[3] << 16) |
67 (env->crf[4] << 12) |
68 (env->crf[5] << 8) |
69 (env->crf[6] << 4) |
70 (env->crf[7] << 0);
71}
72
73void helper_store_cr (target_ulong val, uint32_t mask)
74{
75 int i, sh;
76
77 for (i = 0, sh = 7; i < 8; i++, sh--) {
78 if (mask & (1 << sh))
79 env->crf[i] = (val >> (sh * 4)) & 0xFUL;
80 }
81}
82
83#if defined(TARGET_PPC64)
84void do_store_pri (int prio)
85{
86 env->spr[SPR_PPR] &= ~0x001C000000000000ULL;
87 env->spr[SPR_PPR] |= ((uint64_t)prio & 0x7) << 50;
88}
89#endif
90
91target_ulong ppc_load_dump_spr (int sprn)
92{
93 if (loglevel != 0) {
94 fprintf(logfile, "Read SPR %d %03x => " ADDRX "\n",
95 sprn, sprn, env->spr[sprn]);
96 }
97
98 return env->spr[sprn];
99}
100
101void ppc_store_dump_spr (int sprn, target_ulong val)
102{
103 if (loglevel != 0) {
104 fprintf(logfile, "Write SPR %d %03x => " ADDRX " <= " ADDRX "\n",
105 sprn, sprn, env->spr[sprn], val);
106 }
107 env->spr[sprn] = val;
108}
109
110/*****************************************************************************/
111/* Memory load and stores */
112
113static always_inline target_ulong get_addr(target_ulong addr)
114{
115#if defined(TARGET_PPC64)
116 if (msr_sf)
117 return addr;
118 else
119#endif
120 return (uint32_t)addr;
121}
122
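/* lmw/stmw: load/store multiple word. Registers reg..31 are transferred
 * to/from consecutive words starting at addr. The access function is picked
 * from the current mmu_idx (user/kernel/hypervisor softmmu variants, or the
 * _raw accessors in user-only builds), and the words are byte-swapped when
 * MSR[LE] is set.
 */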
123void helper_lmw (target_ulong addr, uint32_t reg)
124{
125#ifdef CONFIG_USER_ONLY
126#define ldfun ldl_raw
127#else
128 int (*ldfun)(target_ulong);
129
130 switch (env->mmu_idx) {
131 default:
132 case 0: ldfun = ldl_user;
133 break;
134 case 1: ldfun = ldl_kernel;
135 break;
136 case 2: ldfun = ldl_hypv;
137 break;
138 }
139#endif
140 for (; reg < 32; reg++, addr += 4) {
141 if (msr_le)
142 env->gpr[reg] = bswap32(ldfun(get_addr(addr)));
143 else
144 env->gpr[reg] = ldfun(get_addr(addr));
145 }
146}
147
148void helper_stmw (target_ulong addr, uint32_t reg)
149{
150#ifdef CONFIG_USER_ONLY
151#define stfun stl_raw
152#else
153 void (*stfun)(target_ulong, int);
154
155 switch (env->mmu_idx) {
156 default:
157 case 0: stfun = stl_user;
158 break;
159 case 1: stfun = stl_kernel;
160 break;
161 case 2: stfun = stl_hypv;
162 break;
163 }
164#endif
165 for (; reg < 32; reg++, addr += 4) {
166 if (msr_le)
167 stfun(get_addr(addr), bswap32((uint32_t)env->gpr[reg]));
168 else
169 stfun(get_addr(addr), (uint32_t)env->gpr[reg]);
170 }
171}
172
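/* dcbz zeroes one data cache block: the effective address is rounded down to
 * a cache-line boundary and the whole line is filled with zero words. Any
 * lwarx/ldarx reservation that falls inside the zeroed line is cancelled by
 * invalidating env->reserve. The 970 variant below honours a HID5 setting
 * that shrinks the block to 32 bytes, apparently for compatibility with
 * 32-byte-line parts (see helper_dcbz_970).
 */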
173static void do_dcbz(target_ulong addr, int dcache_line_size)
174{
175 target_long mask = get_addr(~(dcache_line_size - 1));
176 int i;
177#ifdef CONFIG_USER_ONLY
178#define stfun stl_raw
179#else
180 void (*stfun)(target_ulong, int);
181
182 switch (env->mmu_idx) {
183 default:
184 case 0: stfun = stl_user;
185 break;
186 case 1: stfun = stl_kernel;
187 break;
188 case 2: stfun = stl_hypv;
189 break;
190 }
191#endif
192 addr &= mask;
193 for (i = 0 ; i < dcache_line_size ; i += 4) {
194 stfun(addr + i , 0);
195 }
196 if ((env->reserve & mask) == addr)
197 env->reserve = (target_ulong)-1ULL;
198}
199
200void helper_dcbz(target_ulong addr)
201{
202 do_dcbz(addr, env->dcache_line_size);
203}
204
205void helper_dcbz_970(target_ulong addr)
206{
207 if (((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1)
208 do_dcbz(addr, 32);
209 else
210 do_dcbz(addr, env->dcache_line_size);
211}
212
213/*****************************************************************************/
214/* Fixed point operations helpers */
215#if defined(TARGET_PPC64)
216
217/* multiply high word */
218uint64_t helper_mulhd (uint64_t arg1, uint64_t arg2)
219{
220 uint64_t tl, th;
221
222 muls64(&tl, &th, arg1, arg2);
223 return th;
224}
225
226/* multiply high word unsigned */
227uint64_t helper_mulhdu (uint64_t arg1, uint64_t arg2)
228{
229 uint64_t tl, th;
230
231 mulu64(&tl, &th, arg1, arg2);
232 return th;
233}
234
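/* mulldo: signed 64x64 -> 64 multiply with overflow reporting. For the
 * product to fit in 64 bits the high half th must be all zeroes or all ones,
 * which is what the (uint64_t)(th + 1) <= 1 test below checks; otherwise
 * XER[OV] and XER[SO] are set.
 */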
235uint64_t helper_mulldo (uint64_t arg1, uint64_t arg2)
236{
237 int64_t th;
238 uint64_t tl;
239
240 muls64(&tl, (uint64_t *)&th, arg1, arg2);
241 /* If th != 0 && th != -1, then we had an overflow */
242 if (likely((uint64_t)(th + 1) <= 1)) {
243 env->xer &= ~(1 << XER_OV);
244 } else {
245 env->xer |= (1 << XER_OV) | (1 << XER_SO);
246 }
247 return (int64_t)tl;
248}
249#endif
250
251target_ulong helper_cntlzw (target_ulong t)
252{
253 return clz32(t);
254}
255
256#if defined(TARGET_PPC64)
257target_ulong helper_cntlzd (target_ulong t)
258{
259 return clz64(t);
260}
261#endif
262
263/* shift right arithmetic helper */
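/* sraw: XER[CA] is set when the source is negative and any 1 bits were
 * shifted out, so that (value >> shift) + CA reproduces a division rounded
 * towards zero. Example: sraw(0xFFFFFFF5, 1) is 0xFFFFFFFA (-11 >> 1 == -6)
 * with CA = 1, and -6 + 1 == -5 == -11 / 2. Shift amounts of 32..63
 * propagate the sign bit only.
 */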
264target_ulong helper_sraw (target_ulong value, target_ulong shift)
265{
266 int32_t ret;
267
268 if (likely(!(shift & 0x20))) {
269 if (likely((uint32_t)shift != 0)) {
270 shift &= 0x1f;
271 ret = (int32_t)value >> shift;
272 if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) {
273 env->xer &= ~(1 << XER_CA);
274 } else {
275 env->xer |= (1 << XER_CA);
276 }
277 } else {
278 ret = (int32_t)value;
279 env->xer &= ~(1 << XER_CA);
280 }
281 } else {
282 ret = (int32_t)value >> 31;
283 if (ret) {
284 env->xer |= (1 << XER_CA);
285 } else {
286 env->xer &= ~(1 << XER_CA);
287 }
288 }
289 return (target_long)ret;
290}
291
292#if defined(TARGET_PPC64)
293target_ulong helper_srad (target_ulong value, target_ulong shift)
294{
295 int64_t ret;
296
297 if (likely(!(shift & 0x40))) {
298 if (likely((uint64_t)shift != 0)) {
299 shift &= 0x3f;
300 ret = (int64_t)value >> shift;
301 if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) {
302 env->xer &= ~(1 << XER_CA);
303 } else {
304 env->xer |= (1 << XER_CA);
305 }
306 } else {
307 ret = (int64_t)value;
308 env->xer &= ~(1 << XER_CA);
309 }
310 } else {
311 ret = (int64_t)value >> 63;
312 if (ret) {
313 env->xer |= (1 << XER_CA);
314 } else {
315 env->xer &= ~(1 << XER_CA);
316 }
317 }
318 return ret;
319}
320#endif
321
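/* popcntb: population count per byte. Each byte of the result holds the
 * number of bits set in the corresponding byte of the source, computed with
 * the usual parallel bit-summing steps (pairs, then nibbles, then bytes).
 * Example: helper_popcntb(0xF0F01234) == 0x04040203.
 */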
322target_ulong helper_popcntb (target_ulong val)
323{
324 val = (val & 0x55555555) + ((val >> 1) & 0x55555555);
325 val = (val & 0x33333333) + ((val >> 2) & 0x33333333);
326 val = (val & 0x0f0f0f0f) + ((val >> 4) & 0x0f0f0f0f);
327 return val;
328}
329
330#if defined(TARGET_PPC64)
331target_ulong helper_popcntb_64 (target_ulong val)
332{
333 val = (val & 0x5555555555555555ULL) + ((val >> 1) & 0x5555555555555555ULL);
334 val = (val & 0x3333333333333333ULL) + ((val >> 2) & 0x3333333333333333ULL);
335 val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >> 4) & 0x0f0f0f0f0f0f0f0fULL);
336 return val;
337}
338#endif
339
340/*****************************************************************************/
341/* Floating point operations helpers */
342uint64_t helper_float32_to_float64(uint32_t arg)
343{
344 CPU_FloatU f;
345 CPU_DoubleU d;
346 f.l = arg;
347 d.d = float32_to_float64(f.f, &env->fp_status);
348 return d.ll;
349}
350
351uint32_t helper_float64_to_float32(uint64_t arg)
352{
353 CPU_FloatU f;
354 CPU_DoubleU d;
355 d.ll = arg;
356 f.f = float64_to_float32(d.d, &env->fp_status);
357 return f.l;
358}
359
360static always_inline int fpisneg (float64 d)
361{
362 CPU_DoubleU u;
363
364 u.d = d;
365
366 return u.ll >> 63 != 0;
367}
368
369static always_inline int isden (float64 d)
370{
371 CPU_DoubleU u;
372
373 u.d = d;
374
375 return ((u.ll >> 52) & 0x7FF) == 0;
376}
377
378static always_inline int iszero (float64 d)
379{
380 CPU_DoubleU u;
381
382 u.d = d;
383
384 return (u.ll & ~0x8000000000000000ULL) == 0;
385}
386
387static always_inline int isinfinity (float64 d)
388{
389 CPU_DoubleU u;
390
391 u.d = d;
392
393 return ((u.ll >> 52) & 0x7FF) == 0x7FF &&
394 (u.ll & 0x000FFFFFFFFFFFFFULL) == 0;
395}
396
397#ifdef CONFIG_SOFTFLOAT
398static always_inline int isfinite (float64 d)
399{
400 CPU_DoubleU u;
401
402 u.d = d;
403
404 return (((u.ll >> 52) & 0x7FF) != 0x7FF);
405}
406
407static always_inline int isnormal (float64 d)
408{
409 CPU_DoubleU u;
410
411 u.d = d;
412
413 uint32_t exp = (u.ll >> 52) & 0x7FF;
414 return ((0 < exp) && (exp < 0x7FF));
415}
416#endif
417
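/* Computes the 5-bit FPSCR[FPRF] class code for arg: the encodings used
 * below distinguish quiet NaN, +/- infinity, +/- zero, denormalized and
 * normalized values and their signs. Only the low four bits (the FPCC part)
 * are returned to the caller, which uses them to set CR1 for the Rc=1
 * instruction forms.
 */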
418uint32_t helper_compute_fprf (uint64_t arg, uint32_t set_fprf)
419{
420 CPU_DoubleU farg;
421 int isneg;
422 int ret;
423 farg.ll = arg;
424 isneg = fpisneg(farg.d);
425 if (unlikely(float64_is_nan(farg.d))) {
426 if (float64_is_signaling_nan(farg.d)) {
427 /* Signaling NaN: flags are undefined */
428 ret = 0x00;
429 } else {
430 /* Quiet NaN */
431 ret = 0x11;
432 }
433 } else if (unlikely(isinfinity(farg.d))) {
434 /* +/- infinity */
435 if (isneg)
436 ret = 0x09;
437 else
438 ret = 0x05;
439 } else {
440 if (iszero(farg.d)) {
441 /* +/- zero */
442 if (isneg)
443 ret = 0x12;
444 else
445 ret = 0x02;
446 } else {
447 if (isden(farg.d)) {
448 /* Denormalized numbers */
449 ret = 0x10;
450 } else {
451 /* Normalized numbers */
452 ret = 0x00;
453 }
454 if (isneg) {
455 ret |= 0x08;
456 } else {
457 ret |= 0x04;
458 }
459 }
460 }
461 if (set_fprf) {
462 /* We update FPSCR_FPRF */
463 env->fpscr &= ~(0x1F << FPSCR_FPRF);
464 env->fpscr |= ret << FPSCR_FPRF;
465 }
466 /* We just need fpcc to update Rc1 */
467 return ret & 0xF;
468}
469
470/* Floating-point invalid operations exception */
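/* op is a bitmask of POWERPC_EXCP_FP_VX* causes. The matching FPSCR status
 * bits are set and, when invalid-operation exceptions are disabled (VE=0),
 * a default quiet NaN result (here the all-ones pattern) is returned for the
 * caller to write into the target FPR.
 */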
471static always_inline uint64_t fload_invalid_op_excp (int op)
472{
473 uint64_t ret = 0;
474 int ve;
475
476 ve = fpscr_ve;
477 if (op & POWERPC_EXCP_FP_VXSNAN) {
478 /* Operation on signaling NaN */
479 env->fpscr |= 1 << FPSCR_VXSNAN;
480 }
481 if (op & POWERPC_EXCP_FP_VXSOFT) {
482 /* Software-defined condition */
483 env->fpscr |= 1 << FPSCR_VXSOFT;
484 }
485 switch (op & ~(POWERPC_EXCP_FP_VXSOFT | POWERPC_EXCP_FP_VXSNAN)) {
486 case POWERPC_EXCP_FP_VXISI:
487 /* Magnitude subtraction of infinities */
488 env->fpscr |= 1 << FPSCR_VXISI;
489 goto update_arith;
490 case POWERPC_EXCP_FP_VXIDI:
491 /* Division of infinity by infinity */
492 env->fpscr |= 1 << FPSCR_VXIDI;
493 goto update_arith;
494 case POWERPC_EXCP_FP_VXZDZ:
495 /* Division of zero by zero */
496 env->fpscr |= 1 << FPSCR_VXZDZ;
497 goto update_arith;
498 case POWERPC_EXCP_FP_VXIMZ:
499 /* Multiplication of zero by infinity */
500 env->fpscr |= 1 << FPSCR_VXIMZ;
501 goto update_arith;
502 case POWERPC_EXCP_FP_VXVC:
503 /* Ordered comparison of NaN */
504 env->fpscr |= 1 << FPSCR_VXVC;
505 env->fpscr &= ~(0xF << FPSCR_FPCC);
506 env->fpscr |= 0x11 << FPSCR_FPCC;
507 /* We must update the target FPR before raising the exception */
508 if (ve != 0) {
509 env->exception_index = POWERPC_EXCP_PROGRAM;
510 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
511 /* Update the floating-point enabled exception summary */
512 env->fpscr |= 1 << FPSCR_FEX;
513 /* Exception is deferred */
514 ve = 0;
515 }
516 break;
517 case POWERPC_EXCP_FP_VXSQRT:
518 /* Square root of a negative number */
519 env->fpscr |= 1 << FPSCR_VXSQRT;
520 update_arith:
521 env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
522 if (ve == 0) {
523 /* Set the result to quiet NaN */
524 ret = UINT64_MAX;
525 env->fpscr &= ~(0xF << FPSCR_FPCC);
526 env->fpscr |= 0x11 << FPSCR_FPCC;
527 }
528 break;
529 case POWERPC_EXCP_FP_VXCVI:
530 /* Invalid conversion */
531 env->fpscr |= 1 << FPSCR_VXCVI;
532 env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
533 if (ve == 0) {
534 /* Set the result to quiet NaN */
535 ret = UINT64_MAX;
536 env->fpscr &= ~(0xF << FPSCR_FPCC);
537 env->fpscr |= 0x11 << FPSCR_FPCC;
538 }
539 break;
540 }
541 /* Update the floating-point invalid operation summary */
542 env->fpscr |= 1 << FPSCR_VX;
543 /* Update the floating-point exception summary */
544 env->fpscr |= 1 << FPSCR_FX;
545 if (ve != 0) {
546 /* Update the floating-point enabled exception summary */
547 env->fpscr |= 1 << FPSCR_FEX;
548 if (msr_fe0 != 0 || msr_fe1 != 0)
549 raise_exception_err(env, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_FP | op);
550 }
551 return ret;
552}
553
554static always_inline uint64_t float_zero_divide_excp (uint64_t arg1, uint64_t arg2)
555{
556 env->fpscr |= 1 << FPSCR_ZX;
557 env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
558 /* Update the floating-point exception summary */
559 env->fpscr |= 1 << FPSCR_FX;
560 if (fpscr_ze != 0) {
561 /* Update the floating-point enabled exception summary */
562 env->fpscr |= 1 << FPSCR_FEX;
563 if (msr_fe0 != 0 || msr_fe1 != 0) {
564 raise_exception_err(env, POWERPC_EXCP_PROGRAM,
565 POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX);
566 }
567 } else {
568 /* Set the result to infinity */
569 arg1 = ((arg1 ^ arg2) & 0x8000000000000000ULL);
570 arg1 |= 0x7FFULL << 52;
571 }
572 return arg1;
573}
574
575static always_inline void float_overflow_excp (void)
576{
577 env->fpscr |= 1 << FPSCR_OX;
578 /* Update the floating-point exception summary */
579 env->fpscr |= 1 << FPSCR_FX;
580 if (fpscr_oe != 0) {
581 /* XXX: should adjust the result */
582 /* Update the floating-point enabled exception summary */
583 env->fpscr |= 1 << FPSCR_FEX;
584 /* We must update the target FPR before raising the exception */
585 env->exception_index = POWERPC_EXCP_PROGRAM;
586 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
587 } else {
588 env->fpscr |= 1 << FPSCR_XX;
589 env->fpscr |= 1 << FPSCR_FI;
590 }
591}
592
593static always_inline void float_underflow_excp (void)
594{
595 env->fpscr |= 1 << FPSCR_UX;
596 /* Update the floating-point exception summary */
597 env->fpscr |= 1 << FPSCR_FX;
598 if (fpscr_ue != 0) {
599 /* XXX: should adjust the result */
600 /* Update the floating-point enabled exception summary */
601 env->fpscr |= 1 << FPSCR_FEX;
602 /* We must update the target FPR before raising the exception */
603 env->exception_index = POWERPC_EXCP_PROGRAM;
604 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
605 }
606}
607
608static always_inline void float_inexact_excp (void)
609{
610 env->fpscr |= 1 << FPSCR_XX;
611 /* Update the floating-point exception summary */
612 env->fpscr |= 1 << FPSCR_FX;
613 if (fpscr_xe != 0) {
614 /* Update the floating-point enabled exception summary */
615 env->fpscr |= 1 << FPSCR_FEX;
616 /* We must update the target FPR before raising the exception */
617 env->exception_index = POWERPC_EXCP_PROGRAM;
618 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
619 }
620}
621
622static always_inline void fpscr_set_rounding_mode (void)
623{
624 int rnd_type;
625
626 /* Set rounding mode */
627 switch (fpscr_rn) {
628 case 0:
629 /* Best approximation (round to nearest) */
630 rnd_type = float_round_nearest_even;
631 break;
632 case 1:
633 /* Smaller magnitude (round toward zero) */
634 rnd_type = float_round_to_zero;
635 break;
636 case 2:
637 /* Round toward +infinite */
638 rnd_type = float_round_up;
639 break;
640 default:
641 case 3:
642 /* Round toward -infinite */
643 rnd_type = float_round_down;
644 break;
645 }
646 set_float_rounding_mode(rnd_type, &env->fp_status);
647}
648
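/* Setting an FPSCR bit that was previously clear may have side effects:
 * exception status bits also set the sticky FX summary, and if the matching
 * enable bit is already set (or an enable bit is set while its status bit is
 * pending) a program interrupt is prepared via the raise_* labels. Changing
 * the RN field re-programs the softfloat rounding mode.
 */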
649void helper_fpscr_setbit (uint32_t bit)
650{
651 int prev;
652
653 prev = (env->fpscr >> bit) & 1;
654 env->fpscr |= 1 << bit;
655 if (prev == 0) {
656 switch (bit) {
657 case FPSCR_VX:
658 env->fpscr |= 1 << FPSCR_FX;
659 if (fpscr_ve)
660 goto raise_ve;
            break;
661 case FPSCR_OX:
662 env->fpscr |= 1 << FPSCR_FX;
663 if (fpscr_oe)
664 goto raise_oe;
665 break;
666 case FPSCR_UX:
667 env->fpscr |= 1 << FPSCR_FX;
668 if (fpscr_ue)
669 goto raise_ue;
670 break;
671 case FPSCR_ZX:
672 env->fpscr |= 1 << FPSCR_FX;
673 if (fpscr_ze)
674 goto raise_ze;
675 break;
676 case FPSCR_XX:
677 env->fpscr |= 1 << FPSCR_FX;
678 if (fpscr_xe)
679 goto raise_xe;
680 break;
681 case FPSCR_VXSNAN:
682 case FPSCR_VXISI:
683 case FPSCR_VXIDI:
684 case FPSCR_VXZDZ:
685 case FPSCR_VXIMZ:
686 case FPSCR_VXVC:
687 case FPSCR_VXSOFT:
688 case FPSCR_VXSQRT:
689 case FPSCR_VXCVI:
690 env->fpscr |= 1 << FPSCR_VX;
691 env->fpscr |= 1 << FPSCR_FX;
692 if (fpscr_ve != 0)
693 goto raise_ve;
694 break;
695 case FPSCR_VE:
696 if (fpscr_vx != 0) {
697 raise_ve:
698 env->error_code = POWERPC_EXCP_FP;
699 if (fpscr_vxsnan)
700 env->error_code |= POWERPC_EXCP_FP_VXSNAN;
701 if (fpscr_vxisi)
702 env->error_code |= POWERPC_EXCP_FP_VXISI;
703 if (fpscr_vxidi)
704 env->error_code |= POWERPC_EXCP_FP_VXIDI;
705 if (fpscr_vxzdz)
706 env->error_code |= POWERPC_EXCP_FP_VXZDZ;
707 if (fpscr_vximz)
708 env->error_code |= POWERPC_EXCP_FP_VXIMZ;
709 if (fpscr_vxvc)
710 env->error_code |= POWERPC_EXCP_FP_VXVC;
711 if (fpscr_vxsoft)
712 env->error_code |= POWERPC_EXCP_FP_VXSOFT;
713 if (fpscr_vxsqrt)
714 env->error_code |= POWERPC_EXCP_FP_VXSQRT;
715 if (fpscr_vxcvi)
716 env->error_code |= POWERPC_EXCP_FP_VXCVI;
717 goto raise_excp;
718 }
719 break;
720 case FPSCR_OE:
721 if (fpscr_ox != 0) {
722 raise_oe:
723 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
724 goto raise_excp;
725 }
726 break;
727 case FPSCR_UE:
728 if (fpscr_ux != 0) {
729 raise_ue:
730 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
731 goto raise_excp;
732 }
733 break;
734 case FPSCR_ZE:
735 if (fpscr_zx != 0) {
736 raise_ze:
737 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
738 goto raise_excp;
739 }
740 break;
741 case FPSCR_XE:
742 if (fpscr_xx != 0) {
743 raise_xe:
744 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
745 goto raise_excp;
746 }
747 break;
748 case FPSCR_RN1:
749 case FPSCR_RN:
750 fpscr_set_rounding_mode();
751 break;
752 default:
753 break;
754 raise_excp:
755 /* Update the floating-point enabled exception summary */
756 env->fpscr |= 1 << FPSCR_FEX;
757 /* We have to update Rc1 before raising the exception */
758 env->exception_index = POWERPC_EXCP_PROGRAM;
759 break;
760 }
761 }
762}
763
764void helper_store_fpscr (uint64_t arg, uint32_t mask)
765{
766 /*
767 * We use only the 32 LSB of the incoming fpr
768 */
769 uint32_t prev, new;
770 int i;
771
772 prev = env->fpscr;
773 new = (uint32_t)arg;
774 new &= ~0x90000000;
775 new |= prev & 0x90000000;
776 for (i = 0; i < 7; i++) {
777 if (mask & (1 << i)) {
778 env->fpscr &= ~(0xF << (4 * i));
779 env->fpscr |= new & (0xF << (4 * i));
780 }
781 }
782 /* Update VX and FEX */
783 if (fpscr_ix != 0)
784 env->fpscr |= 1 << FPSCR_VX;
785 else
786 env->fpscr &= ~(1 << FPSCR_VX);
787 if ((fpscr_ex & fpscr_eex) != 0) {
788 env->fpscr |= 1 << FPSCR_FEX;
789 env->exception_index = POWERPC_EXCP_PROGRAM;
790 /* XXX: we should compute it properly */
791 env->error_code = POWERPC_EXCP_FP;
792 }
793 else
794 env->fpscr &= ~(1 << FPSCR_FEX);
795 fpscr_set_rounding_mode();
796}
797
798void helper_float_check_status (void)
799{
800#ifdef CONFIG_SOFTFLOAT
801 if (env->exception_index == POWERPC_EXCP_PROGRAM &&
802 (env->error_code & POWERPC_EXCP_FP)) {
803 /* Deferred floating-point exception after target FPR update */
804 if (msr_fe0 != 0 || msr_fe1 != 0)
805 raise_exception_err(env, env->exception_index, env->error_code);
806 } else if (env->fp_status.float_exception_flags & float_flag_overflow) {
807 float_overflow_excp();
808 } else if (env->fp_status.float_exception_flags & float_flag_underflow) {
809 float_underflow_excp();
810 } else if (env->fp_status.float_exception_flags & float_flag_inexact) {
811 float_inexact_excp();
812 }
813#else
814 if (env->exception_index == POWERPC_EXCP_PROGRAM &&
815 (env->error_code & POWERPC_EXCP_FP)) {
816 /* Deferred floating-point exception after target FPR update */
817 if (msr_fe0 != 0 || msr_fe1 != 0)
818 raise_exception_err(env, env->exception_index, env->error_code);
819 }
820 RETURN();
821#endif
822}
823
824#ifdef CONFIG_SOFTFLOAT
825void helper_reset_fpstatus (void)
826{
827 env->fp_status.float_exception_flags = 0;
828}
829#endif
830
831/* fadd - fadd. */
832uint64_t helper_fadd (uint64_t arg1, uint64_t arg2)
833{
834 CPU_DoubleU farg1, farg2;
835
836 farg1.ll = arg1;
837 farg2.ll = arg2;
838#if USE_PRECISE_EMULATION
839 if (unlikely(float64_is_signaling_nan(farg1.d) ||
840 float64_is_signaling_nan(farg2.d))) {
841 /* sNaN addition */
842 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
843 } else if (likely(isfinite(farg1.d) || isfinite(farg2.d) ||
844 fpisneg(farg1.d) == fpisneg(farg2.d))) {
845 farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
846 } else {
847 /* Magnitude subtraction of infinities */
848 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
849 }
850#else
851 farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
852#endif
853 return farg1.ll;
854}
855
856/* fsub - fsub. */
857uint64_t helper_fsub (uint64_t arg1, uint64_t arg2)
858{
859 CPU_DoubleU farg1, farg2;
860
861 farg1.ll = arg1;
862 farg2.ll = arg2;
863#if USE_PRECISE_EMULATION
864{
865 if (unlikely(float64_is_signaling_nan(farg1.d) ||
866 float64_is_signaling_nan(farg2.d))) {
867 /* sNaN subtraction */
868 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
869 } else if (likely(isfinite(farg1.d) || isfinite(farg2.d) ||
870 fpisneg(farg1.d) != fpisneg(farg2.d))) {
871 farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
872 } else {
873 /* Magnitude subtraction of infinities */
874 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
875 }
876}
877#else
878 farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
879#endif
880 return farg1.ll;
881}
882
883/* fmul - fmul. */
884uint64_t helper_fmul (uint64_t arg1, uint64_t arg2)
885{
886 CPU_DoubleU farg1, farg2;
887
888 farg1.ll = arg1;
889 farg2.ll = arg2;
890#if USE_PRECISE_EMULATION
891 if (unlikely(float64_is_signaling_nan(farg1.d) ||
892 float64_is_signaling_nan(farg2.d))) {
893 /* sNaN multiplication */
894 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
895 } else if (unlikely((isinfinity(farg1.d) && iszero(farg2.d)) ||
896 (iszero(farg1.d) && isinfinity(farg2.d)))) {
897 /* Multiplication of zero by infinity */
898 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
899 } else {
900 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
901 }
903#else
904 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
905#endif
906 return farg1.ll;
907}
908
909/* fdiv - fdiv. */
910uint64_t helper_fdiv (uint64_t arg1, uint64_t arg2)
911{
912 CPU_DoubleU farg1, farg2;
913
914 farg1.ll = arg1;
915 farg2.ll = arg2;
916#if USE_PRECISE_EMULATION
917 if (unlikely(float64_is_signaling_nan(farg1.d) ||
918 float64_is_signaling_nan(farg2.d))) {
919 /* sNaN division */
920 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
921 } else if (unlikely(isinfinity(farg1.d) && isinfinity(farg2.d))) {
922 /* Division of infinity by infinity */
923 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIDI);
924 } else if (unlikely(iszero(farg2.d))) {
925 if (iszero(farg1.d)) {
926 /* Division of zero by zero */
927 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXZDZ);
928 } else {
929 /* Division by zero */
930 farg1.ll = float_zero_divide_excp(farg1.d, farg2.d);
931 }
932 } else {
933 farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
934 }
935#else
936 farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
937#endif
938 return farg1.ll;
939}
940
941/* fabs */
942uint64_t helper_fabs (uint64_t arg)
943{
944 CPU_DoubleU farg;
945
946 farg.ll = arg;
947 farg.d = float64_abs(farg.d);
948 return farg.ll;
949}
950
951/* fnabs */
952uint64_t helper_fnabs (uint64_t arg)
953{
954 CPU_DoubleU farg;
955
956 farg.ll = arg;
957 farg.d = float64_abs(farg.d);
958 farg.d = float64_chs(farg.d);
959 return farg.ll;
960}
961
962/* fneg */
963uint64_t helper_fneg (uint64_t arg)
964{
965 CPU_DoubleU farg;
966
967 farg.ll = arg;
968 farg.d = float64_chs(farg.d);
969 return farg.ll;
970}
971
972/* fctiw - fctiw. */
973uint64_t helper_fctiw (uint64_t arg)
974{
975 CPU_DoubleU farg;
976 farg.ll = arg;
977
978 if (unlikely(float64_is_signaling_nan(farg.d))) {
979 /* sNaN conversion */
980 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
981 } else if (unlikely(float64_is_nan(farg.d) || isinfinity(farg.d))) {
982 /* qNaN / infinity conversion */
983 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
984 } else {
985 farg.ll = float64_to_int32(farg.d, &env->fp_status);
986#if USE_PRECISE_EMULATION
987 /* XXX: higher bits are not supposed to be significant.
988 * to make tests easier, return the same as a real PowerPC 750
989 */
990 farg.ll |= 0xFFF80000ULL << 32;
991#endif
992 }
993 return farg.ll;
994}
995
996/* fctiwz - fctiwz. */
997uint64_t helper_fctiwz (uint64_t arg)
998{
999 CPU_DoubleU farg;
1000 farg.ll = arg;
1001
1002 if (unlikely(float64_is_signaling_nan(farg.d))) {
1003 /* sNaN conversion */
1004 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1005 } else if (unlikely(float64_is_nan(farg.d) || isinfinity(farg.d))) {
1006 /* qNaN / infinity conversion */
1007 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1008 } else {
1009 farg.ll = float64_to_int32_round_to_zero(farg.d, &env->fp_status);
1010#if USE_PRECISE_EMULATION
1011 /* XXX: higher bits are not supposed to be significant.
1012 * to make tests easier, return the same as a real PowerPC 750
1013 */
1014 farg.ll |= 0xFFF80000ULL << 32;
1015#endif
1016 }
1017 return farg.ll;
1018}
1019
1020#if defined(TARGET_PPC64)
1021/* fcfid - fcfid. */
1022uint64_t helper_fcfid (uint64_t arg)
1023{
1024 CPU_DoubleU farg;
1025 farg.d = int64_to_float64(arg, &env->fp_status);
1026 return farg.ll;
1027}
1028
1029/* fctid - fctid. */
1030uint64_t helper_fctid (uint64_t arg)
1031{
1032 CPU_DoubleU farg;
1033 farg.ll = arg;
1034
1035 if (unlikely(float64_is_signaling_nan(farg.d))) {
1036 /* sNaN conversion */
1037 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1038 } else if (unlikely(float64_is_nan(farg.d) || isinfinity(farg.d))) {
1039 /* qNaN / infinity conversion */
1040 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1041 } else {
1042 farg.ll = float64_to_int64(farg.d, &env->fp_status);
1043 }
1044 return farg.ll;
1045}
1046
1047/* fctidz - fctidz. */
1048uint64_t helper_fctidz (uint64_t arg)
1049{
1050 CPU_DoubleU farg;
1051 farg.ll = arg;
1052
1053 if (unlikely(float64_is_signaling_nan(farg.d))) {
1054 /* sNaN conversion */
1055 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1056 } else if (unlikely(float64_is_nan(farg.d) || isinfinity(farg.d))) {
1057 /* qNaN / infinity conversion */
1058 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1059 } else {
1060 farg.ll = float64_to_int64_round_to_zero(farg.d, &env->fp_status);
1061 }
1062 return farg.ll;
1063}
1064
1065#endif
1066
1067static always_inline uint64_t do_fri (uint64_t arg, int rounding_mode)
1068{
1069 CPU_DoubleU farg;
1070 farg.ll = arg;
1071
1072 if (unlikely(float64_is_signaling_nan(farg.d))) {
1073 /* sNaN round */
1074 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1075 } else if (unlikely(float64_is_nan(farg.d) || isinfinity(farg.d))) {
1076 /* qNaN / infinity round */
1077 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1078 } else {
1079 set_float_rounding_mode(rounding_mode, &env->fp_status);
1080 farg.ll = float64_round_to_int(farg.d, &env->fp_status);
1081 /* Restore rounding mode from FPSCR */
1082 fpscr_set_rounding_mode();
1083 }
1084 return farg.ll;
1085}
1086
1087uint64_t helper_frin (uint64_t arg)
1088{
1089 return do_fri(arg, float_round_nearest_even);
1090}
1091
1092uint64_t helper_friz (uint64_t arg)
1093{
1094 return do_fri(arg, float_round_to_zero);
1095}
1096
1097uint64_t helper_frip (uint64_t arg)
1098{
1099 return do_fri(arg, float_round_up);
1100}
1101
1102uint64_t helper_frim (uint64_t arg)
1103{
1104 return do_fri(arg, float_round_down);
1105}
1106
1107/* fmadd - fmadd. */
1108uint64_t helper_fmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1109{
1110 CPU_DoubleU farg1, farg2, farg3;
1111
1112 farg1.ll = arg1;
1113 farg2.ll = arg2;
1114 farg3.ll = arg3;
1115#if USE_PRECISE_EMULATION
1116 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1117 float64_is_signaling_nan(farg2.d) ||
1118 float64_is_signaling_nan(farg3.d))) {
1119 /* sNaN operation */
1120 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1121 } else {
1122#ifdef FLOAT128
1123 /* This is the way the PowerPC specification defines it */
1124 float128 ft0_128, ft1_128;
1125
1126 ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1127 ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
1128 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1129 ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1130 ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
1131 farg1.d = float128_to_float64(ft0_128, &env->fp_status);
1132#else
1133 /* This is OK on x86 hosts */
1134 farg1.d = (farg1.d * farg2.d) + farg3.d;
1135#endif
1136 }
1137#else
1138 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1139 farg1.d = float64_add(farg1.d, farg3.d, &env->fp_status);
1140#endif
1141 return farg1.ll;
1142}
1143
1144/* fmsub - fmsub. */
1145uint64_t helper_fmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1146{
1147 CPU_DoubleU farg1, farg2, farg3;
1148
1149 farg1.ll = arg1;
1150 farg2.ll = arg2;
1151 farg3.ll = arg3;
1152#if USE_PRECISE_EMULATION
1153 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1154 float64_is_signaling_nan(farg2.d) ||
1155 float64_is_signaling_nan(farg3.d))) {
1156 /* sNaN operation */
1157 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1158 } else {
1159#ifdef FLOAT128
1160 /* This is the way the PowerPC specification defines it */
1161 float128 ft0_128, ft1_128;
1162
1163 ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1164 ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
1165 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1166 ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1167 ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
1168 farg1.d = float128_to_float64(ft0_128, &env->fp_status);
1169#else
1170 /* This is OK on x86 hosts */
1171 farg1.d = (farg1.d * farg2.d) - farg3.d;
1172#endif
1173 }
1174#else
1175 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1176 farg1.d = float64_sub(farg1.d, farg3.d, &env->fp_status);
1177#endif
1178 return farg1.ll;
1179}
1180
1181/* fnmadd - fnmadd. */
1182uint64_t helper_fnmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1183{
1184 CPU_DoubleU farg1, farg2, farg3;
1185
1186 farg1.ll = arg1;
1187 farg2.ll = arg2;
1188 farg3.ll = arg3;
1189
1190 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1191 float64_is_signaling_nan(farg2.d) ||
1192 float64_is_signaling_nan(farg3.d))) {
1193 /* sNaN operation */
1194 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1195 } else {
1196#if USE_PRECISE_EMULATION
1197#ifdef FLOAT128
1198 /* This is the way the PowerPC specification defines it */
1199 float128 ft0_128, ft1_128;
1200
1201 ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1202 ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
1203 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1204 ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1205 ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
1206 farg1.d = float128_to_float64(ft0_128, &env->fp_status);
1207#else
1208 /* This is OK on x86 hosts */
1209 farg1.d = (farg1.d * farg2.d) + farg3.d;
1210#endif
1211#else
1212 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1213 farg1.d = float64_add(farg1.d, farg3.d, &env->fp_status);
1214#endif
1215 if (likely(!isnan(farg1.d)))
1216 farg1.d = float64_chs(farg1.d);
1217 }
1218 return farg1.ll;
1219}
1220
1221/* fnmsub - fnmsub. */
1222uint64_t helper_fnmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1223{
1224 CPU_DoubleU farg1, farg2, farg3;
1225
1226 farg1.ll = arg1;
1227 farg2.ll = arg2;
1228 farg3.ll = arg3;
1229
1230 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1231 float64_is_signaling_nan(farg2.d) ||
1232 float64_is_signaling_nan(farg3.d))) {
1233 /* sNaN operation */
1234 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1235 } else {
1236#if USE_PRECISE_EMULATION
1237#ifdef FLOAT128
1238 /* This is the way the PowerPC specification defines it */
1239 float128 ft0_128, ft1_128;
1240
1241 ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1242 ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
1243 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1244 ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1245 ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
1246 farg1.d = float128_to_float64(ft0_128, &env->fp_status);
1247#else
1248 /* This is OK on x86 hosts */
1249 farg1.d = (farg1.d * farg2.d) - farg3.d;
1250#endif
1251#else
1252 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1253 farg1.d = float64_sub(farg1.d, farg3.d, &env->fp_status);
1254#endif
1255 if (likely(!isnan(farg1.d)))
1256 farg1.d = float64_chs(farg1.d);
1257 }
1258 return farg1.ll;
1259}
1260
1261/* frsp - frsp. */
1262uint64_t helper_frsp (uint64_t arg)
1263{
1264 CPU_DoubleU farg;
1265 farg.ll = arg;
1266
1267#if USE_PRECISE_EMULATION
1268 if (unlikely(float64_is_signaling_nan(farg.d))) {
1269 /* sNaN round to single precision */
1270 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1271 } else {
1272 farg.d = float64_to_float32(farg.d, &env->fp_status);
1273 }
1274#else
1275 farg.d = float64_to_float32(farg.d, &env->fp_status);
1276#endif
1277 return farg.ll;
1278}
1279
1280/* fsqrt - fsqrt. */
1281uint64_t helper_fsqrt (uint64_t arg)
1282{
1283 CPU_DoubleU farg;
1284 farg.ll = arg;
1285
1286 if (unlikely(float64_is_signaling_nan(farg.d))) {
1287 /* sNaN square root */
1288 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1289 } else if (unlikely(fpisneg(farg.d) && !iszero(farg.d))) {
1290 /* Square root of a negative nonzero number */
1291 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
1292 } else {
1293 farg.d = float64_sqrt(farg.d, &env->fp_status);
1294 }
1295 return farg.ll;
1296}
1297
1298/* fre - fre. */
1299uint64_t helper_fre (uint64_t arg)
1300{
1301 CPU_DoubleU farg;
1302 farg.ll = arg;
1303
1304 if (unlikely(float64_is_signaling_nan(farg.d))) {
1305 /* sNaN reciprocal */
1306 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1307 } else if (unlikely(iszero(farg.d))) {
1308 /* Zero reciprocal */
1309 farg.ll = float_zero_divide_excp(1.0, farg.d);
1310 } else if (likely(isnormal(farg.d))) {
1311 farg.d = float64_div(1.0, farg.d, &env->fp_status);
1312 } else {
1313 if (farg.ll == 0x8000000000000000ULL) {
1314 farg.ll = 0xFFF0000000000000ULL;
1315 } else if (farg.ll == 0x0000000000000000ULL) {
1316 farg.ll = 0x7FF0000000000000ULL;
1317 } else if (isnan(farg.d)) {
1318 farg.ll = 0x7FF8000000000000ULL;
1319 } else if (fpisneg(farg.d)) {
1320 farg.ll = 0x8000000000000000ULL;
1321 } else {
1322 farg.ll = 0x0000000000000000ULL;
1323 }
1324 }
1325 return farg.ll;
1326}
1327
1328/* fres - fres. */
1329uint64_t helper_fres (uint64_t arg)
1330{
1331 CPU_DoubleU farg;
1332 farg.ll = arg;
1333
1334 if (unlikely(float64_is_signaling_nan(farg.d))) {
1335 /* sNaN reciprocal */
1336 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1337 } else if (unlikely(iszero(farg.d))) {
1338 /* Zero reciprocal */
1339 farg.ll = float_zero_divide_excp(1.0, farg.d);
1340 } else if (likely(isnormal(farg.d))) {
1341#if USE_PRECISE_EMULATION
1342 farg.d = float64_div(1.0, farg.d, &env->fp_status);
1343 farg.d = float64_to_float32(farg.d, &env->fp_status);
1344#else
1345 farg.d = float32_div(1.0, farg.d, &env->fp_status);
1346#endif
1347 } else {
1348 if (farg.ll == 0x8000000000000000ULL) {
1349 farg.ll = 0xFFF0000000000000ULL;
1350 } else if (farg.ll == 0x0000000000000000ULL) {
1351 farg.ll = 0x7FF0000000000000ULL;
1352 } else if (isnan(farg.d)) {
1353 farg.ll = 0x7FF8000000000000ULL;
1354 } else if (fpisneg(farg.d)) {
1355 farg.ll = 0x8000000000000000ULL;
1356 } else {
1357 farg.ll = 0x0000000000000000ULL;
1358 }
1359 }
1360 return farg.ll;
1361}
1362
1363/* frsqrte - frsqrte. */
1364uint64_t helper_frsqrte (uint64_t arg)
1365{
1366 CPU_DoubleU farg;
1367 farg.ll = arg;
1368
1369 if (unlikely(float64_is_signaling_nan(farg.d))) {
1370 /* sNaN reciprocal square root */
1371 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1372 } else if (unlikely(fpisneg(farg.d) && !iszero(farg.d))) {
1373 /* Reciprocal square root of a negative nonzero number */
1374 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
1375 } else if (likely(isnormal(farg.d))) {
1376 farg.d = float64_sqrt(farg.d, &env->fp_status);
1377 farg.d = float32_div(1.0, farg.d, &env->fp_status);
1378 } else {
1379 if (farg.ll == 0x8000000000000000ULL) {
1380 farg.ll = 0xFFF0000000000000ULL;
1381 } else if (farg.ll == 0x0000000000000000ULL) {
1382 farg.ll = 0x7FF0000000000000ULL;
1383 } else if (isnan(farg.d)) {
1384 farg.ll |= 0x000FFFFFFFFFFFFFULL;
1385 } else if (fpisneg(farg.d)) {
1386 farg.ll = 0x7FF8000000000000ULL;
1387 } else {
1388 farg.ll = 0x0000000000000000ULL;
1389 }
1390 }
1391 return farg.ll;
1392}
1393
1394/* fsel - fsel. */
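/* fsel selects between its last two operands based on the sign of the first:
 * a first operand greater than or equal to zero selects arg2, anything else
 * selects arg3. NaN inputs are not special-cased here; only the sign bit and
 * the zero test are consulted.
 */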
1395uint64_t helper_fsel (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1396{
1397 CPU_DoubleU farg1, farg2, farg3;
1398
1399 farg1.ll = arg1;
1400 farg2.ll = arg2;
1401 farg3.ll = arg3;
1402
1403 if (!fpisneg(farg1.d) || iszero(farg1.d))
1404 return farg2.ll;
1405 else
1406 return farg3.ll;
1407}
1408
1409uint32_t helper_fcmpu (uint64_t arg1, uint64_t arg2)
1410{
1411 CPU_DoubleU farg1, farg2;
1412 uint32_t ret = 0;
1413 farg1.ll = arg1;
1414 farg2.ll = arg2;
1415
1416 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1417 float64_is_signaling_nan(farg2.d))) {
1418 /* sNaN comparison */
1419 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1420 } else {
1421 if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
1422 ret = 0x08UL;
1423 } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
1424 ret = 0x04UL;
1425 } else {
1426 ret = 0x02UL;
1427 }
1428 }
1429 env->fpscr &= ~(0x0F << FPSCR_FPRF);
1430 env->fpscr |= ret << FPSCR_FPRF;
1431 return ret;
1432}
1433
1434uint32_t helper_fcmpo (uint64_t arg1, uint64_t arg2)
1435{
1436 CPU_DoubleU farg1, farg2;
1437 uint32_t ret = 0;
1438 farg1.ll = arg1;
1439 farg2.ll = arg2;
1440
1441 if (unlikely(float64_is_nan(farg1.d) ||
1442 float64_is_nan(farg2.d))) {
1443 if (float64_is_signaling_nan(farg1.d) ||
1444 float64_is_signaling_nan(farg2.d)) {
1445 /* sNaN comparison */
1446 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN |
1447 POWERPC_EXCP_FP_VXVC);
1448 } else {
1449 /* qNaN comparison */
1450 fload_invalid_op_excp(POWERPC_EXCP_FP_VXVC);
1451 }
1452 } else {
1453 if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
1454 ret = 0x08UL;
1455 } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
1456 ret = 0x04UL;
1457 } else {
1458 ret = 0x02UL;
1459 }
1460 }
1461 env->fpscr &= ~(0x0F << FPSCR_FPRF);
1462 env->fpscr |= ret << FPSCR_FPRF;
1463 return ret;
1464}
1465
1466#if !defined (CONFIG_USER_ONLY)
1467void cpu_dump_rfi (target_ulong RA, target_ulong msr);
1468
1469void do_store_msr (void)
1470{
1471 T0 = hreg_store_msr(env, T0, 0);
1472 if (T0 != 0) {
1473 env->interrupt_request |= CPU_INTERRUPT_EXITTB;
1474 raise_exception(env, T0);
1475 }
1476}
1477
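/* Common tail for the rfi family: reload NIP and MSR from the given
 * save/restore pair, masking MSR with msrm. In 32-bit mode the upper MSR
 * half can be preserved (keep_msrh). The two low bits of NIP are cleared
 * since instructions are word aligned, and the TB is exited so the new MSR
 * context takes effect.
 */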
1478static always_inline void __do_rfi (target_ulong nip, target_ulong msr,
1479 target_ulong msrm, int keep_msrh)
1480{
1481#if defined(TARGET_PPC64)
1482 if (msr & (1ULL << MSR_SF)) {
1483 nip = (uint64_t)nip;
1484 msr &= (uint64_t)msrm;
1485 } else {
1486 nip = (uint32_t)nip;
1487 msr = (uint32_t)(msr & msrm);
1488 if (keep_msrh)
1489 msr |= env->msr & ~((uint64_t)0xFFFFFFFF);
1490 }
1491#else
1492 nip = (uint32_t)nip;
1493 msr &= (uint32_t)msrm;
1494#endif
1495 /* XXX: beware: this is false if VLE is supported */
1496 env->nip = nip & ~((target_ulong)0x00000003);
1497 hreg_store_msr(env, msr, 1);
1498#if defined (DEBUG_OP)
1499 cpu_dump_rfi(env->nip, env->msr);
1500#endif
1501 /* No need to raise an exception here,
1502 * as rfi is always the last insn of a TB
1503 */
1504 env->interrupt_request |= CPU_INTERRUPT_EXITTB;
1505}
1506
1507void do_rfi (void)
1508{
1509 __do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
1510 ~((target_ulong)0xFFFF0000), 1);
1511}
1512
1513#if defined(TARGET_PPC64)
1514void do_rfid (void)
1515{
1516 __do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
1517 ~((target_ulong)0xFFFF0000), 0);
1518}
1519
1520void do_hrfid (void)
1521{
1522 __do_rfi(env->spr[SPR_HSRR0], env->spr[SPR_HSRR1],
1523 ~((target_ulong)0xFFFF0000), 0);
1524}
1525#endif
1526#endif
1527
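/* tw/td: trap when any condition selected by the TO field matches.
 * flags bit 0x10: signed less than, 0x08: signed greater than, 0x04: equal,
 * 0x02: unsigned less than, 0x01: unsigned greater than.
 */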
1528void helper_tw (target_ulong arg1, target_ulong arg2, uint32_t flags)
1529{
1530 if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
1531 ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
1532 ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
1533 ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
1534 ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
1535 raise_exception_err(env, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
1536 }
1537}
1538
1539#if defined(TARGET_PPC64)
1540void helper_td (target_ulong arg1, target_ulong arg2, uint32_t flags)
1541{
1542 if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
1543 ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
1544 ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
1545 ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
1546 ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01)))))
1547 raise_exception_err(env, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
1548}
1549#endif
1550
1551/*****************************************************************************/
1552/* PowerPC 601 specific instructions (POWER bridge) */
1553void do_POWER_abso (void)
1554{
1555 if ((int32_t)T0 == INT32_MIN) {
1556 T0 = INT32_MAX;
1557 env->xer |= (1 << XER_OV) | (1 << XER_SO);
1558 } else if ((int32_t)T0 < 0) {
1559 T0 = -T0;
1560 env->xer &= ~(1 << XER_OV);
1561 } else {
1562 env->xer &= ~(1 << XER_OV);
1563 }
1564}
1565
1566void do_POWER_clcs (void)
1567{
1568 switch (T0) {
1569 case 0x0CUL:
1570 /* Instruction cache line size */
1571 T0 = env->icache_line_size;
1572 break;
1573 case 0x0DUL:
1574 /* Data cache line size */
1575 T0 = env->dcache_line_size;
1576 break;
1577 case 0x0EUL:
1578 /* Minimum cache line size */
1579 T0 = env->icache_line_size < env->dcache_line_size ?
1580 env->icache_line_size : env->dcache_line_size;
1581 break;
1582 case 0x0FUL:
1583 /* Maximum cache line size */
1584 T0 = env->icache_line_size > env->dcache_line_size ?
1585 env->icache_line_size : env->dcache_line_size;
1586 break;
1587 default:
1588 /* Undefined */
1589 break;
1590 }
1591}
1592
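/* POWER (601 bridge) divide: the 64-bit dividend is T0 concatenated with the
 * MQ special register, the quotient goes back to T0 and the remainder to MQ.
 * Division by zero and the INT32_MIN / -1 case are special-cased: T0 becomes
 * 0 or 0xFFFFFFFF depending on the sign of the dividend and MQ is cleared.
 */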
1593void do_POWER_div (void)
1594{
1595 uint64_t tmp;
1596
1597 if (((int32_t)T0 == INT32_MIN && (int32_t)T1 == (int32_t)-1) ||
1598 (int32_t)T1 == 0) {
1599 T0 = UINT32_MAX * ((uint32_t)T0 >> 31);
1600 env->spr[SPR_MQ] = 0;
1601 } else {
1602 tmp = ((uint64_t)T0 << 32) | env->spr[SPR_MQ];
1603 env->spr[SPR_MQ] = tmp % T1;
1604 T0 = tmp / (int32_t)T1;
1605 }
1606}
1607
1608void do_POWER_divo (void)
1609{
1610 int64_t tmp;
1611
1612 if (((int32_t)T0 == INT32_MIN && (int32_t)T1 == (int32_t)-1) ||
1613 (int32_t)T1 == 0) {
1614 T0 = UINT32_MAX * ((uint32_t)T0 >> 31);
1615 env->spr[SPR_MQ] = 0;
1616 env->xer |= (1 << XER_OV) | (1 << XER_SO);
1617 } else {
1618 tmp = ((uint64_t)T0 << 32) | env->spr[SPR_MQ];
1619 env->spr[SPR_MQ] = tmp % T1;
1620 tmp /= (int32_t)T1;
1621 if (tmp > (int64_t)INT32_MAX || tmp < (int64_t)INT32_MIN) {
1622 env->xer |= (1 << XER_OV) | (1 << XER_SO);
1623 } else {
1624 env->xer &= ~(1 << XER_OV);
1625 }
1626 T0 = tmp;
1627 }
1628}
1629
1630void do_POWER_divs (void)
1631{
1632 if (((int32_t)T0 == INT32_MIN && (int32_t)T1 == (int32_t)-1) ||
1633 (int32_t)T1 == 0) {
1634 T0 = UINT32_MAX * ((uint32_t)T0 >> 31);
1635 env->spr[SPR_MQ] = 0;
1636 } else {
1637 env->spr[SPR_MQ] = T0 % T1;
1638 T0 = (int32_t)T0 / (int32_t)T1;
1639 }
1640}
1641
1642void do_POWER_divso (void)
1643{
1644 if (((int32_t)T0 == INT32_MIN && (int32_t)T1 == (int32_t)-1) ||
1645 (int32_t)T1 == 0) {
1646 T0 = UINT32_MAX * ((uint32_t)T0 >> 31);
1647 env->spr[SPR_MQ] = 0;
1648 env->xer |= (1 << XER_OV) | (1 << XER_SO);
1649 } else {
1650 T0 = (int32_t)T0 / (int32_t)T1;
1651 env->spr[SPR_MQ] = (int32_t)T0 % (int32_t)T1;
1652 env->xer &= ~(1 << XER_OV);
1653 }
1654}
1655
1656void do_POWER_dozo (void)
1657{
1658 if ((int32_t)T1 > (int32_t)T0) {
1659 T2 = T0;
1660 T0 = T1 - T0;
1661 if (((uint32_t)(~T2) ^ (uint32_t)T1 ^ UINT32_MAX) &
1662 ((uint32_t)(~T2) ^ (uint32_t)T0) & (1UL << 31)) {
1663 env->xer |= (1 << XER_OV) | (1 << XER_SO);
1664 } else {
1665 env->xer &= ~(1 << XER_OV);
1666 }
1667 } else {
1668 T0 = 0;
1669 env->xer &= ~(1 << XER_OV);
1670 }
1671}
1672
1673void do_POWER_maskg (void)
1674{
1675 uint32_t ret;
1676
1677 if ((uint32_t)T0 == (uint32_t)(T1 + 1)) {
1678 ret = UINT32_MAX;
1679 } else {
1680 ret = (UINT32_MAX >> ((uint32_t)T0)) ^
1681 ((UINT32_MAX >> ((uint32_t)T1)) >> 1);
1682 if ((uint32_t)T0 > (uint32_t)T1)
1683 ret = ~ret;
1684 }
1685 T0 = ret;
1686}
1687
1688void do_POWER_mulo (void)
1689{
1690 uint64_t tmp;
1691
1692 tmp = (uint64_t)T0 * (uint64_t)T1;
1693 env->spr[SPR_MQ] = tmp >> 32;
1694 T0 = tmp;
1695 if (tmp >> 32 != ((uint64_t)T0 >> 16) * ((uint64_t)T1 >> 16)) {
1696 env->xer |= (1 << XER_OV) | (1 << XER_SO);
1697 } else {
1698 env->xer &= ~(1 << XER_OV);
1699 }
1700}
1701
1702#if !defined (CONFIG_USER_ONLY)
1703void do_POWER_rac (void)
1704{
1705 mmu_ctx_t ctx;
1706 int nb_BATs;
1707
1708 /* We don't have to generate many instances of this instruction,
1709 * as rac is supervisor only.
1710 */
1711 /* XXX: FIX THIS: Pretend we have no BAT */
1712 nb_BATs = env->nb_BATs;
1713 env->nb_BATs = 0;
1714 if (get_physical_address(env, &ctx, T0, 0, ACCESS_INT) == 0)
1715 T0 = ctx.raddr;
1716 env->nb_BATs = nb_BATs;
1717}
1718
1719void do_POWER_rfsvc (void)
1720{
1721 __do_rfi(env->lr, env->ctr, 0x0000FFFF, 0);
1722}
1723
1724void do_store_hid0_601 (void)
1725{
1726 uint32_t hid0;
1727
1728 hid0 = env->spr[SPR_HID0];
1729 if ((T0 ^ hid0) & 0x00000008) {
1730 /* Change current endianness */
1731 env->hflags &= ~(1 << MSR_LE);
1732 env->hflags_nmsr &= ~(1 << MSR_LE);
1733 env->hflags_nmsr |= (1 << MSR_LE) & (((T0 >> 3) & 1) << MSR_LE);
1734 env->hflags |= env->hflags_nmsr;
1735 if (loglevel != 0) {
1736 fprintf(logfile, "%s: set endianness to %c => " ADDRX "\n",
1737 __func__, T0 & 0x8 ? 'l' : 'b', env->hflags);
1738 }
1739 }
1740 env->spr[SPR_HID0] = T0;
1741}
1742#endif
1743
1744/*****************************************************************************/
1745/* 602 specific instructions */
1746/* mfrom is the most crazy instruction ever seen, imho ! */
1747/* Real implementation uses a ROM table. Do the same */
1748#define USE_MFROM_ROM_TABLE
1749target_ulong helper_602_mfrom (target_ulong arg)
1750{
1751 if (likely(arg < 602)) {
1752#if defined(USE_MFROM_ROM_TABLE)
1753#include "mfrom_table.c"
1754 return mfrom_ROM_table[arg];
1755#else
1756 double d;
1757 /* Extremely decomposed:
1758 * return 256 * log10(pow(10, -arg / 256) + 1.0) + 0.5
1760 */
1761 d = arg;
1762 d = float64_div(d, 256, &env->fp_status);
1763 d = float64_chs(d);
1764 d = exp10(d); // XXX: use float emulation function
1765 d = float64_add(d, 1.0, &env->fp_status);
1766 d = log10(d); // XXX: use float emulation function
1767 d = float64_mul(d, 256, &env->fp_status);
1768 d = float64_add(d, 0.5, &env->fp_status);
1769 return float64_round_to_int(d, &env->fp_status);
1770#endif
1771 } else {
1772 return 0;
1773 }
1774}
1775
1776/*****************************************************************************/
1777/* Embedded PowerPC specific helpers */
1778
1779/* XXX: to be improved to check access rights when in user-mode */
1780void do_load_dcr (void)
1781{
1782 target_ulong val;
1783
1784 if (unlikely(env->dcr_env == NULL)) {
1785 if (loglevel != 0) {
1786 fprintf(logfile, "No DCR environment\n");
1787 }
1788 raise_exception_err(env, POWERPC_EXCP_PROGRAM,
1789 POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
1790 } else if (unlikely(ppc_dcr_read(env->dcr_env, T0, &val) != 0)) {
1791 if (loglevel != 0) {
1792 fprintf(logfile, "DCR read error %d %03x\n", (int)T0, (int)T0);
1793 }
1794 raise_exception_err(env, POWERPC_EXCP_PROGRAM,
1795 POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
1796 } else {
1797 T0 = val;
1798 }
1799}
1800
1801void do_store_dcr (void)
1802{
1803 if (unlikely(env->dcr_env == NULL)) {
1804 if (loglevel != 0) {
1805 fprintf(logfile, "No DCR environment\n");
1806 }
1807 raise_exception_err(env, POWERPC_EXCP_PROGRAM,
1808 POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
1809 } else if (unlikely(ppc_dcr_write(env->dcr_env, T0, T1) != 0)) {
1810 if (loglevel != 0) {
1811 fprintf(logfile, "DCR write error %d %03x\n", (int)T0, (int)T0);
1812 }
1813 raise_exception_err(env, POWERPC_EXCP_PROGRAM,
1814 POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
1815 }
1816}
1817
1818#if !defined(CONFIG_USER_ONLY)
1819void do_40x_rfci (void)
1820{
1821 __do_rfi(env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3],
1822 ~((target_ulong)0xFFFF0000), 0);
1823}
1824
1825void do_rfci (void)
1826{
1827 __do_rfi(env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1],
1828 ~((target_ulong)0x3FFF0000), 0);
1829}
1830
1831void do_rfdi (void)
1832{
1833 __do_rfi(env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1],
1834 ~((target_ulong)0x3FFF0000), 0);
1835}
1836
1837void do_rfmci (void)
1838{
1839 __do_rfi(env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1],
1840 ~((target_ulong)0x3FFF0000), 0);
1841}
1842
1843void do_load_403_pb (int num)
1844{
1845 T0 = env->pb[num];
1846}
1847
1848void do_store_403_pb (int num)
1849{
1850 if (likely(env->pb[num] != T0)) {
1851 env->pb[num] = T0;
1852 /* Should be optimized */
1853 tlb_flush(env, 1);
1854 }
1855}
1856#endif
1857
1858/* 440 specific */
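/* dlmzb (440): determine leftmost zero byte. The eight bytes of T0:T1 are
 * scanned from the most significant byte of T0 downwards and T0 receives the
 * 1-based position of the first zero byte found.
 */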
1859void do_440_dlmzb (void)
1860{
1861 target_ulong mask;
1862 int i;
1863
1864 i = 1;
1865 for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
1866 if ((T0 & mask) == 0)
1867 goto done;
1868 i++;
1869 }
1870 for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
1871 if ((T1 & mask) == 0)
1872 break;
1873 i++;
1874 }
1875 done:
1876 T0 = i;
1877}
1878
1879/*****************************************************************************/
1880/* SPE extension helpers */
1881/* Use a table to make this quicker */
1882static uint8_t hbrev[16] = {
1883 0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE,
1884 0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF,
1885};
1886
1887static always_inline uint8_t byte_reverse (uint8_t val)
1888{
1889 return hbrev[val >> 4] | (hbrev[val & 0xF] << 4);
1890}
1891
1892static always_inline uint32_t word_reverse (uint32_t val)
1893{
1894 return byte_reverse(val >> 24) | (byte_reverse(val >> 16) << 8) |
1895 (byte_reverse(val >> 8) << 16) | (byte_reverse(val) << 24);
1896}
1897
1898#define MASKBITS 16 // Random value - to be fixed (implementation dependent)
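/* brinc: bit-reversed increment, used by SPE code for FFT-style bit-reversed
 * addressing. The low MASKBITS bits of the operands are combined,
 * bit-reversed, incremented and reversed back, and the result is merged into
 * arg1 under the mask bits supplied in arg2.
 */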
1899target_ulong helper_brinc (target_ulong arg1, target_ulong arg2)
1900{
1901 uint32_t a, b, d, mask;
1902
1903 mask = UINT32_MAX >> (32 - MASKBITS);
1904 a = arg1 & mask;
1905 b = arg2 & mask;
1906 d = word_reverse(1 + word_reverse(a | ~b));
1907 return (arg1 & ~mask) | (d & b);
1908}
1909
1910uint32_t helper_cntlsw32 (uint32_t val)
1911{
1912 if (val & 0x80000000)
1913 return clz32(~val);
1914 else
1915 return clz32(val);
1916}
1917
1918uint32_t helper_cntlzw32 (uint32_t val)
1919{
1920 return clz32(val);
1921}
1922
1923/* Single-precision floating-point conversions */
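/* SPE single-precision conversions. efscf{si,ui} convert signed/unsigned
 * 32-bit integers to float and efsct{si,ui}[z] convert back, optionally
 * truncating towards zero. The *f variants handle the fractional formats by
 * scaling with the 1ULL << 32 constant. NaN inputs yield 0 rather than the
 * IEEE result, as noted in each helper.
 */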
1924static always_inline uint32_t efscfsi (uint32_t val)
1925{
1926 CPU_FloatU u;
1927
1928 u.f = int32_to_float32(val, &env->spe_status);
1929
1930 return u.l;
1931}
1932
1933static always_inline uint32_t efscfui (uint32_t val)
1934{
1935 CPU_FloatU u;
1936
1937 u.f = uint32_to_float32(val, &env->spe_status);
1938
1939 return u.l;
1940}
1941
1942static always_inline int32_t efsctsi (uint32_t val)
1943{
1944 CPU_FloatU u;
1945
1946 u.l = val;
1947 /* NaNs are not handled the way IEEE 754 specifies */
1948 if (unlikely(isnan(u.f)))
1949 return 0;
1950
1951 return float32_to_int32(u.f, &env->spe_status);
1952}
1953
1954static always_inline uint32_t efsctui (uint32_t val)
1955{
1956 CPU_FloatU u;
1957
1958 u.l = val;
1959 /* NaNs are not handled the way IEEE 754 specifies */
1960 if (unlikely(isnan(u.f)))
1961 return 0;
1962
1963 return float32_to_uint32(u.f, &env->spe_status);
1964}
1965
1966static always_inline uint32_t efsctsiz (uint32_t val)
1967{
1968 CPU_FloatU u;
1969
1970 u.l = val;
1971 /* NaNs are not handled the way IEEE 754 specifies */
1972 if (unlikely(isnan(u.f)))
1973 return 0;
1974
1975 return float32_to_int32_round_to_zero(u.f, &env->spe_status);
1976}
1977
1978static always_inline uint32_t efsctuiz (uint32_t val)
1979{
1980 CPU_FloatU u;
1981
1982 u.l = val;
1983 /* NaNs are not handled the way IEEE 754 specifies */
1984 if (unlikely(isnan(u.f)))
1985 return 0;
1986
1987 return float32_to_uint32_round_to_zero(u.f, &env->spe_status);
1988}
1989
1990static always_inline uint32_t efscfsf (uint32_t val)
1991{
1992 CPU_FloatU u;
1993 float32 tmp;
1994
1995 u.f = int32_to_float32(val, &env->spe_status);
1996 tmp = int64_to_float32(1ULL << 32, &env->spe_status);
1997 u.f = float32_div(u.f, tmp, &env->spe_status);
1998
1999 return u.l;
2000}
2001
2002static always_inline uint32_t efscfuf (uint32_t val)
2003{
2004 CPU_FloatU u;
2005 float32 tmp;
2006
2007 u.f = uint32_to_float32(val, &env->spe_status);
2008 tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
2009 u.f = float32_div(u.f, tmp, &env->spe_status);
2010
2011 return u.l;
2012}
2013
2014static always_inline uint32_t efsctsf (uint32_t val)
2015{
2016 CPU_FloatU u;
2017 float32 tmp;
2018
2019 u.l = val;
2020 /* NaNs are not handled the way IEEE 754 specifies */
2021 if (unlikely(isnan(u.f)))
2022 return 0;
2023 tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
2024 u.f = float32_mul(u.f, tmp, &env->spe_status);
2025
2026 return float32_to_int32(u.f, &env->spe_status);
2027}
2028
2029static always_inline uint32_t efsctuf (uint32_t val)
2030{
2031 CPU_FloatU u;
2032 float32 tmp;
2033
2034 u.l = val;
2035 /* NaNs are not handled the way IEEE 754 specifies */
2036 if (unlikely(isnan(u.f)))
2037 return 0;
2038 tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
2039 u.f = float32_mul(u.f, tmp, &env->spe_status);
2040
2041 return float32_to_uint32(u.f, &env->spe_status);
2042}
2043
2044#define HELPER_SPE_SINGLE_CONV(name) \
2045uint32_t helper_e##name (uint32_t val) \
2046{ \
2047 return e##name(val); \
2048}
2049/* efscfsi */
2050HELPER_SPE_SINGLE_CONV(fscfsi);
2051/* efscfui */
2052HELPER_SPE_SINGLE_CONV(fscfui);
2053/* efscfuf */
2054HELPER_SPE_SINGLE_CONV(fscfuf);
2055/* efscfsf */
2056HELPER_SPE_SINGLE_CONV(fscfsf);
2057/* efsctsi */
2058HELPER_SPE_SINGLE_CONV(fsctsi);
2059/* efsctui */
2060HELPER_SPE_SINGLE_CONV(fsctui);
2061/* efsctsiz */
2062HELPER_SPE_SINGLE_CONV(fsctsiz);
2063/* efsctuiz */
2064HELPER_SPE_SINGLE_CONV(fsctuiz);
2065/* efsctsf */
2066HELPER_SPE_SINGLE_CONV(fsctsf);
2067/* efsctuf */
2068HELPER_SPE_SINGLE_CONV(fsctuf);
2069
2070#define HELPER_SPE_VECTOR_CONV(name) \
2071uint64_t helper_ev##name (uint64_t val) \
2072{ \
2073 return ((uint64_t)e##name(val >> 32) << 32) | \
2074 (uint64_t)e##name(val); \
2075}
2076/* evfscfsi */
2077HELPER_SPE_VECTOR_CONV(fscfsi);
2078/* evfscfui */
2079HELPER_SPE_VECTOR_CONV(fscfui);
2080/* evfscfuf */
2081HELPER_SPE_VECTOR_CONV(fscfuf);
2082/* evfscfsf */
2083HELPER_SPE_VECTOR_CONV(fscfsf);
2084/* evfsctsi */
2085HELPER_SPE_VECTOR_CONV(fsctsi);
2086/* evfsctui */
2087HELPER_SPE_VECTOR_CONV(fsctui);
2088/* evfsctsiz */
2089HELPER_SPE_VECTOR_CONV(fsctsiz);
2090/* evfsctuiz */
2091HELPER_SPE_VECTOR_CONV(fsctuiz);
2092/* evfsctsf */
2093HELPER_SPE_VECTOR_CONV(fsctsf);
2094/* evfsctuf */
2095HELPER_SPE_VECTOR_CONV(fsctuf);
2096
2097/* Single-precision floating-point arithmetic */
2098static always_inline uint32_t efsadd (uint32_t op1, uint32_t op2)
2099{
2100 CPU_FloatU u1, u2;
2101 u1.l = op1;
2102 u2.l = op2;
2103 u1.f = float32_add(u1.f, u2.f, &env->spe_status);
2104 return u1.l;
2105}
2106
2107static always_inline uint32_t efssub (uint32_t op1, uint32_t op2)
2108{
2109 CPU_FloatU u1, u2;
2110 u1.l = op1;
2111 u2.l = op2;
2112 u1.f = float32_sub(u1.f, u2.f, &env->spe_status);
2113 return u1.l;
2114}
2115
2116static always_inline uint32_t efsmul (uint32_t op1, uint32_t op2)
2117{
2118 CPU_FloatU u1, u2;
2119 u1.l = op1;
2120 u2.l = op2;
2121 u1.f = float32_mul(u1.f, u2.f, &env->spe_status);
2122 return u1.l;
2123}
2124
2125static always_inline uint32_t efsdiv (uint32_t op1, uint32_t op2)
2126{
2127 CPU_FloatU u1, u2;
2128 u1.l = op1;
2129 u2.l = op2;
2130 u1.f = float32_div(u1.f, u2.f, &env->spe_status);
2131 return u1.l;
2132}
2133
2134#define HELPER_SPE_SINGLE_ARITH(name) \
2135uint32_t helper_e##name (uint32_t op1, uint32_t op2) \
2136{ \
2137 return e##name(op1, op2); \
2138}
2139/* efsadd */
2140HELPER_SPE_SINGLE_ARITH(fsadd);
2141/* efssub */
2142HELPER_SPE_SINGLE_ARITH(fssub);
2143/* efsmul */
2144HELPER_SPE_SINGLE_ARITH(fsmul);
2145/* efsdiv */
2146HELPER_SPE_SINGLE_ARITH(fsdiv);
2147
2148#define HELPER_SPE_VECTOR_ARITH(name) \
2149uint64_t helper_ev##name (uint64_t op1, uint64_t op2) \
2150{ \
2151 return ((uint64_t)e##name(op1 >> 32, op2 >> 32) << 32) | \
2152 (uint64_t)e##name(op1, op2); \
2153}
2154/* evfsadd */
2155HELPER_SPE_VECTOR_ARITH(fsadd);
2156/* evfssub */
2157HELPER_SPE_VECTOR_ARITH(fssub);
2158/* evfsmul */
2159HELPER_SPE_VECTOR_ARITH(fsmul);
2160/* evfsdiv */
2161HELPER_SPE_VECTOR_ARITH(fsdiv);
2162
2163/* Single-precision floating-point comparisons */
2164static always_inline uint32_t efststlt (uint32_t op1, uint32_t op2)
2165{
2166 CPU_FloatU u1, u2;
2167 u1.l = op1;
2168 u2.l = op2;
2169 return float32_lt(u1.f, u2.f, &env->spe_status) ? 4 : 0;
2170}
2171
2172static always_inline uint32_t efststgt (uint32_t op1, uint32_t op2)
2173{
2174 CPU_FloatU u1, u2;
2175 u1.l = op1;
2176 u2.l = op2;
2177 return float32_le(u1.f, u2.f, &env->spe_status) ? 0 : 4;
2178}
2179
2180static always_inline uint32_t efststeq (uint32_t op1, uint32_t op2)
2181{
2182 CPU_FloatU u1, u2;
2183 u1.l = op1;
2184 u2.l = op2;
2185 return float32_eq(u1.f, u2.f, &env->spe_status) ? 4 : 0;
2186}
2187
2188static always_inline uint32_t efscmplt (uint32_t op1, uint32_t op2)
2189{
2190    /* XXX: TODO: test special values (NaN, infinities, ...) */
2191 return efststlt(op1, op2);
2192}
2193
2194static always_inline uint32_t efscmpgt (uint32_t op1, uint32_t op2)
2195{
2196    /* XXX: TODO: test special values (NaN, infinities, ...) */
2197 return efststgt(op1, op2);
2198}
2199
2200static always_inline uint32_t efscmpeq (uint32_t op1, uint32_t op2)
2201{
2202    /* XXX: TODO: test special values (NaN, infinities, ...) */
2203 return efststeq(op1, op2);
2204}
2205
2206#define HELPER_SINGLE_SPE_CMP(name) \
2207uint32_t helper_e##name (uint32_t op1, uint32_t op2) \
2208{ \
2209 return e##name(op1, op2) << 2; \
2210}
2211/* efststlt */
2212HELPER_SINGLE_SPE_CMP(fststlt);
2213/* efststgt */
2214HELPER_SINGLE_SPE_CMP(fststgt);
2215/* efststeq */
2216HELPER_SINGLE_SPE_CMP(fststeq);
2217/* efscmplt */
2218HELPER_SINGLE_SPE_CMP(fscmplt);
2219/* efscmpgt */
2220HELPER_SINGLE_SPE_CMP(fscmpgt);
2221/* efscmpeq */
2222HELPER_SINGLE_SPE_CMP(fscmpeq);
2223
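/* Pack the comparison results of the two vector elements into one value:
 * the high-element result, the low-element result, their OR and their AND
 * occupy successively lower bit positions. */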
2224static always_inline uint32_t evcmp_merge (int t0, int t1)
2225{
2226 return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
2227}
2228
2229#define HELPER_VECTOR_SPE_CMP(name) \
2230uint32_t helper_ev##name (uint64_t op1, uint64_t op2) \
2231{ \
2232 return evcmp_merge(e##name(op1 >> 32, op2 >> 32), e##name(op1, op2)); \
2233}
2234/* evfststlt */
2235HELPER_VECTOR_SPE_CMP(fststlt);
2236/* evfststgt */
2237HELPER_VECTOR_SPE_CMP(fststgt);
2238/* evfststeq */
2239HELPER_VECTOR_SPE_CMP(fststeq);
2240/* evfscmplt */
2241HELPER_VECTOR_SPE_CMP(fscmplt);
2242/* evfscmpgt */
2243HELPER_VECTOR_SPE_CMP(fscmpgt);
2244/* evfscmpeq */
2245HELPER_VECTOR_SPE_CMP(fscmpeq);
2246
2247/* Double-precision floating-point conversion */
2248uint64_t helper_efdcfsi (uint32_t val)
2249{
2250 CPU_DoubleU u;
2251
2252 u.d = int32_to_float64(val, &env->spe_status);
2253
2254 return u.ll;
2255}
2256
2257uint64_t helper_efdcfsid (uint64_t val)
2258{
2259 CPU_DoubleU u;
2260
2261 u.d = int64_to_float64(val, &env->spe_status);
2262
2263 return u.ll;
2264}
2265
2266uint64_t helper_efdcfui (uint32_t val)
2267{
2268 CPU_DoubleU u;
2269
2270 u.d = uint32_to_float64(val, &env->spe_status);
2271
2272 return u.ll;
2273}
2274
2275uint64_t helper_efdcfuid (uint64_t val)
2276{
2277 CPU_DoubleU u;
2278
2279 u.d = uint64_to_float64(val, &env->spe_status);
2280
2281 return u.ll;
2282}
2283
2284uint32_t helper_efdctsi (uint64_t val)
2285{
2286 CPU_DoubleU u;
2287
2288 u.ll = val;
2289    /* NaNs are not handled the way IEEE 754 specifies: return 0 */
2290 if (unlikely(isnan(u.d)))
2291 return 0;
2292
2293 return float64_to_int32(u.d, &env->spe_status);
2294}
2295
2296uint32_t helper_efdctui (uint64_t val)
2297{
2298 CPU_DoubleU u;
2299
2300 u.ll = val;
2301    /* NaNs are not handled the way IEEE 754 specifies: return 0 */
2302 if (unlikely(isnan(u.d)))
2303 return 0;
2304
2305 return float64_to_uint32(u.d, &env->spe_status);
2306}
2307
2308uint32_t helper_efdctsiz (uint64_t val)
2309{
2310 CPU_DoubleU u;
2311
2312 u.ll = val;
2313    /* NaNs are not handled the way IEEE 754 specifies: return 0 */
2314 if (unlikely(isnan(u.d)))
2315 return 0;
2316
2317 return float64_to_int32_round_to_zero(u.d, &env->spe_status);
2318}
2319
2320uint64_t helper_efdctsidz (uint64_t val)
2321{
2322 CPU_DoubleU u;
2323
2324 u.ll = val;
2325    /* NaNs are not handled the way IEEE 754 specifies: return 0 */
2326 if (unlikely(isnan(u.d)))
2327 return 0;
2328
2329 return float64_to_int64_round_to_zero(u.d, &env->spe_status);
2330}
2331
2332uint32_t helper_efdctuiz (uint64_t val)
2333{
2334 CPU_DoubleU u;
2335
2336 u.ll = val;
2337    /* NaNs are not handled the way IEEE 754 specifies: return 0 */
2338 if (unlikely(isnan(u.d)))
2339 return 0;
2340
2341 return float64_to_uint32_round_to_zero(u.d, &env->spe_status);
2342}
2343
2344uint64_t helper_efdctuidz (uint64_t val)
2345{
2346 CPU_DoubleU u;
2347
2348 u.ll = val;
2349    /* NaNs are not handled the way IEEE 754 specifies: return 0 */
2350 if (unlikely(isnan(u.d)))
2351 return 0;
2352
2353 return float64_to_uint64_round_to_zero(u.d, &env->spe_status);
2354}
2355
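/* Double-precision fractional conversions use the same 2^32 scaling as the
 * single-precision efscf*f/efsct*f helpers above. */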
2356uint64_t helper_efdcfsf (uint32_t val)
2357{
2358 CPU_DoubleU u;
2359 float64 tmp;
2360
2361 u.d = int32_to_float64(val, &env->spe_status);
2362 tmp = int64_to_float64(1ULL << 32, &env->spe_status);
2363 u.d = float64_div(u.d, tmp, &env->spe_status);
2364
2365 return u.ll;
2366}
2367
2368uint64_t helper_efdcfuf (uint32_t val)
2369{
2370 CPU_DoubleU u;
2371 float64 tmp;
2372
2373 u.d = uint32_to_float64(val, &env->spe_status);
2374 tmp = int64_to_float64(1ULL << 32, &env->spe_status);
2375 u.d = float64_div(u.d, tmp, &env->spe_status);
2376
2377 return u.ll;
2378}
2379
2380uint32_t helper_efdctsf (uint64_t val)
2381{
2382 CPU_DoubleU u;
2383 float64 tmp;
2384
2385 u.ll = val;
2386    /* NaNs are not handled the way IEEE 754 specifies: return 0 */
2387 if (unlikely(isnan(u.d)))
2388 return 0;
2389 tmp = uint64_to_float64(1ULL << 32, &env->spe_status);
2390 u.d = float64_mul(u.d, tmp, &env->spe_status);
2391
2392 return float64_to_int32(u.d, &env->spe_status);
2393}
2394
2395uint32_t helper_efdctuf (uint64_t val)
2396{
2397 CPU_DoubleU u;
2398 float64 tmp;
2399
2400 u.ll = val;
2401    /* NaNs are not handled the way IEEE 754 specifies: return 0 */
2402 if (unlikely(isnan(u.d)))
2403 return 0;
2404 tmp = uint64_to_float64(1ULL << 32, &env->spe_status);
2405 u.d = float64_mul(u.d, tmp, &env->spe_status);
2406
2407 return float64_to_uint32(u.d, &env->spe_status);
2408}
2409
2410uint32_t helper_efscfd (uint64_t val)
2411{
2412 CPU_DoubleU u1;
2413 CPU_FloatU u2;
2414
2415 u1.ll = val;
2416 u2.f = float64_to_float32(u1.d, &env->spe_status);
2417
2418 return u2.l;
2419}
2420
2421uint64_t helper_efdcfs (uint32_t val)
2422{
2423 CPU_DoubleU u2;
2424 CPU_FloatU u1;
2425
2426 u1.l = val;
2427 u2.d = float32_to_float64(u1.f, &env->spe_status);
2428
2429 return u2.ll;
2430}
2431
2432/* Double-precision floating-point arithmetic */
2433uint64_t helper_efdadd (uint64_t op1, uint64_t op2)
2434{
2435 CPU_DoubleU u1, u2;
2436 u1.ll = op1;
2437 u2.ll = op2;
2438 u1.d = float64_add(u1.d, u2.d, &env->spe_status);
2439 return u1.ll;
2440}
2441
2442uint64_t helper_efdsub (uint64_t op1, uint64_t op2)
2443{
2444 CPU_DoubleU u1, u2;
2445 u1.ll = op1;
2446 u2.ll = op2;
2447 u1.d = float64_sub(u1.d, u2.d, &env->spe_status);
2448 return u1.ll;
2449}
2450
2451uint64_t helper_efdmul (uint64_t op1, uint64_t op2)
2452{
2453 CPU_DoubleU u1, u2;
2454 u1.ll = op1;
2455 u2.ll = op2;
2456 u1.d = float64_mul(u1.d, u2.d, &env->spe_status);
2457 return u1.ll;
2458}
2459
2460uint64_t helper_efddiv (uint64_t op1, uint64_t op2)
2461{
2462 CPU_DoubleU u1, u2;
2463 u1.ll = op1;
2464 u2.ll = op2;
2465 u1.d = float64_div(u1.d, u2.d, &env->spe_status);
2466 return u1.ll;
2467}
2468
2469/* Double-precision floating-point comparisons */
2470uint32_t helper_efdtstlt (uint64_t op1, uint64_t op2)
2471{
2472 CPU_DoubleU u1, u2;
2473 u1.ll = op1;
2474 u2.ll = op2;
2475 return float64_lt(u1.d, u2.d, &env->spe_status) ? 4 : 0;
2476}
2477
2478uint32_t helper_efdtstgt (uint64_t op1, uint64_t op2)
2479{
2480 CPU_DoubleU u1, u2;
2481 u1.ll = op1;
2482 u2.ll = op2;
2483 return float64_le(u1.d, u2.d, &env->spe_status) ? 0 : 4;
2484}
2485
2486uint32_t helper_efdtsteq (uint64_t op1, uint64_t op2)
2487{
2488 CPU_DoubleU u1, u2;
2489 u1.ll = op1;
2490 u2.ll = op2;
2491 return float64_eq(u1.d, u2.d, &env->spe_status) ? 4 : 0;
2492}
2493
2494uint32_t helper_efdcmplt (uint64_t op1, uint64_t op2)
2495{
2496    /* XXX: TODO: test special values (NaN, infinities, ...) */
2497 return helper_efdtstlt(op1, op2);
2498}
2499
2500uint32_t helper_efdcmpgt (uint64_t op1, uint64_t op2)
2501{
2502    /* XXX: TODO: test special values (NaN, infinities, ...) */
2503 return helper_efdtstgt(op1, op2);
2504}
2505
2506uint32_t helper_efdcmpeq (uint64_t op1, uint64_t op2)
2507{
2508    /* XXX: TODO: test special values (NaN, infinities, ...) */
2509 return helper_efdtsteq(op1, op2);
2510}
2511
2512/*****************************************************************************/
2513/* Softmmu support */
2514#if !defined (CONFIG_USER_ONLY)
2515
2516#define MMUSUFFIX _mmu
2517
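/* Instantiate the softmmu load/store helpers for 1-, 2-, 4- and 8-byte
 * accesses (SHIFT selects the access size: 2^SHIFT bytes). */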
2518#define SHIFT 0
2519#include "softmmu_template.h"
2520
2521#define SHIFT 1
2522#include "softmmu_template.h"
2523
2524#define SHIFT 2
2525#include "softmmu_template.h"
2526
2527#define SHIFT 3
2528#include "softmmu_template.h"
2529
2530/* Try to fill the TLB and raise an exception on error. If retaddr is
2531   NULL, the function was called from C code (i.e. not from generated
2532   code or from helper.c). */
2533/* XXX: fix it to restore all registers */
2534void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
2535{
2536 TranslationBlock *tb;
2537 CPUState *saved_env;
2538 unsigned long pc;
2539 int ret;
2540
2541 /* XXX: hack to restore env in all cases, even if not called from
2542 generated code */
2543 saved_env = env;
2544 env = cpu_single_env;
2545 ret = cpu_ppc_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
2546 if (unlikely(ret != 0)) {
2547 if (likely(retaddr)) {
2548 /* now we have a real cpu fault */
2549 pc = (unsigned long)retaddr;
2550 tb = tb_find_pc(pc);
2551 if (likely(tb)) {
2552                /* The PC is inside the translated code: this means we
2553                   have a virtual CPU fault */
2554 cpu_restore_state(tb, env, pc, NULL);
2555 }
2556 }
2557 raise_exception_err(env, env->exception_index, env->error_code);
2558 }
2559 env = saved_env;
2560}
2561
2562/* Software driven TLBs management */
2563/* PowerPC 602/603 software TLB load instructions helpers */
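/* On a 6xx software TLB miss the faulting address, the compare value and the
 * PTE to load have been latched in the IMISS/DMISS, ICMP/DCMP and RPA SPRs;
 * the replacement way is taken from SRR1. */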
2564static void helper_load_6xx_tlb (target_ulong new_EPN, int is_code)
2565{
2566 target_ulong RPN, CMP, EPN;
2567 int way;
2568
2569 RPN = env->spr[SPR_RPA];
2570 if (is_code) {
2571 CMP = env->spr[SPR_ICMP];
2572 EPN = env->spr[SPR_IMISS];
2573 } else {
2574 CMP = env->spr[SPR_DCMP];
2575 EPN = env->spr[SPR_DMISS];
2576 }
2577 way = (env->spr[SPR_SRR1] >> 17) & 1;
2578#if defined (DEBUG_SOFTWARE_TLB)
2579 if (loglevel != 0) {
2580 fprintf(logfile, "%s: EPN " TDX " " ADDRX " PTE0 " ADDRX
2581 " PTE1 " ADDRX " way %d\n",
2582 __func__, T0, EPN, CMP, RPN, way);
2583 }
2584#endif
2585 /* Store this TLB */
2586 ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
2587 way, is_code, CMP, RPN);
2588}
2589
2590void helper_load_6xx_tlbd (target_ulong EPN)
2591{
2592 helper_load_6xx_tlb(EPN, 0);
2593}
2594
2595void helper_load_6xx_tlbi (target_ulong EPN)
2596{
2597 helper_load_6xx_tlb(EPN, 1);
2598}
2599
2600/* PowerPC 74xx software TLB load instructions helpers */
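/* The 74xx variant reads the new PTE from PTEHI/PTELO and the miss address
 * and replacement way from the TLBMISS SPR. */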
2601static void helper_load_74xx_tlb (target_ulong new_EPN, int is_code)
2602{
2603 target_ulong RPN, CMP, EPN;
2604 int way;
2605
2606 RPN = env->spr[SPR_PTELO];
2607 CMP = env->spr[SPR_PTEHI];
2608 EPN = env->spr[SPR_TLBMISS] & ~0x3;
2609 way = env->spr[SPR_TLBMISS] & 0x3;
2610#if defined (DEBUG_SOFTWARE_TLB)
2611 if (loglevel != 0) {
2612 fprintf(logfile, "%s: EPN " TDX " " ADDRX " PTE0 " ADDRX
2613 " PTE1 " ADDRX " way %d\n",
2614 __func__, T0, EPN, CMP, RPN, way);
2615 }
2616#endif
2617 /* Store this TLB */
2618 ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
2619 way, is_code, CMP, RPN);
2620}
2621
2622void helper_load_74xx_tlbd (target_ulong EPN)
2623{
2624 helper_load_74xx_tlb(EPN, 0);
2625}
2626
2627void helper_load_74xx_tlbi (target_ulong EPN)
2628{
2629 helper_load_74xx_tlb(EPN, 1);
2630}
2631
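/* BookE TLB SIZE field encoding: page size = 1 KiB << (2 * SIZE), i.e.
 * 4^SIZE KiB (SIZE = 0 -> 1 KiB, SIZE = 4 -> 256 KiB, SIZE = 9 -> 256 MiB). */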
2632static always_inline target_ulong booke_tlb_to_page_size (int size)
2633{
2634 return 1024 << (2 * size);
2635}
2636
2637static always_inline int booke_page_size_to_tlb (target_ulong page_size)
2638{
2639 int size;
2640
2641 switch (page_size) {
2642 case 0x00000400UL:
2643 size = 0x0;
2644 break;
2645 case 0x00001000UL:
2646 size = 0x1;
2647 break;
2648 case 0x00004000UL:
2649 size = 0x2;
2650 break;
2651 case 0x00010000UL:
2652 size = 0x3;
2653 break;
2654 case 0x00040000UL:
2655 size = 0x4;
2656 break;
2657 case 0x00100000UL:
2658 size = 0x5;
2659 break;
2660 case 0x00400000UL:
2661 size = 0x6;
2662 break;
2663 case 0x01000000UL:
2664 size = 0x7;
2665 break;
2666 case 0x04000000UL:
2667 size = 0x8;
2668 break;
2669 case 0x10000000UL:
2670 size = 0x9;
2671 break;
2672 case 0x40000000UL:
2673 size = 0xA;
2674 break;
2675#if defined (TARGET_PPC64)
2676 case 0x000100000000ULL:
2677 size = 0xB;
2678 break;
2679 case 0x000400000000ULL:
2680 size = 0xC;
2681 break;
2682 case 0x001000000000ULL:
2683 size = 0xD;
2684 break;
2685 case 0x004000000000ULL:
2686 size = 0xE;
2687 break;
2688 case 0x010000000000ULL:
2689 size = 0xF;
2690 break;
2691#endif
2692 default:
2693 size = -1;
2694 break;
2695 }
2696
2697 return size;
2698}
2699
2700/* Helpers for 4xx TLB management */
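/* These helpers still operate on the T0/T1 micro-op registers: T0 holds the
 * TLB entry index (and the result for tlbre), T1 the value to write. */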
2701void do_4xx_tlbre_lo (void)
2702{
2703 ppcemb_tlb_t *tlb;
2704 int size;
2705
2706 T0 &= 0x3F;
2707 tlb = &env->tlb[T0].tlbe;
2708 T0 = tlb->EPN;
2709 if (tlb->prot & PAGE_VALID)
2710 T0 |= 0x400;
2711 size = booke_page_size_to_tlb(tlb->size);
2712 if (size < 0 || size > 0x7)
2713 size = 1;
2714 T0 |= size << 7;
2715 env->spr[SPR_40x_PID] = tlb->PID;
2716}
2717
2718void do_4xx_tlbre_hi (void)
2719{
2720 ppcemb_tlb_t *tlb;
2721
2722 T0 &= 0x3F;
2723 tlb = &env->tlb[T0].tlbe;
2724 T0 = tlb->RPN;
2725 if (tlb->prot & PAGE_EXEC)
2726 T0 |= 0x200;
2727 if (tlb->prot & PAGE_WRITE)
2728 T0 |= 0x100;
2729}
2730
2731void do_4xx_tlbwe_hi (void)
2732{
2733 ppcemb_tlb_t *tlb;
2734 target_ulong page, end;
2735
2736#if defined (DEBUG_SOFTWARE_TLB)
2737 if (loglevel != 0) {
2738 fprintf(logfile, "%s T0 " TDX " T1 " TDX "\n", __func__, T0, T1);
2739 }
2740#endif
2741 T0 &= 0x3F;
2742 tlb = &env->tlb[T0].tlbe;
2743 /* Invalidate previous TLB (if it's valid) */
2744 if (tlb->prot & PAGE_VALID) {
2745 end = tlb->EPN + tlb->size;
2746#if defined (DEBUG_SOFTWARE_TLB)
2747 if (loglevel != 0) {
2748 fprintf(logfile, "%s: invalidate old TLB %d start " ADDRX
2749 " end " ADDRX "\n", __func__, (int)T0, tlb->EPN, end);
2750 }
2751#endif
2752 for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE)
2753 tlb_flush_page(env, page);
2754 }
2755 tlb->size = booke_tlb_to_page_size((T1 >> 7) & 0x7);
2756 /* We cannot handle TLB size < TARGET_PAGE_SIZE.
2757 * If this ever occurs, one should use the ppcemb target instead
2758 * of the ppc or ppc64 one
2759 */
2760 if ((T1 & 0x40) && tlb->size < TARGET_PAGE_SIZE) {
2761 cpu_abort(env, "TLB size " TARGET_FMT_lu " < %u "
2762 "are not supported (%d)\n",
2763 tlb->size, TARGET_PAGE_SIZE, (int)((T1 >> 7) & 0x7));
2764 }
2765 tlb->EPN = T1 & ~(tlb->size - 1);
2766 if (T1 & 0x40)
2767 tlb->prot |= PAGE_VALID;
2768 else
2769 tlb->prot &= ~PAGE_VALID;
2770 if (T1 & 0x20) {
2771 /* XXX: TO BE FIXED */
2772        cpu_abort(env, "Little-endian TLB entries are not supported yet\n");
2773 }
2774 tlb->PID = env->spr[SPR_40x_PID]; /* PID */
2775 tlb->attr = T1 & 0xFF;
2776#if defined (DEBUG_SOFTWARE_TLB)
2777 if (loglevel != 0) {
2778 fprintf(logfile, "%s: set up TLB %d RPN " PADDRX " EPN " ADDRX
2779 " size " ADDRX " prot %c%c%c%c PID %d\n", __func__,
2780 (int)T0, tlb->RPN, tlb->EPN, tlb->size,
2781 tlb->prot & PAGE_READ ? 'r' : '-',
2782 tlb->prot & PAGE_WRITE ? 'w' : '-',
2783 tlb->prot & PAGE_EXEC ? 'x' : '-',
2784 tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
2785 }
2786#endif
2787 /* Invalidate new TLB (if valid) */
2788 if (tlb->prot & PAGE_VALID) {
2789 end = tlb->EPN + tlb->size;
2790#if defined (DEBUG_SOFTWARE_TLB)
2791 if (loglevel != 0) {
2792 fprintf(logfile, "%s: invalidate TLB %d start " ADDRX
2793 " end " ADDRX "\n", __func__, (int)T0, tlb->EPN, end);
2794 }
2795#endif
2796 for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE)
2797 tlb_flush_page(env, page);
2798 }
2799}
2800
2801void do_4xx_tlbwe_lo (void)
2802{
2803 ppcemb_tlb_t *tlb;
2804
2805#if defined (DEBUG_SOFTWARE_TLB)
2806 if (loglevel != 0) {
2807 fprintf(logfile, "%s T0 " TDX " T1 " TDX "\n", __func__, T0, T1);
2808 }
2809#endif
2810 T0 &= 0x3F;
2811 tlb = &env->tlb[T0].tlbe;
2812 tlb->RPN = T1 & 0xFFFFFC00;
2813 tlb->prot = PAGE_READ;
2814 if (T1 & 0x200)
2815 tlb->prot |= PAGE_EXEC;
2816 if (T1 & 0x100)
2817 tlb->prot |= PAGE_WRITE;
2818#if defined (DEBUG_SOFTWARE_TLB)
2819 if (loglevel != 0) {
2820 fprintf(logfile, "%s: set up TLB %d RPN " PADDRX " EPN " ADDRX
2821 " size " ADDRX " prot %c%c%c%c PID %d\n", __func__,
2822 (int)T0, tlb->RPN, tlb->EPN, tlb->size,
2823 tlb->prot & PAGE_READ ? 'r' : '-',
2824 tlb->prot & PAGE_WRITE ? 'w' : '-',
2825 tlb->prot & PAGE_EXEC ? 'x' : '-',
2826 tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
2827 }
2828#endif
2829}
2830
2831/* PowerPC 440 TLB management */
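/* tlbwe/tlbre access one of three words per 440 TLB entry: word 0 holds the
 * EPN, page size and valid bit, word 1 the RPN, word 2 the storage attributes
 * and the six access-protection bits. */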
2832void do_440_tlbwe (int word)
2833{
2834 ppcemb_tlb_t *tlb;
2835 target_ulong EPN, RPN, size;
2836 int do_flush_tlbs;
2837
2838#if defined (DEBUG_SOFTWARE_TLB)
2839 if (loglevel != 0) {
2840 fprintf(logfile, "%s word %d T0 " TDX " T1 " TDX "\n",
2841 __func__, word, T0, T1);
2842 }
2843#endif
2844 do_flush_tlbs = 0;
2845 T0 &= 0x3F;
2846 tlb = &env->tlb[T0].tlbe;
2847 switch (word) {
2848 default:
2849 /* Just here to please gcc */
2850 case 0:
2851 EPN = T1 & 0xFFFFFC00;
2852 if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN)
2853 do_flush_tlbs = 1;
2854 tlb->EPN = EPN;
2855 size = booke_tlb_to_page_size((T1 >> 4) & 0xF);
2856 if ((tlb->prot & PAGE_VALID) && tlb->size < size)
2857 do_flush_tlbs = 1;
2858 tlb->size = size;
2859 tlb->attr &= ~0x1;
2860 tlb->attr |= (T1 >> 8) & 1;
2861 if (T1 & 0x200) {
2862 tlb->prot |= PAGE_VALID;
2863 } else {
2864 if (tlb->prot & PAGE_VALID) {
2865 tlb->prot &= ~PAGE_VALID;
2866 do_flush_tlbs = 1;
2867 }
2868 }
2869 tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
2870 if (do_flush_tlbs)
2871 tlb_flush(env, 1);
2872 break;
2873 case 1:
2874 RPN = T1 & 0xFFFFFC0F;
2875 if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN)
2876 tlb_flush(env, 1);
2877 tlb->RPN = RPN;
2878 break;
2879 case 2:
2880 tlb->attr = (tlb->attr & 0x1) | (T1 & 0x0000FF00);
2881 tlb->prot = tlb->prot & PAGE_VALID;
2882 if (T1 & 0x1)
2883 tlb->prot |= PAGE_READ << 4;
2884 if (T1 & 0x2)
2885 tlb->prot |= PAGE_WRITE << 4;
2886 if (T1 & 0x4)
2887 tlb->prot |= PAGE_EXEC << 4;
2888 if (T1 & 0x8)
2889 tlb->prot |= PAGE_READ;
2890 if (T1 & 0x10)
2891 tlb->prot |= PAGE_WRITE;
2892 if (T1 & 0x20)
2893 tlb->prot |= PAGE_EXEC;
2894 break;
2895 }
2896}
2897
2898void do_440_tlbre (int word)
2899{
2900 ppcemb_tlb_t *tlb;
2901 int size;
2902
2903 T0 &= 0x3F;
2904 tlb = &env->tlb[T0].tlbe;
2905 switch (word) {
2906 default:
2907 /* Just here to please gcc */
2908 case 0:
2909 T0 = tlb->EPN;
2910 size = booke_page_size_to_tlb(tlb->size);
2911 if (size < 0 || size > 0xF)
2912 size = 1;
2913 T0 |= size << 4;
2914 if (tlb->attr & 0x1)
2915 T0 |= 0x100;
2916 if (tlb->prot & PAGE_VALID)
2917 T0 |= 0x200;
2918 env->spr[SPR_440_MMUCR] &= ~0x000000FF;
2919 env->spr[SPR_440_MMUCR] |= tlb->PID;
2920 break;
2921 case 1:
2922 T0 = tlb->RPN;
2923 break;
2924 case 2:
2925 T0 = tlb->attr & ~0x1;
2926 if (tlb->prot & (PAGE_READ << 4))
2927 T0 |= 0x1;
2928 if (tlb->prot & (PAGE_WRITE << 4))
2929 T0 |= 0x2;
2930 if (tlb->prot & (PAGE_EXEC << 4))
2931 T0 |= 0x4;
2932 if (tlb->prot & PAGE_READ)
2933 T0 |= 0x8;
2934 if (tlb->prot & PAGE_WRITE)
2935 T0 |= 0x10;
2936 if (tlb->prot & PAGE_EXEC)
2937 T0 |= 0x20;
2938 break;
2939 }
2940}
2941#endif /* !CONFIG_USER_ONLY */