2 * PowerPC integer and vector emulation helpers for QEMU.
4 * Copyright (c) 2003-2007 Jocelyn Mayer
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
23 #include "qemu/host-utils.h"
24 #include "qemu/main-loop.h"
25 #include "exec/helper-proto.h"
26 #include "crypto/aes.h"
27 #include "fpu/softfloat.h"
28 #include "qapi/error.h"
29 #include "qemu/guest-random.h"
31 #include "helper_regs.h"
32 /*****************************************************************************/
33 /* Fixed point operations helpers */
35 static inline void helper_update_ov_legacy(CPUPPCState *env, int ov)
38 env->so = env->ov = 1;
44 target_ulong helper_divweu(CPUPPCState *env, target_ulong ra, target_ulong rb,
50 uint64_t dividend = (uint64_t)ra << 32;
51 uint64_t divisor = (uint32_t)rb;
53 if (unlikely(divisor == 0)) {
56 rt = dividend / divisor;
57 overflow = rt > UINT32_MAX;
60 if (unlikely(overflow)) {
61 rt = 0; /* Undefined */
65 helper_update_ov_legacy(env, overflow);
68 return (target_ulong)rt;
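/*
 * divwe[o] below is the signed variant of the extended divide above: the
 * dividend is (ra << 32) and the result is undefined (forced to 0 here)
 * when the quotient does not fit in 32 bits, on division by zero, or for
 * INT64_MIN / -1.
 */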
71 target_ulong helper_divwe(CPUPPCState *env, target_ulong ra, target_ulong rb,
77 int64_t dividend = (int64_t)ra << 32;
78 int64_t divisor = (int64_t)((int32_t)rb);
80 if (unlikely((divisor == 0) ||
81 ((divisor == -1ull) && (dividend == INT64_MIN)))) {
84 rt = dividend / divisor;
85 overflow = rt != (int32_t)rt;
88 if (unlikely(overflow)) {
89 rt = 0; /* Undefined */
93 helper_update_ov_legacy(env, overflow);
96 return (target_ulong)rt;
99 #if defined(TARGET_PPC64)
101 uint64_t helper_divdeu(CPUPPCState *env, uint64_t ra, uint64_t rb, uint32_t oe)
106 overflow = divu128(&rt, &ra, rb);
108 if (unlikely(overflow)) {
109 rt = 0; /* Undefined */
113 helper_update_ov_legacy(env, overflow);
119 uint64_t helper_divde(CPUPPCState *env, uint64_t rau, uint64_t rbu, uint32_t oe)
122 int64_t ra = (int64_t)rau;
123 int64_t rb = (int64_t)rbu;
124 int overflow = divs128(&rt, &ra, rb);
126 if (unlikely(overflow)) {
127 rt = 0; /* Undefined */
131 helper_update_ov_legacy(env, overflow);
140 #if defined(TARGET_PPC64)
141 /* if x = 0xab, returns 0xabababababababab */
142 #define pattern(x) (((x) & 0xff) * (~(target_ulong)0 / 0xff))
145 * subtract 1 from each byte, and with inverse, check if MSB is set at each byte.
147 * i.e. ((0x00 - 0x01) & ~(0x00)) & 0x80
148 * (0xFF & 0xFF) & 0x80 = 0x80 (zero found)
150 #define haszero(v) (((v) - pattern(0x01)) & ~(v) & pattern(0x80))
152 /* When you XOR the pattern and there is a match, that byte will be zero */
153 #define hasvalue(x, n) (haszero((x) ^ pattern(n)))
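/* cmpeqb: returns CRF_GT if any byte of rb equals the low byte of ra. */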
155 uint32_t helper_cmpeqb(target_ulong ra, target_ulong rb)
157 return hasvalue(rb, ra) ? CRF_GT : 0;
165 * Return a random number.
167 uint64_t helper_darn32(void)
172 if (qemu_guest_getrandom(&ret, sizeof(ret), &err) < 0) {
173 qemu_log_mask(LOG_UNIMP, "darn: Crypto failure: %s",
174 error_get_pretty(err));
182 uint64_t helper_darn64(void)
187 if (qemu_guest_getrandom(&ret, sizeof(ret), &err) < 0) {
188 qemu_log_mask(LOG_UNIMP, "darn: Crypto failure: %s",
189 error_get_pretty(err));
197 uint64_t helper_bpermd(uint64_t rs, uint64_t rb)
202 for (i = 0; i < 8; i++) {
203 int index = (rs >> (i * 8)) & 0xFF;
205 if (rb & PPC_BIT(index)) {
215 target_ulong helper_cmpb(target_ulong rs, target_ulong rb)
217 target_ulong mask = 0xff;
221 for (i = 0; i < sizeof(target_ulong); i++) {
222 if ((rs & mask) == (rb & mask)) {
230 /* shift right arithmetic helper */
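/*
 * CA (and CA32) is set iff the source operand is negative and any 1-bits
 * are shifted out of the low end; otherwise it is cleared.
 */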
231 target_ulong helper_sraw(CPUPPCState *env, target_ulong value,
236 if (likely(!(shift & 0x20))) {
237 if (likely((uint32_t)shift != 0)) {
239 ret = (int32_t)value >> shift;
240 if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) {
241 env->ca32 = env->ca = 0;
243 env->ca32 = env->ca = 1;
246 ret = (int32_t)value;
247 env->ca32 = env->ca = 0;
250 ret = (int32_t)value >> 31;
251 env->ca32 = env->ca = (ret != 0);
253 return (target_long)ret;
256 #if defined(TARGET_PPC64)
257 target_ulong helper_srad(CPUPPCState *env, target_ulong value,
262 if (likely(!(shift & 0x40))) {
263 if (likely((uint64_t)shift != 0)) {
265 ret = (int64_t)value >> shift;
266 if (likely(ret >= 0 || (value & ((1ULL << shift) - 1)) == 0)) {
267 env->ca32 = env->ca = 0;
269 env->ca32 = env->ca = 1;
272 ret = (int64_t)value;
273 env->ca32 = env->ca = 0;
276 ret = (int64_t)value >> 63;
277 env->ca32 = env->ca = (ret != 0);
283 #if defined(TARGET_PPC64)
284 target_ulong helper_popcntb(target_ulong val)
286 /* Note that we don't fold past bytes */
287 val = (val & 0x5555555555555555ULL) + ((val >> 1) &
288 0x5555555555555555ULL);
289 val = (val & 0x3333333333333333ULL) + ((val >> 2) &
290 0x3333333333333333ULL);
291 val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >> 4) &
292 0x0f0f0f0f0f0f0f0fULL);
296 target_ulong helper_popcntw(target_ulong val)
298 /* Note that we don't fold past words. */
299 val = (val & 0x5555555555555555ULL) + ((val >> 1) &
300 0x5555555555555555ULL);
301 val = (val & 0x3333333333333333ULL) + ((val >> 2) &
302 0x3333333333333333ULL);
303 val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >> 4) &
304 0x0f0f0f0f0f0f0f0fULL);
305 val = (val & 0x00ff00ff00ff00ffULL) + ((val >> 8) &
306 0x00ff00ff00ff00ffULL);
307 val = (val & 0x0000ffff0000ffffULL) + ((val >> 16) &
308 0x0000ffff0000ffffULL);
312 target_ulong helper_popcntb(target_ulong val)
314 /* Note that we don't fold past bytes */
315 val = (val & 0x55555555) + ((val >> 1) & 0x55555555);
316 val = (val & 0x33333333) + ((val >> 2) & 0x33333333);
317 val = (val & 0x0f0f0f0f) + ((val >> 4) & 0x0f0f0f0f);
322 /*****************************************************************************/
323 /* PowerPC 601 specific instructions (POWER bridge) */
324 target_ulong helper_div(CPUPPCState *env, target_ulong arg1, target_ulong arg2)
326 uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];
328 if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
329 (int32_t)arg2 == 0) {
330 env->spr[SPR_MQ] = 0;
333 env->spr[SPR_MQ] = tmp % arg2;
334 return tmp / (int32_t)arg2;
338 target_ulong helper_divo(CPUPPCState *env, target_ulong arg1,
341 uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];
343 if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
344 (int32_t)arg2 == 0) {
345 env->so = env->ov = 1;
346 env->spr[SPR_MQ] = 0;
349 env->spr[SPR_MQ] = tmp % arg2;
350 tmp /= (int32_t)arg2;
351 if ((int32_t)tmp != tmp) {
352 env->so = env->ov = 1;
360 target_ulong helper_divs(CPUPPCState *env, target_ulong arg1,
363 if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
364 (int32_t)arg2 == 0) {
365 env->spr[SPR_MQ] = 0;
368 env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
369 return (int32_t)arg1 / (int32_t)arg2;
373 target_ulong helper_divso(CPUPPCState *env, target_ulong arg1,
376 if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
377 (int32_t)arg2 == 0) {
378 env->so = env->ov = 1;
379 env->spr[SPR_MQ] = 0;
383 env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
384 return (int32_t)arg1 / (int32_t)arg2;
388 /*****************************************************************************/
389 /* 602 specific instructions */
390 /* mfrom is the craziest instruction ever seen, imho! */
391 /* The real implementation uses a ROM table; do the same. */
393 * Extremely decomposed:
395 * return 256 * log10(pow(10, -arg / 256.0) + 1.0) + 0.5
397 #if !defined(CONFIG_USER_ONLY)
398 target_ulong helper_602_mfrom(target_ulong arg)
400 if (likely(arg < 602)) {
401 #include "mfrom_table.inc.c"
402 return mfrom_ROM_table[arg];
409 /*****************************************************************************/
410 /* Altivec extension helpers */
411 #if defined(HOST_WORDS_BIGENDIAN)
412 #define VECTOR_FOR_INORDER_I(index, element) \
413 for (index = 0; index < ARRAY_SIZE(r->element); index++)
415 #define VECTOR_FOR_INORDER_I(index, element) \
416 for (index = ARRAY_SIZE(r->element) - 1; index >= 0; index--)
419 /* Saturating arithmetic helpers. */
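/*
 * SATCVT/SATCVTU generate cvt<src><dst>() converters that clamp to the
 * destination range and set *sat when the input had to be clamped.
 */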
420 #define SATCVT(from, to, from_type, to_type, min, max) \
421 static inline to_type cvt##from##to(from_type x, int *sat) \
425 if (x < (from_type)min) { \
428 } else if (x > (from_type)max) { \
436 #define SATCVTU(from, to, from_type, to_type, min, max) \
437 static inline to_type cvt##from##to(from_type x, int *sat) \
441 if (x > (from_type)max) { \
449 SATCVT(sh, sb, int16_t, int8_t, INT8_MIN, INT8_MAX)
450 SATCVT(sw, sh, int32_t, int16_t, INT16_MIN, INT16_MAX)
451 SATCVT(sd, sw, int64_t, int32_t, INT32_MIN, INT32_MAX)
453 SATCVTU(uh, ub, uint16_t, uint8_t, 0, UINT8_MAX)
454 SATCVTU(uw, uh, uint32_t, uint16_t, 0, UINT16_MAX)
455 SATCVTU(ud, uw, uint64_t, uint32_t, 0, UINT32_MAX)
456 SATCVT(sh, ub, int16_t, uint8_t, 0, UINT8_MAX)
457 SATCVT(sw, uh, int32_t, uint16_t, 0, UINT16_MAX)
458 SATCVT(sd, uw, int64_t, uint32_t, 0, UINT32_MAX)
462 void helper_lvsl(ppc_avr_t *r, target_ulong sh)
464 int i, j = (sh & 0xf);
466 for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
471 void helper_lvsr(ppc_avr_t *r, target_ulong sh)
473 int i, j = 0x10 - (sh & 0xf);
475 for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
480 void helper_mtvscr(CPUPPCState *env, uint32_t vscr)
482 env->vscr = vscr & ~(1u << VSCR_SAT);
483 /* Which bit we set is completely arbitrary, but clear the rest. */
484 env->vscr_sat.u64[0] = vscr & (1u << VSCR_SAT);
485 env->vscr_sat.u64[1] = 0;
486 set_flush_to_zero((vscr >> VSCR_NJ) & 1, &env->vec_status);
489 uint32_t helper_mfvscr(CPUPPCState *env)
491 uint32_t sat = (env->vscr_sat.u64[0] | env->vscr_sat.u64[1]) != 0;
492 return env->vscr | (sat << VSCR_SAT);
495 static inline void set_vscr_sat(CPUPPCState *env)
497 /* The choice of non-zero value is arbitrary. */
498 env->vscr_sat.u32[0] = 1;
501 void helper_vaddcuw(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
505 for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
506 r->u32[i] = ~a->u32[i] < b->u32[i];
511 void helper_vprtybw(ppc_avr_t *r, ppc_avr_t *b)
514 for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
515 uint64_t res = b->u32[i] ^ (b->u32[i] >> 16);
522 void helper_vprtybd(ppc_avr_t *r, ppc_avr_t *b)
525 for (i = 0; i < ARRAY_SIZE(r->u64); i++) {
526 uint64_t res = b->u64[i] ^ (b->u64[i] >> 32);
534 void helper_vprtybq(ppc_avr_t *r, ppc_avr_t *b)
536 uint64_t res = b->u64[0] ^ b->u64[1];
540 r->VsrD(1) = res & 1;
544 #define VARITH_DO(name, op, element) \
545 void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
549 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
550 r->element[i] = a->element[i] op b->element[i]; \
553 VARITH_DO(muluwm, *, u32)
557 #define VARITHFP(suffix, func) \
558 void helper_v##suffix(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, \
563 for (i = 0; i < ARRAY_SIZE(r->f32); i++) { \
564 r->f32[i] = func(a->f32[i], b->f32[i], &env->vec_status); \
567 VARITHFP(addfp, float32_add)
568 VARITHFP(subfp, float32_sub)
569 VARITHFP(minfp, float32_min)
570 VARITHFP(maxfp, float32_max)
573 #define VARITHFPFMA(suffix, type) \
574 void helper_v##suffix(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, \
575 ppc_avr_t *b, ppc_avr_t *c) \
578 for (i = 0; i < ARRAY_SIZE(r->f32); i++) { \
579 r->f32[i] = float32_muladd(a->f32[i], c->f32[i], b->f32[i], \
580 type, &env->vec_status); \
583 VARITHFPFMA(maddfp, 0);
584 VARITHFPFMA(nmsubfp, float_muladd_negate_result | float_muladd_negate_c);
587 #define VARITHSAT_CASE(type, op, cvt, element) \
589 type result = (type)a->element[i] op (type)b->element[i]; \
590 r->element[i] = cvt(result, &sat); \
593 #define VARITHSAT_DO(name, op, optype, cvt, element) \
594 void helper_v##name(ppc_avr_t *r, ppc_avr_t *vscr_sat, \
595 ppc_avr_t *a, ppc_avr_t *b, uint32_t desc) \
600 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
601 VARITHSAT_CASE(optype, op, cvt, element); \
604 vscr_sat->u32[0] = 1; \
607 #define VARITHSAT_SIGNED(suffix, element, optype, cvt) \
608 VARITHSAT_DO(adds##suffix##s, +, optype, cvt, element) \
609 VARITHSAT_DO(subs##suffix##s, -, optype, cvt, element)
610 #define VARITHSAT_UNSIGNED(suffix, element, optype, cvt) \
611 VARITHSAT_DO(addu##suffix##s, +, optype, cvt, element) \
612 VARITHSAT_DO(subu##suffix##s, -, optype, cvt, element)
613 VARITHSAT_SIGNED(b, s8, int16_t, cvtshsb)
614 VARITHSAT_SIGNED(h, s16, int32_t, cvtswsh)
615 VARITHSAT_SIGNED(w, s32, int64_t, cvtsdsw)
616 VARITHSAT_UNSIGNED(b, u8, uint16_t, cvtshub)
617 VARITHSAT_UNSIGNED(h, u16, uint32_t, cvtswuh)
618 VARITHSAT_UNSIGNED(w, u32, uint64_t, cvtsduw)
619 #undef VARITHSAT_CASE
621 #undef VARITHSAT_SIGNED
622 #undef VARITHSAT_UNSIGNED
624 #define VAVG_DO(name, element, etype) \
625 void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
629 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
630 etype x = (etype)a->element[i] + (etype)b->element[i] + 1; \
631 r->element[i] = x >> 1; \
635 #define VAVG(type, signed_element, signed_type, unsigned_element, \
637 VAVG_DO(avgs##type, signed_element, signed_type) \
638 VAVG_DO(avgu##type, unsigned_element, unsigned_type)
639 VAVG(b, s8, int16_t, u8, uint16_t)
640 VAVG(h, s16, int32_t, u16, uint32_t)
641 VAVG(w, s32, int64_t, u32, uint64_t)
645 #define VABSDU_DO(name, element) \
646 void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
650 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
651 r->element[i] = (a->element[i] > b->element[i]) ? \
652 (a->element[i] - b->element[i]) : \
653 (b->element[i] - a->element[i]); \
658 * VABSDU - Vector absolute difference unsigned
659 * name - instruction mnemonic suffix (b: byte, h: halfword, w: word)
660 * element - element type to access from vector
662 #define VABSDU(type, element) \
663 VABSDU_DO(absdu##type, element)
670 #define VCF(suffix, cvt, element) \
671 void helper_vcf##suffix(CPUPPCState *env, ppc_avr_t *r, \
672 ppc_avr_t *b, uint32_t uim) \
676 for (i = 0; i < ARRAY_SIZE(r->f32); i++) { \
677 float32 t = cvt(b->element[i], &env->vec_status); \
678 r->f32[i] = float32_scalbn(t, -uim, &env->vec_status); \
681 VCF(ux, uint32_to_float32, u32)
682 VCF(sx, int32_to_float32, s32)
685 #define VCMP_DO(suffix, compare, element, record) \
686 void helper_vcmp##suffix(CPUPPCState *env, ppc_avr_t *r, \
687 ppc_avr_t *a, ppc_avr_t *b) \
689 uint64_t ones = (uint64_t)-1; \
690 uint64_t all = ones; \
694 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
695 uint64_t result = (a->element[i] compare b->element[i] ? \
697 switch (sizeof(a->element[0])) { \
699 r->u64[i] = result; \
702 r->u32[i] = result; \
705 r->u16[i] = result; \
715 env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1); \
718 #define VCMP(suffix, compare, element) \
719 VCMP_DO(suffix, compare, element, 0) \
720 VCMP_DO(suffix##_dot, compare, element, 1)
736 #define VCMPNE_DO(suffix, element, etype, cmpzero, record) \
737 void helper_vcmpne##suffix(CPUPPCState *env, ppc_avr_t *r, \
738 ppc_avr_t *a, ppc_avr_t *b) \
740 etype ones = (etype)-1; \
742 etype result, none = 0; \
745 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
747 result = ((a->element[i] == 0) \
748 || (b->element[i] == 0) \
749 || (a->element[i] != b->element[i]) ? \
752 result = (a->element[i] != b->element[i]) ? ones : 0x0; \
754 r->element[i] = result; \
759 env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1); \
764 * VCMPNEZ - Vector compare not equal to zero
765 * suffix - instruction mnemonic suffix (b: byte, h: halfword, w: word)
766 * element - element type to access from vector
768 #define VCMPNE(suffix, element, etype, cmpzero) \
769 VCMPNE_DO(suffix, element, etype, cmpzero, 0) \
770 VCMPNE_DO(suffix##_dot, element, etype, cmpzero, 1)
771 VCMPNE(zb, u8, uint8_t, 1)
772 VCMPNE(zh, u16, uint16_t, 1)
773 VCMPNE(zw, u32, uint32_t, 1)
774 VCMPNE(b, u8, uint8_t, 0)
775 VCMPNE(h, u16, uint16_t, 0)
776 VCMPNE(w, u32, uint32_t, 0)
780 #define VCMPFP_DO(suffix, compare, order, record) \
781 void helper_vcmp##suffix(CPUPPCState *env, ppc_avr_t *r, \
782 ppc_avr_t *a, ppc_avr_t *b) \
784 uint32_t ones = (uint32_t)-1; \
785 uint32_t all = ones; \
789 for (i = 0; i < ARRAY_SIZE(r->f32); i++) { \
791 int rel = float32_compare_quiet(a->f32[i], b->f32[i], \
793 if (rel == float_relation_unordered) { \
795 } else if (rel compare order) { \
800 r->u32[i] = result; \
805 env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1); \
808 #define VCMPFP(suffix, compare, order) \
809 VCMPFP_DO(suffix, compare, order, 0) \
810 VCMPFP_DO(suffix##_dot, compare, order, 1)
811 VCMPFP(eqfp, ==, float_relation_equal)
812 VCMPFP(gefp, !=, float_relation_less)
813 VCMPFP(gtfp, ==, float_relation_greater)
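/*
 * vcmpbfp ("compare bounds"): per element, bit 31 of the result is set when
 * a > b and bit 30 when a < -b; unordered inputs set both bits.  With the
 * dot form, CR6[1] is set when every element is within bounds.
 */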
817 static inline void vcmpbfp_internal(CPUPPCState *env, ppc_avr_t *r,
818 ppc_avr_t *a, ppc_avr_t *b, int record)
823 for (i = 0; i < ARRAY_SIZE(r->f32); i++) {
824 int le_rel = float32_compare_quiet(a->f32[i], b->f32[i],
826 if (le_rel == float_relation_unordered) {
827 r->u32[i] = 0xc0000000;
830 float32 bneg = float32_chs(b->f32[i]);
831 int ge_rel = float32_compare_quiet(a->f32[i], bneg,
833 int le = le_rel != float_relation_greater;
834 int ge = ge_rel != float_relation_less;
836 r->u32[i] = ((!le) << 31) | ((!ge) << 30);
837 all_in |= (!le | !ge);
841 env->crf[6] = (all_in == 0) << 1;
845 void helper_vcmpbfp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
847 vcmpbfp_internal(env, r, a, b, 0);
850 void helper_vcmpbfp_dot(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
853 vcmpbfp_internal(env, r, a, b, 1);
856 #define VCT(suffix, satcvt, element) \
857 void helper_vct##suffix(CPUPPCState *env, ppc_avr_t *r, \
858 ppc_avr_t *b, uint32_t uim) \
862 float_status s = env->vec_status; \
864 set_float_rounding_mode(float_round_to_zero, &s); \
865 for (i = 0; i < ARRAY_SIZE(r->f32); i++) { \
866 if (float32_is_any_nan(b->f32[i])) { \
869 float64 t = float32_to_float64(b->f32[i], &s); \
872 t = float64_scalbn(t, uim, &s); \
873 j = float64_to_int64(t, &s); \
874 r->element[i] = satcvt(j, &sat); \
881 VCT(uxs, cvtsduw, u32)
882 VCT(sxs, cvtsdsw, s32)
885 target_ulong helper_vclzlsbb(ppc_avr_t *r)
887 target_ulong count = 0;
889 for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
890 if (r->VsrB(i) & 0x01) {
898 target_ulong helper_vctzlsbb(ppc_avr_t *r)
900 target_ulong count = 0;
902 for (i = ARRAY_SIZE(r->u8) - 1; i >= 0; i--) {
903 if (r->VsrB(i) & 0x01) {
911 void helper_vmhaddshs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
912 ppc_avr_t *b, ppc_avr_t *c)
917 for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
918 int32_t prod = a->s16[i] * b->s16[i];
919 int32_t t = (int32_t)c->s16[i] + (prod >> 15);
921 r->s16[i] = cvtswsh(t, &sat);
929 void helper_vmhraddshs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
930 ppc_avr_t *b, ppc_avr_t *c)
935 for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
936 int32_t prod = a->s16[i] * b->s16[i] + 0x00004000;
937 int32_t t = (int32_t)c->s16[i] + (prod >> 15);
938 r->s16[i] = cvtswsh(t, &sat);
946 void helper_vmladduhm(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
950 for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
951 int32_t prod = a->s16[i] * b->s16[i];
952 r->s16[i] = (int16_t) (prod + c->s16[i]);
956 #define VMRG_DO(name, element, access, ofs) \
957 void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
960 int i, half = ARRAY_SIZE(r->element) / 2; \
962 for (i = 0; i < half; i++) { \
963 result.access(i * 2 + 0) = a->access(i + ofs); \
964 result.access(i * 2 + 1) = b->access(i + ofs); \
969 #define VMRG(suffix, element, access) \
970 VMRG_DO(mrgl##suffix, element, access, half) \
971 VMRG_DO(mrgh##suffix, element, access, 0)
978 void helper_vmsummbm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
979 ppc_avr_t *b, ppc_avr_t *c)
984 for (i = 0; i < ARRAY_SIZE(r->s8); i++) {
985 prod[i] = (int32_t)a->s8[i] * b->u8[i];
988 VECTOR_FOR_INORDER_I(i, s32) {
989 r->s32[i] = c->s32[i] + prod[4 * i] + prod[4 * i + 1] +
990 prod[4 * i + 2] + prod[4 * i + 3];
994 void helper_vmsumshm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
995 ppc_avr_t *b, ppc_avr_t *c)
1000 for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
1001 prod[i] = a->s16[i] * b->s16[i];
1004 VECTOR_FOR_INORDER_I(i, s32) {
1005 r->s32[i] = c->s32[i] + prod[2 * i] + prod[2 * i + 1];
1009 void helper_vmsumshs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
1010 ppc_avr_t *b, ppc_avr_t *c)
1016 for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
1017 prod[i] = (int32_t)a->s16[i] * b->s16[i];
1020 VECTOR_FOR_INORDER_I(i, s32) {
1021 int64_t t = (int64_t)c->s32[i] + prod[2 * i] + prod[2 * i + 1];
1023 r->u32[i] = cvtsdsw(t, &sat);
1031 void helper_vmsumubm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
1032 ppc_avr_t *b, ppc_avr_t *c)
1037 for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
1038 prod[i] = a->u8[i] * b->u8[i];
1041 VECTOR_FOR_INORDER_I(i, u32) {
1042 r->u32[i] = c->u32[i] + prod[4 * i] + prod[4 * i + 1] +
1043 prod[4 * i + 2] + prod[4 * i + 3];
1047 void helper_vmsumuhm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
1048 ppc_avr_t *b, ppc_avr_t *c)
1053 for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
1054 prod[i] = a->u16[i] * b->u16[i];
1057 VECTOR_FOR_INORDER_I(i, u32) {
1058 r->u32[i] = c->u32[i] + prod[2 * i] + prod[2 * i + 1];
1062 void helper_vmsumuhs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
1063 ppc_avr_t *b, ppc_avr_t *c)
1069 for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
1070 prod[i] = a->u16[i] * b->u16[i];
1073 VECTOR_FOR_INORDER_I(i, s32) {
1074 uint64_t t = (uint64_t)c->u32[i] + prod[2 * i] + prod[2 * i + 1];
1076 r->u32[i] = cvtuduw(t, &sat);
1084 #define VMUL_DO_EVN(name, mul_element, mul_access, prod_access, cast) \
1085 void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
1089 for (i = 0; i < ARRAY_SIZE(r->mul_element); i += 2) { \
1090 r->prod_access(i >> 1) = (cast)a->mul_access(i) * \
1091 (cast)b->mul_access(i); \
1095 #define VMUL_DO_ODD(name, mul_element, mul_access, prod_access, cast) \
1096 void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
1100 for (i = 0; i < ARRAY_SIZE(r->mul_element); i += 2) { \
1101 r->prod_access(i >> 1) = (cast)a->mul_access(i + 1) * \
1102 (cast)b->mul_access(i + 1); \
1106 #define VMUL(suffix, mul_element, mul_access, prod_access, cast) \
1107 VMUL_DO_EVN(mule##suffix, mul_element, mul_access, prod_access, cast) \
1108 VMUL_DO_ODD(mulo##suffix, mul_element, mul_access, prod_access, cast)
1109 VMUL(sb, s8, VsrSB, VsrSH, int16_t)
1110 VMUL(sh, s16, VsrSH, VsrSW, int32_t)
1111 VMUL(sw, s32, VsrSW, VsrSD, int64_t)
1112 VMUL(ub, u8, VsrB, VsrH, uint16_t)
1113 VMUL(uh, u16, VsrH, VsrW, uint32_t)
1114 VMUL(uw, u32, VsrW, VsrD, uint64_t)
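/*
 * The vmule... and vmulo... helpers multiply the even- and odd-numbered
 * elements (in big-endian element order) of a and b into double-width
 * result elements.
 */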
1119 void helper_vperm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b,
1125 for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
1126 int s = c->VsrB(i) & 0x1f;
1127 int index = s & 0xf;
1130 result.VsrB(i) = b->VsrB(index);
1132 result.VsrB(i) = a->VsrB(index);
1138 void helper_vpermr(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b,
1144 for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
1145 int s = c->VsrB(i) & 0x1f;
1146 int index = 15 - (s & 0xf);
1149 result.VsrB(i) = a->VsrB(index);
1151 result.VsrB(i) = b->VsrB(index);
1157 #if defined(HOST_WORDS_BIGENDIAN)
1158 #define VBPERMQ_INDEX(avr, i) ((avr)->u8[(i)])
1159 #define VBPERMD_INDEX(i) (i)
1160 #define VBPERMQ_DW(index) (((index) & 0x40) != 0)
1161 #define EXTRACT_BIT(avr, i, index) (extract64((avr)->u64[i], index, 1))
1163 #define VBPERMQ_INDEX(avr, i) ((avr)->u8[15 - (i)])
1164 #define VBPERMD_INDEX(i) (1 - i)
1165 #define VBPERMQ_DW(index) (((index) & 0x40) == 0)
1166 #define EXTRACT_BIT(avr, i, index) \
1167 (extract64((avr)->u64[1 - i], 63 - index, 1))
1170 void helper_vbpermd(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
1173 ppc_avr_t result = { .u64 = { 0, 0 } };
1174 VECTOR_FOR_INORDER_I(i, u64) {
1175 for (j = 0; j < 8; j++) {
1176 int index = VBPERMQ_INDEX(b, (i * 8) + j);
1177 if (index < 64 && EXTRACT_BIT(a, i, index)) {
1178 result.u64[VBPERMD_INDEX(i)] |= (0x80 >> j);
1185 void helper_vbpermq(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
1190 VECTOR_FOR_INORDER_I(i, u8) {
1191 int index = VBPERMQ_INDEX(b, i);
1194 uint64_t mask = (1ull << (63 - (index & 0x3F)));
1195 if (a->u64[VBPERMQ_DW(index)] & mask) {
1196 perm |= (0x8000 >> i);
1205 #undef VBPERMQ_INDEX
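/*
 * Lookup table for vgbbd (Vector Gather Bits by Bytes by Doubleword):
 * entry i has the MSB of byte j set iff bit j of i is set, i.e. it scatters
 * the eight bits of a byte across the MSBs of the eight bytes of a
 * doubleword.  helper_vgbbd() below shifts these masks right by each byte's
 * position to assemble the transposed 8x8 bit matrix.
 */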
1208 static const uint64_t VGBBD_MASKS[256] = {
1209 0x0000000000000000ull, /* 00 */
1210 0x0000000000000080ull, /* 01 */
1211 0x0000000000008000ull, /* 02 */
1212 0x0000000000008080ull, /* 03 */
1213 0x0000000000800000ull, /* 04 */
1214 0x0000000000800080ull, /* 05 */
1215 0x0000000000808000ull, /* 06 */
1216 0x0000000000808080ull, /* 07 */
1217 0x0000000080000000ull, /* 08 */
1218 0x0000000080000080ull, /* 09 */
1219 0x0000000080008000ull, /* 0A */
1220 0x0000000080008080ull, /* 0B */
1221 0x0000000080800000ull, /* 0C */
1222 0x0000000080800080ull, /* 0D */
1223 0x0000000080808000ull, /* 0E */
1224 0x0000000080808080ull, /* 0F */
1225 0x0000008000000000ull, /* 10 */
1226 0x0000008000000080ull, /* 11 */
1227 0x0000008000008000ull, /* 12 */
1228 0x0000008000008080ull, /* 13 */
1229 0x0000008000800000ull, /* 14 */
1230 0x0000008000800080ull, /* 15 */
1231 0x0000008000808000ull, /* 16 */
1232 0x0000008000808080ull, /* 17 */
1233 0x0000008080000000ull, /* 18 */
1234 0x0000008080000080ull, /* 19 */
1235 0x0000008080008000ull, /* 1A */
1236 0x0000008080008080ull, /* 1B */
1237 0x0000008080800000ull, /* 1C */
1238 0x0000008080800080ull, /* 1D */
1239 0x0000008080808000ull, /* 1E */
1240 0x0000008080808080ull, /* 1F */
1241 0x0000800000000000ull, /* 20 */
1242 0x0000800000000080ull, /* 21 */
1243 0x0000800000008000ull, /* 22 */
1244 0x0000800000008080ull, /* 23 */
1245 0x0000800000800000ull, /* 24 */
1246 0x0000800000800080ull, /* 25 */
1247 0x0000800000808000ull, /* 26 */
1248 0x0000800000808080ull, /* 27 */
1249 0x0000800080000000ull, /* 28 */
1250 0x0000800080000080ull, /* 29 */
1251 0x0000800080008000ull, /* 2A */
1252 0x0000800080008080ull, /* 2B */
1253 0x0000800080800000ull, /* 2C */
1254 0x0000800080800080ull, /* 2D */
1255 0x0000800080808000ull, /* 2E */
1256 0x0000800080808080ull, /* 2F */
1257 0x0000808000000000ull, /* 30 */
1258 0x0000808000000080ull, /* 31 */
1259 0x0000808000008000ull, /* 32 */
1260 0x0000808000008080ull, /* 33 */
1261 0x0000808000800000ull, /* 34 */
1262 0x0000808000800080ull, /* 35 */
1263 0x0000808000808000ull, /* 36 */
1264 0x0000808000808080ull, /* 37 */
1265 0x0000808080000000ull, /* 38 */
1266 0x0000808080000080ull, /* 39 */
1267 0x0000808080008000ull, /* 3A */
1268 0x0000808080008080ull, /* 3B */
1269 0x0000808080800000ull, /* 3C */
1270 0x0000808080800080ull, /* 3D */
1271 0x0000808080808000ull, /* 3E */
1272 0x0000808080808080ull, /* 3F */
1273 0x0080000000000000ull, /* 40 */
1274 0x0080000000000080ull, /* 41 */
1275 0x0080000000008000ull, /* 42 */
1276 0x0080000000008080ull, /* 43 */
1277 0x0080000000800000ull, /* 44 */
1278 0x0080000000800080ull, /* 45 */
1279 0x0080000000808000ull, /* 46 */
1280 0x0080000000808080ull, /* 47 */
1281 0x0080000080000000ull, /* 48 */
1282 0x0080000080000080ull, /* 49 */
1283 0x0080000080008000ull, /* 4A */
1284 0x0080000080008080ull, /* 4B */
1285 0x0080000080800000ull, /* 4C */
1286 0x0080000080800080ull, /* 4D */
1287 0x0080000080808000ull, /* 4E */
1288 0x0080000080808080ull, /* 4F */
1289 0x0080008000000000ull, /* 50 */
1290 0x0080008000000080ull, /* 51 */
1291 0x0080008000008000ull, /* 52 */
1292 0x0080008000008080ull, /* 53 */
1293 0x0080008000800000ull, /* 54 */
1294 0x0080008000800080ull, /* 55 */
1295 0x0080008000808000ull, /* 56 */
1296 0x0080008000808080ull, /* 57 */
1297 0x0080008080000000ull, /* 58 */
1298 0x0080008080000080ull, /* 59 */
1299 0x0080008080008000ull, /* 5A */
1300 0x0080008080008080ull, /* 5B */
1301 0x0080008080800000ull, /* 5C */
1302 0x0080008080800080ull, /* 5D */
1303 0x0080008080808000ull, /* 5E */
1304 0x0080008080808080ull, /* 5F */
1305 0x0080800000000000ull, /* 60 */
1306 0x0080800000000080ull, /* 61 */
1307 0x0080800000008000ull, /* 62 */
1308 0x0080800000008080ull, /* 63 */
1309 0x0080800000800000ull, /* 64 */
1310 0x0080800000800080ull, /* 65 */
1311 0x0080800000808000ull, /* 66 */
1312 0x0080800000808080ull, /* 67 */
1313 0x0080800080000000ull, /* 68 */
1314 0x0080800080000080ull, /* 69 */
1315 0x0080800080008000ull, /* 6A */
1316 0x0080800080008080ull, /* 6B */
1317 0x0080800080800000ull, /* 6C */
1318 0x0080800080800080ull, /* 6D */
1319 0x0080800080808000ull, /* 6E */
1320 0x0080800080808080ull, /* 6F */
1321 0x0080808000000000ull, /* 70 */
1322 0x0080808000000080ull, /* 71 */
1323 0x0080808000008000ull, /* 72 */
1324 0x0080808000008080ull, /* 73 */
1325 0x0080808000800000ull, /* 74 */
1326 0x0080808000800080ull, /* 75 */
1327 0x0080808000808000ull, /* 76 */
1328 0x0080808000808080ull, /* 77 */
1329 0x0080808080000000ull, /* 78 */
1330 0x0080808080000080ull, /* 79 */
1331 0x0080808080008000ull, /* 7A */
1332 0x0080808080008080ull, /* 7B */
1333 0x0080808080800000ull, /* 7C */
1334 0x0080808080800080ull, /* 7D */
1335 0x0080808080808000ull, /* 7E */
1336 0x0080808080808080ull, /* 7F */
1337 0x8000000000000000ull, /* 80 */
1338 0x8000000000000080ull, /* 81 */
1339 0x8000000000008000ull, /* 82 */
1340 0x8000000000008080ull, /* 83 */
1341 0x8000000000800000ull, /* 84 */
1342 0x8000000000800080ull, /* 85 */
1343 0x8000000000808000ull, /* 86 */
1344 0x8000000000808080ull, /* 87 */
1345 0x8000000080000000ull, /* 88 */
1346 0x8000000080000080ull, /* 89 */
1347 0x8000000080008000ull, /* 8A */
1348 0x8000000080008080ull, /* 8B */
1349 0x8000000080800000ull, /* 8C */
1350 0x8000000080800080ull, /* 8D */
1351 0x8000000080808000ull, /* 8E */
1352 0x8000000080808080ull, /* 8F */
1353 0x8000008000000000ull, /* 90 */
1354 0x8000008000000080ull, /* 91 */
1355 0x8000008000008000ull, /* 92 */
1356 0x8000008000008080ull, /* 93 */
1357 0x8000008000800000ull, /* 94 */
1358 0x8000008000800080ull, /* 95 */
1359 0x8000008000808000ull, /* 96 */
1360 0x8000008000808080ull, /* 97 */
1361 0x8000008080000000ull, /* 98 */
1362 0x8000008080000080ull, /* 99 */
1363 0x8000008080008000ull, /* 9A */
1364 0x8000008080008080ull, /* 9B */
1365 0x8000008080800000ull, /* 9C */
1366 0x8000008080800080ull, /* 9D */
1367 0x8000008080808000ull, /* 9E */
1368 0x8000008080808080ull, /* 9F */
1369 0x8000800000000000ull, /* A0 */
1370 0x8000800000000080ull, /* A1 */
1371 0x8000800000008000ull, /* A2 */
1372 0x8000800000008080ull, /* A3 */
1373 0x8000800000800000ull, /* A4 */
1374 0x8000800000800080ull, /* A5 */
1375 0x8000800000808000ull, /* A6 */
1376 0x8000800000808080ull, /* A7 */
1377 0x8000800080000000ull, /* A8 */
1378 0x8000800080000080ull, /* A9 */
1379 0x8000800080008000ull, /* AA */
1380 0x8000800080008080ull, /* AB */
1381 0x8000800080800000ull, /* AC */
1382 0x8000800080800080ull, /* AD */
1383 0x8000800080808000ull, /* AE */
1384 0x8000800080808080ull, /* AF */
1385 0x8000808000000000ull, /* B0 */
1386 0x8000808000000080ull, /* B1 */
1387 0x8000808000008000ull, /* B2 */
1388 0x8000808000008080ull, /* B3 */
1389 0x8000808000800000ull, /* B4 */
1390 0x8000808000800080ull, /* B5 */
1391 0x8000808000808000ull, /* B6 */
1392 0x8000808000808080ull, /* B7 */
1393 0x8000808080000000ull, /* B8 */
1394 0x8000808080000080ull, /* B9 */
1395 0x8000808080008000ull, /* BA */
1396 0x8000808080008080ull, /* BB */
1397 0x8000808080800000ull, /* BC */
1398 0x8000808080800080ull, /* BD */
1399 0x8000808080808000ull, /* BE */
1400 0x8000808080808080ull, /* BF */
1401 0x8080000000000000ull, /* C0 */
1402 0x8080000000000080ull, /* C1 */
1403 0x8080000000008000ull, /* C2 */
1404 0x8080000000008080ull, /* C3 */
1405 0x8080000000800000ull, /* C4 */
1406 0x8080000000800080ull, /* C5 */
1407 0x8080000000808000ull, /* C6 */
1408 0x8080000000808080ull, /* C7 */
1409 0x8080000080000000ull, /* C8 */
1410 0x8080000080000080ull, /* C9 */
1411 0x8080000080008000ull, /* CA */
1412 0x8080000080008080ull, /* CB */
1413 0x8080000080800000ull, /* CC */
1414 0x8080000080800080ull, /* CD */
1415 0x8080000080808000ull, /* CE */
1416 0x8080000080808080ull, /* CF */
1417 0x8080008000000000ull, /* D0 */
1418 0x8080008000000080ull, /* D1 */
1419 0x8080008000008000ull, /* D2 */
1420 0x8080008000008080ull, /* D3 */
1421 0x8080008000800000ull, /* D4 */
1422 0x8080008000800080ull, /* D5 */
1423 0x8080008000808000ull, /* D6 */
1424 0x8080008000808080ull, /* D7 */
1425 0x8080008080000000ull, /* D8 */
1426 0x8080008080000080ull, /* D9 */
1427 0x8080008080008000ull, /* DA */
1428 0x8080008080008080ull, /* DB */
1429 0x8080008080800000ull, /* DC */
1430 0x8080008080800080ull, /* DD */
1431 0x8080008080808000ull, /* DE */
1432 0x8080008080808080ull, /* DF */
1433 0x8080800000000000ull, /* E0 */
1434 0x8080800000000080ull, /* E1 */
1435 0x8080800000008000ull, /* E2 */
1436 0x8080800000008080ull, /* E3 */
1437 0x8080800000800000ull, /* E4 */
1438 0x8080800000800080ull, /* E5 */
1439 0x8080800000808000ull, /* E6 */
1440 0x8080800000808080ull, /* E7 */
1441 0x8080800080000000ull, /* E8 */
1442 0x8080800080000080ull, /* E9 */
1443 0x8080800080008000ull, /* EA */
1444 0x8080800080008080ull, /* EB */
1445 0x8080800080800000ull, /* EC */
1446 0x8080800080800080ull, /* ED */
1447 0x8080800080808000ull, /* EE */
1448 0x8080800080808080ull, /* EF */
1449 0x8080808000000000ull, /* F0 */
1450 0x8080808000000080ull, /* F1 */
1451 0x8080808000008000ull, /* F2 */
1452 0x8080808000008080ull, /* F3 */
1453 0x8080808000800000ull, /* F4 */
1454 0x8080808000800080ull, /* F5 */
1455 0x8080808000808000ull, /* F6 */
1456 0x8080808000808080ull, /* F7 */
1457 0x8080808080000000ull, /* F8 */
1458 0x8080808080000080ull, /* F9 */
1459 0x8080808080008000ull, /* FA */
1460 0x8080808080008080ull, /* FB */
1461 0x8080808080800000ull, /* FC */
1462 0x8080808080800080ull, /* FD */
1463 0x8080808080808000ull, /* FE */
1464 0x8080808080808080ull, /* FF */
1467 void helper_vgbbd(ppc_avr_t *r, ppc_avr_t *b)
1470 uint64_t t[2] = { 0, 0 };
1472 VECTOR_FOR_INORDER_I(i, u8) {
1473 #if defined(HOST_WORDS_BIGENDIAN)
1474 t[i >> 3] |= VGBBD_MASKS[b->u8[i]] >> (i & 7);
1476 t[i >> 3] |= VGBBD_MASKS[b->u8[i]] >> (7 - (i & 7));
1484 #define PMSUM(name, srcfld, trgfld, trgtyp) \
1485 void helper_##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
1488 trgtyp prod[sizeof(ppc_avr_t) / sizeof(a->srcfld[0])]; \
1490 VECTOR_FOR_INORDER_I(i, srcfld) { \
1492 for (j = 0; j < sizeof(a->srcfld[0]) * 8; j++) { \
1493 if (a->srcfld[i] & (1ull << j)) { \
1494 prod[i] ^= ((trgtyp)b->srcfld[i] << j); \
1499 VECTOR_FOR_INORDER_I(i, trgfld) { \
1500 r->trgfld[i] = prod[2 * i] ^ prod[2 * i + 1]; \
1504 PMSUM(vpmsumb, u8, u16, uint16_t)
1505 PMSUM(vpmsumh, u16, u32, uint32_t)
1506 PMSUM(vpmsumw, u32, u64, uint64_t)
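/*
 * vpmsumd performs the same carry-less (polynomial) multiply-sum as the
 * helpers above, but on 64-bit elements, so each partial product is
 * 128 bits wide.
 */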
1508 void helper_vpmsumd(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
1511 #ifdef CONFIG_INT128
1513 __uint128_t prod[2];
1515 VECTOR_FOR_INORDER_I(i, u64) {
1517 for (j = 0; j < 64; j++) {
1518 if (a->u64[i] & (1ull << j)) {
1519 prod[i] ^= (((__uint128_t)b->u64[i]) << j);
1524 r->u128 = prod[0] ^ prod[1];
1530 VECTOR_FOR_INORDER_I(i, u64) {
1531 prod[i].VsrD(1) = prod[i].VsrD(0) = 0;
1532 for (j = 0; j < 64; j++) {
1533 if (a->u64[i] & (1ull << j)) {
1537 bshift.VsrD(1) = b->u64[i];
1539 bshift.VsrD(0) = b->u64[i] >> (64 - j);
1540 bshift.VsrD(1) = b->u64[i] << j;
1542 prod[i].VsrD(1) ^= bshift.VsrD(1);
1543 prod[i].VsrD(0) ^= bshift.VsrD(0);
1548 r->VsrD(1) = prod[0].VsrD(1) ^ prod[1].VsrD(1);
1549 r->VsrD(0) = prod[0].VsrD(0) ^ prod[1].VsrD(0);
1554 #if defined(HOST_WORDS_BIGENDIAN)
1559 void helper_vpkpx(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
1563 #if defined(HOST_WORDS_BIGENDIAN)
1564 const ppc_avr_t *x[2] = { a, b };
1566 const ppc_avr_t *x[2] = { b, a };
1569 VECTOR_FOR_INORDER_I(i, u64) {
1570 VECTOR_FOR_INORDER_I(j, u32) {
1571 uint32_t e = x[i]->u32[j];
1573 result.u16[4 * i + j] = (((e >> 9) & 0xfc00) |
1574 ((e >> 6) & 0x3e0) |
1581 #define VPK(suffix, from, to, cvt, dosat) \
1582 void helper_vpk##suffix(CPUPPCState *env, ppc_avr_t *r, \
1583 ppc_avr_t *a, ppc_avr_t *b) \
1588 ppc_avr_t *a0 = PKBIG ? a : b; \
1589 ppc_avr_t *a1 = PKBIG ? b : a; \
1591 VECTOR_FOR_INORDER_I(i, from) { \
1592 result.to[i] = cvt(a0->from[i], &sat); \
1593 result.to[i + ARRAY_SIZE(r->from)] = cvt(a1->from[i], &sat);\
1596 if (dosat && sat) { \
1597 set_vscr_sat(env); \
1601 VPK(shss, s16, s8, cvtshsb, 1)
1602 VPK(shus, s16, u8, cvtshub, 1)
1603 VPK(swss, s32, s16, cvtswsh, 1)
1604 VPK(swus, s32, u16, cvtswuh, 1)
1605 VPK(sdss, s64, s32, cvtsdsw, 1)
1606 VPK(sdus, s64, u32, cvtsduw, 1)
1607 VPK(uhus, u16, u8, cvtuhub, 1)
1608 VPK(uwus, u32, u16, cvtuwuh, 1)
1609 VPK(udus, u64, u32, cvtuduw, 1)
1610 VPK(uhum, u16, u8, I, 0)
1611 VPK(uwum, u32, u16, I, 0)
1612 VPK(udum, u64, u32, I, 0)
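/*
 * The saturating pack forms above may set VSCR[SAT]; the "um" forms simply
 * truncate each element.
 */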
1617 void helper_vrefp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *b)
1621 for (i = 0; i < ARRAY_SIZE(r->f32); i++) {
1622 r->f32[i] = float32_div(float32_one, b->f32[i], &env->vec_status);
1626 #define VRFI(suffix, rounding) \
1627 void helper_vrfi##suffix(CPUPPCState *env, ppc_avr_t *r, \
1631 float_status s = env->vec_status; \
1633 set_float_rounding_mode(rounding, &s); \
1634 for (i = 0; i < ARRAY_SIZE(r->f32); i++) { \
1635 r->f32[i] = float32_round_to_int (b->f32[i], &s); \
1638 VRFI(n, float_round_nearest_even)
1639 VRFI(m, float_round_down)
1640 VRFI(p, float_round_up)
1641 VRFI(z, float_round_to_zero)
1644 #define VROTATE(suffix, element, mask) \
1645 void helper_vrl##suffix(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
1649 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
1650 unsigned int shift = b->element[i] & mask; \
1651 r->element[i] = (a->element[i] << shift) | \
1652 (a->element[i] >> (sizeof(a->element[0]) * 8 - shift)); \
1656 VROTATE(h, u16, 0xF)
1657 VROTATE(w, u32, 0x1F)
1658 VROTATE(d, u64, 0x3F)
1661 void helper_vrsqrtefp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *b)
1665 for (i = 0; i < ARRAY_SIZE(r->f32); i++) {
1666 float32 t = float32_sqrt(b->f32[i], &env->vec_status);
1668 r->f32[i] = float32_div(float32_one, t, &env->vec_status);
1672 #define VRLMI(name, size, element, insert) \
1673 void helper_##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
1676 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
1677 uint##size##_t src1 = a->element[i]; \
1678 uint##size##_t src2 = b->element[i]; \
1679 uint##size##_t src3 = r->element[i]; \
1680 uint##size##_t begin, end, shift, mask, rot_val; \
1682 shift = extract##size(src2, 0, 6); \
1683 end = extract##size(src2, 8, 6); \
1684 begin = extract##size(src2, 16, 6); \
1685 rot_val = rol##size(src1, shift); \
1686 mask = mask_u##size(begin, end); \
1688 r->element[i] = (rot_val & mask) | (src3 & ~mask); \
1690 r->element[i] = (rot_val & mask); \
1695 VRLMI(vrldmi, 64, u64, 1);
1696 VRLMI(vrlwmi, 32, u32, 1);
1697 VRLMI(vrldnm, 64, u64, 0);
1698 VRLMI(vrlwnm, 32, u32, 0);
1700 void helper_vsel(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b,
1703 r->u64[0] = (a->u64[0] & ~c->u64[0]) | (b->u64[0] & c->u64[0]);
1704 r->u64[1] = (a->u64[1] & ~c->u64[1]) | (b->u64[1] & c->u64[1]);
1707 void helper_vexptefp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *b)
1711 for (i = 0; i < ARRAY_SIZE(r->f32); i++) {
1712 r->f32[i] = float32_exp2(b->f32[i], &env->vec_status);
1716 void helper_vlogefp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *b)
1720 for (i = 0; i < ARRAY_SIZE(r->f32); i++) {
1721 r->f32[i] = float32_log2(b->f32[i], &env->vec_status);
1725 #if defined(HOST_WORDS_BIGENDIAN)
1726 #define VEXTU_X_DO(name, size, left) \
1727 target_ulong glue(helper_, name)(target_ulong a, ppc_avr_t *b) \
1731 index = (a & 0xf) * 8; \
1733 index = ((15 - (a & 0xf) + 1) * 8) - size; \
1735 return int128_getlo(int128_rshift(b->s128, index)) & \
1736 MAKE_64BIT_MASK(0, size); \
1739 #define VEXTU_X_DO(name, size, left) \
1740 target_ulong glue(helper_, name)(target_ulong a, ppc_avr_t *b) \
1744 index = ((15 - (a & 0xf) + 1) * 8) - size; \
1746 index = (a & 0xf) * 8; \
1748 return int128_getlo(int128_rshift(b->s128, index)) & \
1749 MAKE_64BIT_MASK(0, size); \
1753 VEXTU_X_DO(vextublx, 8, 1)
1754 VEXTU_X_DO(vextuhlx, 16, 1)
1755 VEXTU_X_DO(vextuwlx, 32, 1)
1756 VEXTU_X_DO(vextubrx, 8, 0)
1757 VEXTU_X_DO(vextuhrx, 16, 0)
1758 VEXTU_X_DO(vextuwrx, 32, 0)
1762 * The specification says that the results are undefined if all of the
1763 * shift counts are not identical. We check that they are identical, to
1764 * conform to what real hardware appears to do.
1766 #define VSHIFT(suffix, leftp) \
1767 void helper_vs##suffix(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
1769 int shift = b->VsrB(15) & 0x7; \
1773 for (i = 0; i < ARRAY_SIZE(r->u8); i++) { \
1774 doit = doit && ((b->u8[i] & 0x7) == shift); \
1779 } else if (leftp) { \
1780 uint64_t carry = a->VsrD(1) >> (64 - shift); \
1782 r->VsrD(0) = (a->VsrD(0) << shift) | carry; \
1783 r->VsrD(1) = a->VsrD(1) << shift; \
1785 uint64_t carry = a->VsrD(0) << (64 - shift); \
1787 r->VsrD(1) = (a->VsrD(1) >> shift) | carry; \
1788 r->VsrD(0) = a->VsrD(0) >> shift; \
1796 void helper_vslv(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
1799 unsigned int shift, bytes, size;
1801 size = ARRAY_SIZE(r->u8);
1802 for (i = 0; i < size; i++) {
1803 shift = b->VsrB(i) & 0x7; /* extract shift value */
1804 bytes = (a->VsrB(i) << 8) + /* extract adjacent bytes */
1805 (((i + 1) < size) ? a->VsrB(i + 1) : 0);
1806 r->VsrB(i) = (bytes << shift) >> 8; /* shift and store result */
1810 void helper_vsrv(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
1813 unsigned int shift, bytes;
1816 * Use reverse order, as destination and source register can be the
1817 * same. It is modified in place, saving a temporary; processing in
1818 * reverse order guarantees that the computed result is not fed back.
1820 for (i = ARRAY_SIZE(r->u8) - 1; i >= 0; i--) {
1821 shift = b->VsrB(i) & 0x7; /* extract shift value */
1822 bytes = ((i ? a->VsrB(i - 1) : 0) << 8) + a->VsrB(i);
1823 /* extract adjacent bytes */
1824 r->VsrB(i) = (bytes >> shift) & 0xFF; /* shift and store result */
1828 void helper_vsldoi(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t shift)
1830 int sh = shift & 0xf;
1834 for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
1837 result.VsrB(i) = b->VsrB(index - 0x10);
1839 result.VsrB(i) = a->VsrB(index);
1845 void helper_vslo(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
1847 int sh = (b->VsrB(0xf) >> 3) & 0xf;
1849 #if defined(HOST_WORDS_BIGENDIAN)
1850 memmove(&r->u8[0], &a->u8[sh], 16 - sh);
1851 memset(&r->u8[16 - sh], 0, sh);
1853 memmove(&r->u8[sh], &a->u8[0], 16 - sh);
1854 memset(&r->u8[0], 0, sh);
1858 #if defined(HOST_WORDS_BIGENDIAN)
1859 #define VINSERT(suffix, element) \
1860 void helper_vinsert##suffix(ppc_avr_t *r, ppc_avr_t *b, uint32_t index) \
1862 memmove(&r->u8[index], &b->u8[8 - sizeof(r->element[0])], \
1863 sizeof(r->element[0])); \
1866 #define VINSERT(suffix, element) \
1867 void helper_vinsert##suffix(ppc_avr_t *r, ppc_avr_t *b, uint32_t index) \
1869 uint32_t d = (16 - index) - sizeof(r->element[0]); \
1870 memmove(&r->u8[d], &b->u8[8], sizeof(r->element[0])); \
1878 #if defined(HOST_WORDS_BIGENDIAN)
1879 #define VEXTRACT(suffix, element) \
1880 void helper_vextract##suffix(ppc_avr_t *r, ppc_avr_t *b, uint32_t index) \
1882 uint32_t es = sizeof(r->element[0]); \
1883 memmove(&r->u8[8 - es], &b->u8[index], es); \
1884 memset(&r->u8[8], 0, 8); \
1885 memset(&r->u8[0], 0, 8 - es); \
1888 #define VEXTRACT(suffix, element) \
1889 void helper_vextract##suffix(ppc_avr_t *r, ppc_avr_t *b, uint32_t index) \
1891 uint32_t es = sizeof(r->element[0]); \
1892 uint32_t s = (16 - index) - es; \
1893 memmove(&r->u8[8], &b->u8[s], es); \
1894 memset(&r->u8[0], 0, 8); \
1895 memset(&r->u8[8 + es], 0, 8 - es); \
1904 void helper_xxextractuw(CPUPPCState *env, ppc_vsr_t *xt,
1905 ppc_vsr_t *xb, uint32_t index)
1908 size_t es = sizeof(uint32_t);
1913 for (i = 0; i < es; i++, ext_index++) {
1914 t.VsrB(8 - es + i) = xb->VsrB(ext_index % 16);
1920 void helper_xxinsertw(CPUPPCState *env, ppc_vsr_t *xt,
1921 ppc_vsr_t *xb, uint32_t index)
1924 size_t es = sizeof(uint32_t);
1925 int ins_index, i = 0;
1928 for (i = 0; i < es && ins_index < 16; i++, ins_index++) {
1929 t.VsrB(ins_index) = xb->VsrB(8 - es + i);
1935 #define VEXT_SIGNED(name, element, cast) \
1936 void helper_##name(ppc_avr_t *r, ppc_avr_t *b) \
1939 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
1940 r->element[i] = (cast)b->element[i]; \
1943 VEXT_SIGNED(vextsb2w, s32, int8_t)
1944 VEXT_SIGNED(vextsb2d, s64, int8_t)
1945 VEXT_SIGNED(vextsh2w, s32, int16_t)
1946 VEXT_SIGNED(vextsh2d, s64, int16_t)
1947 VEXT_SIGNED(vextsw2d, s64, int32_t)
1950 #define VNEG(name, element) \
1951 void helper_##name(ppc_avr_t *r, ppc_avr_t *b) \
1954 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
1955 r->element[i] = -b->element[i]; \
1962 void helper_vsro(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
1964 int sh = (b->VsrB(0xf) >> 3) & 0xf;
1966 #if defined(HOST_WORDS_BIGENDIAN)
1967 memmove(&r->u8[sh], &a->u8[0], 16 - sh);
1968 memset(&r->u8[0], 0, sh);
1970 memmove(&r->u8[0], &a->u8[sh], 16 - sh);
1971 memset(&r->u8[16 - sh], 0, sh);
1975 void helper_vsubcuw(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
1979 for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
1980 r->u32[i] = a->u32[i] >= b->u32[i];
1984 void helper_vsumsws(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
1991 upper = ARRAY_SIZE(r->s32) - 1;
1992 t = (int64_t)b->VsrSW(upper);
1993 for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
1995 result.VsrSW(i) = 0;
1997 result.VsrSW(upper) = cvtsdsw(t, &sat);
2005 void helper_vsum2sws(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2012 for (i = 0; i < ARRAY_SIZE(r->u64); i++) {
2013 int64_t t = (int64_t)b->VsrSW(upper + i * 2);
2016 for (j = 0; j < ARRAY_SIZE(r->u64); j++) {
2017 t += a->VsrSW(2 * i + j);
2019 result.VsrSW(upper + i * 2) = cvtsdsw(t, &sat);
2028 void helper_vsum4sbs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2033 for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
2034 int64_t t = (int64_t)b->s32[i];
2036 for (j = 0; j < ARRAY_SIZE(r->s32); j++) {
2037 t += a->s8[4 * i + j];
2039 r->s32[i] = cvtsdsw(t, &sat);
2047 void helper_vsum4shs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2052 for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
2053 int64_t t = (int64_t)b->s32[i];
2055 t += a->s16[2 * i] + a->s16[2 * i + 1];
2056 r->s32[i] = cvtsdsw(t, &sat);
2064 void helper_vsum4ubs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2069 for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
2070 uint64_t t = (uint64_t)b->u32[i];
2072 for (j = 0; j < ARRAY_SIZE(r->u32); j++) {
2073 t += a->u8[4 * i + j];
2075 r->u32[i] = cvtuduw(t, &sat);
2083 #if defined(HOST_WORDS_BIGENDIAN)
2090 #define VUPKPX(suffix, hi) \
2091 void helper_vupk##suffix(ppc_avr_t *r, ppc_avr_t *b) \
2096 for (i = 0; i < ARRAY_SIZE(r->u32); i++) { \
2097 uint16_t e = b->u16[hi ? i : i + 4]; \
2098 uint8_t a = (e >> 15) ? 0xff : 0; \
2099 uint8_t r = (e >> 10) & 0x1f; \
2100 uint8_t g = (e >> 5) & 0x1f; \
2101 uint8_t b = e & 0x1f; \
2103 result.u32[i] = (a << 24) | (r << 16) | (g << 8) | b; \
2111 #define VUPK(suffix, unpacked, packee, hi) \
2112 void helper_vupk##suffix(ppc_avr_t *r, ppc_avr_t *b) \
2118 for (i = 0; i < ARRAY_SIZE(r->unpacked); i++) { \
2119 result.unpacked[i] = b->packee[i]; \
2122 for (i = ARRAY_SIZE(r->unpacked); i < ARRAY_SIZE(r->packee); \
2124 result.unpacked[i - ARRAY_SIZE(r->unpacked)] = b->packee[i]; \
2129 VUPK(hsb, s16, s8, UPKHI)
2130 VUPK(hsh, s32, s16, UPKHI)
2131 VUPK(hsw, s64, s32, UPKHI)
2132 VUPK(lsb, s16, s8, UPKLO)
2133 VUPK(lsh, s32, s16, UPKLO)
2134 VUPK(lsw, s64, s32, UPKLO)
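/*
 * vupkh... sign-extends the high half of the packed source into full-width
 * elements, vupkl... the low half; UPKHI/UPKLO adjust for host byte order.
 */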
2139 #define VGENERIC_DO(name, element) \
2140 void helper_v##name(ppc_avr_t *r, ppc_avr_t *b) \
2144 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2145 r->element[i] = name(b->element[i]); \
2149 #define clzb(v) ((v) ? clz32((uint32_t)(v) << 24) : 8)
2150 #define clzh(v) ((v) ? clz32((uint32_t)(v) << 16) : 16)
2151 #define clzw(v) clz32((v))
2152 #define clzd(v) clz64((v))
2154 VGENERIC_DO(clzb, u8)
2155 VGENERIC_DO(clzh, u16)
2156 VGENERIC_DO(clzw, u32)
2157 VGENERIC_DO(clzd, u64)
2164 #define ctzb(v) ((v) ? ctz32(v) : 8)
2165 #define ctzh(v) ((v) ? ctz32(v) : 16)
2166 #define ctzw(v) ctz32((v))
2167 #define ctzd(v) ctz64((v))
2169 VGENERIC_DO(ctzb, u8)
2170 VGENERIC_DO(ctzh, u16)
2171 VGENERIC_DO(ctzw, u32)
2172 VGENERIC_DO(ctzd, u64)
2179 #define popcntb(v) ctpop8(v)
2180 #define popcnth(v) ctpop16(v)
2181 #define popcntw(v) ctpop32(v)
2182 #define popcntd(v) ctpop64(v)
2184 VGENERIC_DO(popcntb, u8)
2185 VGENERIC_DO(popcnth, u16)
2186 VGENERIC_DO(popcntw, u32)
2187 VGENERIC_DO(popcntd, u64)
2196 #if defined(HOST_WORDS_BIGENDIAN)
2197 #define QW_ONE { .u64 = { 0, 1 } }
2199 #define QW_ONE { .u64 = { 1, 0 } }
2202 #ifndef CONFIG_INT128
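/*
 * Without a native 128-bit integer type, emulate quadword arithmetic with
 * pairs of 64-bit halves; (~a.VsrD(1) < b.VsrD(1)) detects a carry out of
 * the low 64-bit addition.
 */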
2204 static inline void avr_qw_not(ppc_avr_t *t, ppc_avr_t a)
2206 t->u64[0] = ~a.u64[0];
2207 t->u64[1] = ~a.u64[1];
2210 static int avr_qw_cmpu(ppc_avr_t a, ppc_avr_t b)
2212 if (a.VsrD(0) < b.VsrD(0)) {
2214 } else if (a.VsrD(0) > b.VsrD(0)) {
2216 } else if (a.VsrD(1) < b.VsrD(1)) {
2218 } else if (a.VsrD(1) > b.VsrD(1)) {
2225 static void avr_qw_add(ppc_avr_t *t, ppc_avr_t a, ppc_avr_t b)
2227 t->VsrD(1) = a.VsrD(1) + b.VsrD(1);
2228 t->VsrD(0) = a.VsrD(0) + b.VsrD(0) +
2229 (~a.VsrD(1) < b.VsrD(1));
2232 static int avr_qw_addc(ppc_avr_t *t, ppc_avr_t a, ppc_avr_t b)
2235 t->VsrD(1) = a.VsrD(1) + b.VsrD(1);
2236 t->VsrD(0) = a.VsrD(0) + b.VsrD(0) +
2237 (~a.VsrD(1) < b.VsrD(1));
2238 avr_qw_not(¬_a, a);
2239 return avr_qw_cmpu(not_a, b) < 0;
2244 void helper_vadduqm(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2246 #ifdef CONFIG_INT128
2247 r->u128 = a->u128 + b->u128;
2249 avr_qw_add(r, *a, *b);
2253 void helper_vaddeuqm(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2255 #ifdef CONFIG_INT128
2256 r->u128 = a->u128 + b->u128 + (c->u128 & 1);
2259 if (c->VsrD(1) & 1) {
2263 tmp.VsrD(1) = c->VsrD(1) & 1;
2264 avr_qw_add(&tmp, *a, tmp);
2265 avr_qw_add(r, tmp, *b);
2267 avr_qw_add(r, *a, *b);
2272 void helper_vaddcuq(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2274 #ifdef CONFIG_INT128
2275 r->u128 = (~a->u128 < b->u128);
2279 avr_qw_not(¬_a, *a);
2282 r->VsrD(1) = (avr_qw_cmpu(not_a, *b) < 0);
2286 void helper_vaddecuq(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2288 #ifdef CONFIG_INT128
2289 int carry_out = (~a->u128 < b->u128);
2290 if (!carry_out && (c->u128 & 1)) {
2291 carry_out = ((a->u128 + b->u128 + 1) == 0) &&
2292 ((a->u128 != 0) || (b->u128 != 0));
2294 r->u128 = carry_out;
2297 int carry_in = c->VsrD(1) & 1;
2301 carry_out = avr_qw_addc(&tmp, *a, *b);
2303 if (!carry_out && carry_in) {
2304 ppc_avr_t one = QW_ONE;
2305 carry_out = avr_qw_addc(&tmp, tmp, one);
2308 r->VsrD(1) = carry_out;
2312 void helper_vsubuqm(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2314 #ifdef CONFIG_INT128
2315 r->u128 = a->u128 - b->u128;
2318 ppc_avr_t one = QW_ONE;
2320 avr_qw_not(&tmp, *b);
2321 avr_qw_add(&tmp, *a, tmp);
2322 avr_qw_add(r, tmp, one);
2326 void helper_vsubeuqm(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2328 #ifdef CONFIG_INT128
2329 r->u128 = a->u128 + ~b->u128 + (c->u128 & 1);
2333 avr_qw_not(&tmp, *b);
2334 avr_qw_add(&sum, *a, tmp);
2337 tmp.VsrD(1) = c->VsrD(1) & 1;
2338 avr_qw_add(r, sum, tmp);
2342 void helper_vsubcuq(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2344 #ifdef CONFIG_INT128
2345 r->u128 = (~a->u128 < ~b->u128) ||
2346 (a->u128 + ~b->u128 == (__uint128_t)-1);
2348 int carry = (avr_qw_cmpu(*a, *b) > 0);
2351 avr_qw_not(&tmp, *b);
2352 avr_qw_add(&tmp, *a, tmp);
2353 carry = ((tmp.VsrSD(0) == -1ull) && (tmp.VsrSD(1) == -1ull));
2360 void helper_vsubecuq(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2362 #ifdef CONFIG_INT128
2364 (~a->u128 < ~b->u128) ||
2365 ((c->u128 & 1) && (a->u128 + ~b->u128 == (__uint128_t)-1));
2367 int carry_in = c->VsrD(1) & 1;
2368 int carry_out = (avr_qw_cmpu(*a, *b) > 0);
2369 if (!carry_out && carry_in) {
2371 avr_qw_not(&tmp, *b);
2372 avr_qw_add(&tmp, *a, tmp);
2373 carry_out = ((tmp.VsrD(0) == -1ull) && (tmp.VsrD(1) == -1ull));
2377 r->VsrD(1) = carry_out;
2381 #define BCD_PLUS_PREF_1 0xC
2382 #define BCD_PLUS_PREF_2 0xF
2383 #define BCD_PLUS_ALT_1 0xA
2384 #define BCD_NEG_PREF 0xD
2385 #define BCD_NEG_ALT 0xB
2386 #define BCD_PLUS_ALT_2 0xE
2387 #define NATIONAL_PLUS 0x2B
2388 #define NATIONAL_NEG 0x2D
2390 #if defined(HOST_WORDS_BIGENDIAN)
2391 #define BCD_DIG_BYTE(n) (15 - ((n) / 2))
2393 #define BCD_DIG_BYTE(n) ((n) / 2)
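/*
 * Packed BCD layout used below: the 128-bit register holds 31 decimal
 * digits plus a sign nibble.  Digit 0 is the sign nibble in the least
 * significant position, digit 1 is the least significant value digit, and
 * BCD_DIG_BYTE() maps a digit number to the host byte containing it.
 */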
2396 static int bcd_get_sgn(ppc_avr_t *bcd)
2398 switch (bcd->u8[BCD_DIG_BYTE(0)] & 0xF) {
2399 case BCD_PLUS_PREF_1:
2400 case BCD_PLUS_PREF_2:
2401 case BCD_PLUS_ALT_1:
2402 case BCD_PLUS_ALT_2:
2420 static int bcd_preferred_sgn(int sgn, int ps)
2423 return (ps == 0) ? BCD_PLUS_PREF_1 : BCD_PLUS_PREF_2;
2425 return BCD_NEG_PREF;
2429 static uint8_t bcd_get_digit(ppc_avr_t *bcd, int n, int *invalid)
2433 result = bcd->u8[BCD_DIG_BYTE(n)] >> 4;
2435 result = bcd->u8[BCD_DIG_BYTE(n)] & 0xF;
2438 if (unlikely(result > 9)) {
2444 static void bcd_put_digit(ppc_avr_t *bcd, uint8_t digit, int n)
2447 bcd->u8[BCD_DIG_BYTE(n)] &= 0x0F;
2448 bcd->u8[BCD_DIG_BYTE(n)] |= (digit << 4);
2450 bcd->u8[BCD_DIG_BYTE(n)] &= 0xF0;
2451 bcd->u8[BCD_DIG_BYTE(n)] |= digit;
2455 static bool bcd_is_valid(ppc_avr_t *bcd)
2460 if (bcd_get_sgn(bcd) == 0) {
2464 for (i = 1; i < 32; i++) {
2465 bcd_get_digit(bcd, i, &invalid);
2466 if (unlikely(invalid)) {
2473 static int bcd_cmp_zero(ppc_avr_t *bcd)
2475 if (bcd->VsrD(0) == 0 && (bcd->VsrD(1) >> 4) == 0) {
2478 return (bcd_get_sgn(bcd) == 1) ? CRF_GT : CRF_LT;
2482 static uint16_t get_national_digit(ppc_avr_t *reg, int n)
2484 return reg->VsrH(7 - n);
2487 static void set_national_digit(ppc_avr_t *reg, uint8_t val, int n)
2489 reg->VsrH(7 - n) = val;
2492 static int bcd_cmp_mag(ppc_avr_t *a, ppc_avr_t *b)
2496 for (i = 31; i > 0; i--) {
2497 uint8_t dig_a = bcd_get_digit(a, i, &invalid);
2498 uint8_t dig_b = bcd_get_digit(b, i, &invalid);
2499 if (unlikely(invalid)) {
2500 return 0; /* doesn't matter */
2501 } else if (dig_a > dig_b) {
2503 } else if (dig_a < dig_b) {
2511 static void bcd_add_mag(ppc_avr_t *t, ppc_avr_t *a, ppc_avr_t *b, int *invalid,
2516 for (i = 1; i <= 31; i++) {
2517 uint8_t digit = bcd_get_digit(a, i, invalid) +
2518 bcd_get_digit(b, i, invalid) + carry;
2526 bcd_put_digit(t, digit, i);
2532 static void bcd_sub_mag(ppc_avr_t *t, ppc_avr_t *a, ppc_avr_t *b, int *invalid,
2538 for (i = 1; i <= 31; i++) {
2539 uint8_t digit = bcd_get_digit(a, i, invalid) -
2540 bcd_get_digit(b, i, invalid) + carry;
2548 bcd_put_digit(t, digit, i);
2554 uint32_t helper_bcdadd(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t ps)
2557 int sgna = bcd_get_sgn(a);
2558 int sgnb = bcd_get_sgn(b);
2559 int invalid = (sgna == 0) || (sgnb == 0);
2562 ppc_avr_t result = { .u64 = { 0, 0 } };
2566 result.u8[BCD_DIG_BYTE(0)] = bcd_preferred_sgn(sgna, ps);
2567 bcd_add_mag(&result, a, b, &invalid, &overflow);
2568 cr = bcd_cmp_zero(&result);
2570 int magnitude = bcd_cmp_mag(a, b);
2571 if (magnitude > 0) {
2572 result.u8[BCD_DIG_BYTE(0)] = bcd_preferred_sgn(sgna, ps);
2573 bcd_sub_mag(&result, a, b, &invalid, &overflow);
2574 cr = (sgna > 0) ? CRF_GT : CRF_LT;
2575 } else if (magnitude < 0) {
2576 result.u8[BCD_DIG_BYTE(0)] = bcd_preferred_sgn(sgnb, ps);
2577 bcd_sub_mag(&result, b, a, &invalid, &overflow);
2578 cr = (sgnb > 0) ? CRF_GT : CRF_LT;
2580 result.u8[BCD_DIG_BYTE(0)] = bcd_preferred_sgn(0, ps);
2586 if (unlikely(invalid)) {
2587 result.VsrD(0) = result.VsrD(1) = -1;
2589 } else if (overflow) {
2598 uint32_t helper_bcdsub(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t ps)
2600 ppc_avr_t bcopy = *b;
2601 int sgnb = bcd_get_sgn(b);
2603 bcd_put_digit(&bcopy, BCD_PLUS_PREF_1, 0);
2604 } else if (sgnb > 0) {
2605 bcd_put_digit(&bcopy, BCD_NEG_PREF, 0);
2607 /* else invalid ... defer to bcdadd code for proper handling */
2609 return helper_bcdadd(r, a, &bcopy, ps);
2612 uint32_t helper_bcdcfn(ppc_avr_t *r, ppc_avr_t *b, uint32_t ps)
2616 uint16_t national = 0;
2617 uint16_t sgnb = get_national_digit(b, 0);
2618 ppc_avr_t ret = { .u64 = { 0, 0 } };
2619 int invalid = (sgnb != NATIONAL_PLUS && sgnb != NATIONAL_NEG);
2621 for (i = 1; i < 8; i++) {
2622 national = get_national_digit(b, i);
2623 if (unlikely(national < 0x30 || national > 0x39)) {
2628 bcd_put_digit(&ret, national & 0xf, i);
2631 if (sgnb == NATIONAL_PLUS) {
2632 bcd_put_digit(&ret, (ps == 0) ? BCD_PLUS_PREF_1 : BCD_PLUS_PREF_2, 0);
2634 bcd_put_digit(&ret, BCD_NEG_PREF, 0);
2637 cr = bcd_cmp_zero(&ret);
2639 if (unlikely(invalid)) {
2648 uint32_t helper_bcdctn(ppc_avr_t *r, ppc_avr_t *b, uint32_t ps)
2652 int sgnb = bcd_get_sgn(b);
2653 int invalid = (sgnb == 0);
2654 ppc_avr_t ret = { .u64 = { 0, 0 } };
2656 int ox_flag = (b->VsrD(0) != 0) || ((b->VsrD(1) >> 32) != 0);
2658 for (i = 1; i < 8; i++) {
2659 set_national_digit(&ret, 0x30 + bcd_get_digit(b, i, &invalid), i);
2661 if (unlikely(invalid)) {
2665 set_national_digit(&ret, (sgnb == -1) ? NATIONAL_NEG : NATIONAL_PLUS, 0);
2667 cr = bcd_cmp_zero(b);
2673 if (unlikely(invalid)) {
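/*
 * Zoned decimal (bcdcfz./bcdctz.): sixteen bytes, one digit per byte in the
 * low nibble with a zone in the high nibble (0x3 for ASCII when ps = 0, 0xF
 * for EBCDIC when ps = 1), so ASCII "12" is the bytes 0x31 0x32.  The zone
 * nibble of the rightmost byte carries the sign: with ps = 0 a set 0x4 bit
 * means minus, with ps = 1 the codes 0xB and 0xD mean minus.  helper_bcdctz
 * below converts the other way and flags overflow when the source has more
 * than sixteen significant digits.
 */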
2682 uint32_t helper_bcdcfz(ppc_avr_t *r, ppc_avr_t *b, uint32_t ps)
2688 int zone_lead = ps ? 0xF : 0x3;
2690 ppc_avr_t ret = { .u64 = { 0, 0 } };
2691 int sgnb = b->u8[BCD_DIG_BYTE(0)] >> 4;
2693 if (unlikely((sgnb < 0xA) && ps)) {
2697 for (i = 0; i < 16; i++) {
2698 zone_digit = i ? b->u8[BCD_DIG_BYTE(i * 2)] >> 4 : zone_lead;
2699 digit = b->u8[BCD_DIG_BYTE(i * 2)] & 0xF;
2700 if (unlikely(zone_digit != zone_lead || digit > 0x9)) {
2705 bcd_put_digit(&ret, digit, i + 1);
2708 if ((ps && (sgnb == 0xB || sgnb == 0xD)) ||
2709 (!ps && (sgnb & 0x4))) {
2710 bcd_put_digit(&ret, BCD_NEG_PREF, 0);
2712 bcd_put_digit(&ret, BCD_PLUS_PREF_1, 0);
2715 cr = bcd_cmp_zero(&ret);
2717 if (unlikely(invalid)) {
2726 uint32_t helper_bcdctz(ppc_avr_t *r, ppc_avr_t *b, uint32_t ps)
2731 int sgnb = bcd_get_sgn(b);
2732 int zone_lead = (ps) ? 0xF0 : 0x30;
2733 int invalid = (sgnb == 0);
2734 ppc_avr_t ret = { .u64 = { 0, 0 } };
2736 int ox_flag = ((b->VsrD(0) >> 4) != 0);
2738 for (i = 0; i < 16; i++) {
2739 digit = bcd_get_digit(b, i + 1, &invalid);
2741 if (unlikely(invalid)) {
2745 ret.u8[BCD_DIG_BYTE(i * 2)] = zone_lead + digit;
2749 bcd_put_digit(&ret, (sgnb == 1) ? 0xC : 0xD, 1);
2751 bcd_put_digit(&ret, (sgnb == 1) ? 0x3 : 0x7, 1);
2754 cr = bcd_cmp_zero(b);
2760 if (unlikely(invalid)) {
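/*
 * bcdcfsq.: convert a signed quadword to signed BCD.  The magnitude of a
 * negative input is formed by 128-bit negation, then split by a 128-by-64
 * division with divisor 10^15; with this divu128() the quotient lands in
 * lo_value and the remainder in hi_value, so the remainder supplies digits
 * 1..15 and the quotient digits 16..31.  A quotient above 10^16 - 1 (more
 * than 31 decimal digits) sets CRF_SO.
 */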
2769 uint32_t helper_bcdcfsq(ppc_avr_t *r, ppc_avr_t *b, uint32_t ps)
2775 ppc_avr_t ret = { .u64 = { 0, 0 } };
2777 if (b->VsrSD(0) < 0) {
2778 lo_value = -b->VsrSD(1);
2779 hi_value = ~b->VsrD(0) + !lo_value;
2780 bcd_put_digit(&ret, 0xD, 0);
2782 lo_value = b->VsrD(1);
2783 hi_value = b->VsrD(0);
2784 bcd_put_digit(&ret, bcd_preferred_sgn(0, ps), 0);
2787 if (divu128(&lo_value, &hi_value, 1000000000000000ULL) ||
2788 lo_value > 9999999999999999ULL) {
2792 for (i = 1; i < 16; hi_value /= 10, i++) {
2793 bcd_put_digit(&ret, hi_value % 10, i);
2796 for (; i < 32; lo_value /= 10, i++) {
2797 bcd_put_digit(&ret, lo_value % 10, i);
2800 cr |= bcd_cmp_zero(&ret);
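/*
 * bcdctsq.: convert signed BCD to a signed quadword using Horner's rule:
 * starting from digit 31, the 128-bit accumulator in hi_value:lo_value is
 * multiplied by ten with mulu64() and the next digit added, and the final
 * value is negated when the sign nibble says minus.  Invalid digits or sign
 * codes are reported through the returned flags.
 */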
2807 uint32_t helper_bcdctsq(ppc_avr_t *r, ppc_avr_t *b, uint32_t ps)
2814 uint64_t hi_value = 0;
2815 int sgnb = bcd_get_sgn(b);
2816 int invalid = (sgnb == 0);
2818 lo_value = bcd_get_digit(b, 31, &invalid);
2819 for (i = 30; i > 0; i--) {
2820 mulu64(&lo_value, &carry, lo_value, 10ULL);
2821 mulu64(&hi_value, &unused, hi_value, 10ULL);
2822 lo_value += bcd_get_digit(b, i, &invalid);
2825 if (unlikely(invalid)) {
2831 r->VsrSD(1) = -lo_value;
2832 r->VsrSD(0) = ~hi_value + !r->VsrSD(1);
2834 r->VsrSD(1) = lo_value;
2835 r->VsrSD(0) = hi_value;
2838 cr = bcd_cmp_zero(b);
2840 if (unlikely(invalid)) {
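/*
 * bcdcpsgn.: the result takes its digits from ra and its sign nibble from
 * rb; helper_bcdsetsgn below instead rewrites the sign nibble with the
 * preferred code for ps.  Both validate the operand digits and return the
 * usual less-than/greater-than/equal flags.
 */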
2847 uint32_t helper_bcdcpsgn(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t ps)
2852 if (bcd_get_sgn(a) == 0 || bcd_get_sgn(b) == 0) {
2857 bcd_put_digit(r, b->u8[BCD_DIG_BYTE(0)] & 0xF, 0);
2859 for (i = 1; i < 32; i++) {
2860 bcd_get_digit(a, i, &invalid);
2861 bcd_get_digit(b, i, &invalid);
2862 if (unlikely(invalid)) {
2867 return bcd_cmp_zero(r);
2870 uint32_t helper_bcdsetsgn(ppc_avr_t *r, ppc_avr_t *b, uint32_t ps)
2872 int sgnb = bcd_get_sgn(b);
2875 bcd_put_digit(r, bcd_preferred_sgn(sgnb, ps), 0);
2877 if (bcd_is_valid(b) == false) {
2881 return bcd_cmp_zero(r);
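/*
 * bcds.: decimal shift.  The signed count comes from a byte of ra; a
 * positive count shifts the 31-digit magnitude left by whole digits (four
 * bits each, i.e. a multiply by ten per step, with ox_flag noting digits
 * lost off the top) and a negative count shifts right.  The sign nibble is
 * cleared before shifting and the preferred sign code put back afterwards;
 * counts beyond +/-31 simply clear all the digits.  helper_bcdus below is
 * the unsigned variant, shifting all 32 nibbles with no sign handling.
 */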
2884 uint32_t helper_bcds(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t ps)
2887 #if defined(HOST_WORDS_BIGENDIAN)
2892 bool ox_flag = false;
2893 int sgnb = bcd_get_sgn(b);
2895 ret.VsrD(1) &= ~0xf;
2897 if (bcd_is_valid(b) == false) {
2901 if (unlikely(i > 31)) {
2903 } else if (unlikely(i < -31)) {
2908 ulshift(&ret.VsrD(1), &ret.VsrD(0), i * 4, &ox_flag);
2910 urshift(&ret.VsrD(1), &ret.VsrD(0), -i * 4);
2912 bcd_put_digit(&ret, bcd_preferred_sgn(sgnb, ps), 0);
2916 cr = bcd_cmp_zero(r);
2924 uint32_t helper_bcdus(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t ps)
2929 bool ox_flag = false;
2932 for (i = 0; i < 32; i++) {
2933 bcd_get_digit(b, i, &invalid);
2935 if (unlikely(invalid)) {
2940 #if defined(HOST_WORDS_BIGENDIAN)
2947 ret.VsrD(1) = ret.VsrD(0) = 0;
2948 } else if (i <= -32) {
2949 ret.VsrD(1) = ret.VsrD(0) = 0;
2951 ulshift(&ret.VsrD(1), &ret.VsrD(0), i * 4, &ox_flag);
2953 urshift(&ret.VsrD(1), &ret.VsrD(0), -i * 4);
2957 cr = bcd_cmp_zero(r);
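/*
 * bcdsr.: decimal shift and round.  Left shifts behave as in bcds.; for a
 * right shift the last digit shifted out lands in the cleared sign
 * position, and if it is five or more the magnitude is rounded up by adding
 * bcd_one (a one in digit position 1) before the preferred sign code is
 * restored.
 */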
2965 uint32_t helper_bcdsr(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t ps)
2970 bool ox_flag = false;
2971 int sgnb = bcd_get_sgn(b);
2973 ret.VsrD(1) &= ~0xf;
2975 #if defined(HOST_WORDS_BIGENDIAN)
2977 ppc_avr_t bcd_one = { .u64 = { 0, 0x10 } };
2980 ppc_avr_t bcd_one = { .u64 = { 0x10, 0 } };
2983 if (bcd_is_valid(b) == false) {
2987 if (unlikely(i > 31)) {
2989 } else if (unlikely(i < -31)) {
2994 ulshift(&ret.VsrD(1), &ret.VsrD(0), i * 4, &ox_flag);
2996 urshift(&ret.VsrD(1), &ret.VsrD(0), -i * 4);
2998 if (bcd_get_digit(&ret, 0, &invalid) >= 5) {
2999 bcd_add_mag(&ret, &ret, &bcd_one, &invalid, &unused);
3002 bcd_put_digit(&ret, bcd_preferred_sgn(sgnb, ps), 0);
3004 cr = bcd_cmp_zero(&ret);
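/*
 * bcdtrunc.: truncate to the digit count supplied in a halfword of ra.  A
 * mask covering that many low-order nibbles (plus one for the sign) is
 * kept; any nonzero digit above the mask sets ox_flag, which is ORed into
 * the returned flags.  helper_bcdutrunc below is the unsigned form, with no
 * sign nibble to preserve.
 */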
3013 uint32_t helper_bcdtrunc(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t ps)
3016 uint32_t ox_flag = 0;
3017 #if defined(HOST_WORDS_BIGENDIAN)
3018 int i = a->s16[3] + 1;
3020 int i = a->s16[4] + 1;
3024 if (bcd_is_valid(b) == false) {
3028 if (i > 16 && i < 32) {
3029 mask = (uint64_t)-1 >> (128 - i * 4);
3030 if (ret.VsrD(0) & ~mask) {
3034 ret.VsrD(0) &= mask;
3035 } else if (i >= 0 && i <= 16) {
3036 mask = (uint64_t)-1 >> (64 - i * 4);
3037 if (ret.VsrD(0) || (ret.VsrD(1) & ~mask)) {
3041 ret.VsrD(1) &= mask;
3044 bcd_put_digit(&ret, bcd_preferred_sgn(bcd_get_sgn(b), ps), 0);
3047 return bcd_cmp_zero(&ret) | ox_flag;
3050 uint32_t helper_bcdutrunc(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t ps)
3054 uint32_t ox_flag = 0;
3058 for (i = 0; i < 32; i++) {
3059 bcd_get_digit(b, i, &invalid);
3061 if (unlikely(invalid)) {
3066 #if defined(HOST_WORDS_BIGENDIAN)
3071 if (i > 16 && i < 33) {
3072 mask = (uint64_t)-1 >> (128 - i * 4);
3073 if (ret.VsrD(0) & ~mask) {
3077 ret.VsrD(0) &= mask;
3078 } else if (i > 0 && i <= 16) {
3079 mask = (uint64_t)-1 >> (64 - i * 4);
3080 if (ret.VsrD(0) || (ret.VsrD(1) & ~mask)) {
3084 ret.VsrD(1) &= mask;
3086 } else if (i == 0) {
3087 if (ret.VsrD(0) || ret.VsrD(1)) {
3090 ret.VsrD(0) = ret.VsrD(1) = 0;
3094 if (r->VsrD(0) == 0 && r->VsrD(1) == 0) {
3095 return ox_flag | CRF_EQ;
3098 return ox_flag | CRF_GT;
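/*
 * AES helpers, built on the shared tables from crypto/aes.h: vsbox applies
 * the forward S-box only; vcipher does one full encryption round, the
 * AES_Te0..AES_Te3 tables folding SubBytes, ShiftRows and MixColumns
 * together before the round key in vb is XORed in; vcipherlast is the final
 * round (no MixColumns).  vncipher and vncipherlast are the inverse rounds
 * using AES_isbox, AES_ishifts and the AES_imc InvMixColumns table.
 */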
3101 void helper_vsbox(ppc_avr_t *r, ppc_avr_t *a)
3104 VECTOR_FOR_INORDER_I(i, u8) {
3105 r->u8[i] = AES_sbox[a->u8[i]];
3109 void helper_vcipher(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
3114 VECTOR_FOR_INORDER_I(i, u32) {
3115 result.VsrW(i) = b->VsrW(i) ^
3116 (AES_Te0[a->VsrB(AES_shifts[4 * i + 0])] ^
3117 AES_Te1[a->VsrB(AES_shifts[4 * i + 1])] ^
3118 AES_Te2[a->VsrB(AES_shifts[4 * i + 2])] ^
3119 AES_Te3[a->VsrB(AES_shifts[4 * i + 3])]);
3124 void helper_vcipherlast(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
3129 VECTOR_FOR_INORDER_I(i, u8) {
3130 result.VsrB(i) = b->VsrB(i) ^ (AES_sbox[a->VsrB(AES_shifts[i])]);
3135 void helper_vncipher(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
3137 /* This differs from what is written in ISA V2.07. The RTL is */
3138 /* incorrect and will be fixed in V2.07B. */
3142 VECTOR_FOR_INORDER_I(i, u8) {
3143 tmp.VsrB(i) = b->VsrB(i) ^ AES_isbox[a->VsrB(AES_ishifts[i])];
3146 VECTOR_FOR_INORDER_I(i, u32) {
3148 AES_imc[tmp.VsrB(4 * i + 0)][0] ^
3149 AES_imc[tmp.VsrB(4 * i + 1)][1] ^
3150 AES_imc[tmp.VsrB(4 * i + 2)][2] ^
3151 AES_imc[tmp.VsrB(4 * i + 3)][3];
3155 void helper_vncipherlast(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
3160 VECTOR_FOR_INORDER_I(i, u8) {
3161 result.VsrB(i) = b->VsrB(i) ^ (AES_isbox[a->VsrB(AES_ishifts[i])]);
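/*
 * vshasigmaw: SHA-256 sigma functions.  Bit 0x10 of st_six picks the
 * family: st = 0 gives the message-schedule sigmas (sigma0 = ROTR7 ^ ROTR18
 * ^ SHR3, sigma1 = ROTR17 ^ ROTR19 ^ SHR10), st = 1 the compression Sigmas
 * (Sigma0 = ROTR2 ^ ROTR13 ^ ROTR22, Sigma1 = ROTR6 ^ ROTR11 ^ ROTR25); the
 * low four bits choose, per word, whether the 0 or the 1 variant is applied.
 */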
3166 void helper_vshasigmaw(ppc_avr_t *r, ppc_avr_t *a, uint32_t st_six)
3168 int st = (st_six & 0x10) != 0;
3169 int six = st_six & 0xF;
3172 for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
3174 if ((six & (0x8 >> i)) == 0) {
3175 r->VsrW(i) = ror32(a->VsrW(i), 7) ^
3176 ror32(a->VsrW(i), 18) ^
3178 } else { /* six.bit[i] == 1 */
3179 r->VsrW(i) = ror32(a->VsrW(i), 17) ^
3180 ror32(a->VsrW(i), 19) ^
3183 } else { /* st == 1 */
3184 if ((six & (0x8 >> i)) == 0) {
3185 r->VsrW(i) = ror32(a->VsrW(i), 2) ^
3186 ror32(a->VsrW(i), 13) ^
3187 ror32(a->VsrW(i), 22);
3188 } else { /* six.bit[i] == 1 */
3189 r->VsrW(i) = ror32(a->VsrW(i), 6) ^
3190 ror32(a->VsrW(i), 11) ^
3191 ror32(a->VsrW(i), 25);
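/*
 * vshasigmad: the SHA-512 counterparts, one selector bit per doubleword
 * (every other bit of the low 'six' field): sigma0 = ROTR1 ^ ROTR8 ^ SHR7,
 * sigma1 = ROTR19 ^ ROTR61 ^ SHR6, Sigma0 = ROTR28 ^ ROTR34 ^ ROTR39,
 * Sigma1 = ROTR14 ^ ROTR18 ^ ROTR41.
 */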
3197 void helper_vshasigmad(ppc_avr_t *r, ppc_avr_t *a, uint32_t st_six)
3199 int st = (st_six & 0x10) != 0;
3200 int six = st_six & 0xF;
3203 for (i = 0; i < ARRAY_SIZE(r->u64); i++) {
3205 if ((six & (0x8 >> (2 * i))) == 0) {
3206 r->VsrD(i) = ror64(a->VsrD(i), 1) ^
3207 ror64(a->VsrD(i), 8) ^
3209 } else { /* six.bit[2*i] == 1 */
3210 r->VsrD(i) = ror64(a->VsrD(i), 19) ^
3211 ror64(a->VsrD(i), 61) ^
3214 } else { /* st == 1 */
3215 if ((six & (0x8 >> (2 * i))) == 0) {
3216 r->VsrD(i) = ror64(a->VsrD(i), 28) ^
3217 ror64(a->VsrD(i), 34) ^
3218 ror64(a->VsrD(i), 39);
3219 } else { /* six.bit[2*i] == 1 */
3220 r->VsrD(i) = ror64(a->VsrD(i), 14) ^
3221 ror64(a->VsrD(i), 18) ^
3222 ror64(a->VsrD(i), 41);
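/*
 * vpermxor: byte i of the result is a->VsrB(hi) ^ b->VsrB(lo), where hi and
 * lo are the high and low nibbles of c->VsrB(i); vc thus supplies a pair of
 * table indexes for every result byte.
 */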
3228 void helper_vpermxor(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
3233 for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
3234 int indexA = c->VsrB(i) >> 4;
3235 int indexB = c->VsrB(i) & 0xF;
3237 result.VsrB(i) = a->VsrB(indexA) ^ b->VsrB(indexB);
3242 #undef VECTOR_FOR_INORDER_I
3244 /*****************************************************************************/
3245 /* SPE extension helpers */
3246 /* Use a table to make this quicker */
3247 static const uint8_t hbrev[16] = {
3248 0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE,
3249 0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF,
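/*
 * hbrev maps a nibble to its bit reversal, so byte_reverse() reverses a
 * byte by reversing and swapping its two nibbles.  Worked example (by
 * hand): byte_reverse(0xA3) looks up hbrev[0xA] = 0x5 and hbrev[0x3] = 0xC,
 * giving 0x5 | (0xC << 4) = 0xC5, i.e. 1010 0011 reversed to 1100 0101.
 */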
3252 static inline uint8_t byte_reverse(uint8_t val)
3254 return hbrev[val >> 4] | (hbrev[val & 0xF] << 4);
3257 static inline uint32_t word_reverse(uint32_t val)
3259 return byte_reverse(val >> 24) | (byte_reverse(val >> 16) << 8) |
3260 (byte_reverse(val >> 8) << 16) | (byte_reverse(val) << 24);
3263 #define MASKBITS 16 /* Arbitrary value; to be fixed (the mask width is implementation dependent) */
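/*
 * brinc: bit-reversed increment, as used for FFT-style bit-reversed
 * addressing.  The bits of rA (arg1) selected by the mask in rB (arg2,
 * limited here to MASKBITS bits) are treated as a counter stored in reverse
 * bit order: they are bit-reversed, incremented (bits outside rB's mask are
 * pre-set so the carry skips over them), reversed back and merged into rA
 * under the mask.
 */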
3264 target_ulong helper_brinc(target_ulong arg1, target_ulong arg2)
3266 uint32_t a, b, d, mask;
3268 mask = UINT32_MAX >> (32 - MASKBITS);
3271 d = word_reverse(1 + word_reverse(a | ~b));
3272 return (arg1 & ~mask) | (d & b);
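/*
 * helper_cntlsw32 counts the leading bits that equal the sign bit (leading
 * ones for a negative word, leading zeros otherwise); helper_cntlzw32 is a
 * plain count-leading-zeros.
 */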
3275 uint32_t helper_cntlsw32(uint32_t val)
3277 if (val & 0x80000000) {
3284 uint32_t helper_cntlzw32(uint32_t val)
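/*
 * dlmzb: determine the leftmost zero byte in the eight bytes formed by
 * high:low, scanning from the most significant byte.  i ends up as the
 * 1-based position of the first zero byte (8 when none is found); it is
 * written to the low seven bits of XER and returned, and with update_Rc
 * set, CR0 records whether the zero byte was in the high word, the low word
 * or absent, ORed with the usual SO copy.
 */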
3290 target_ulong helper_dlmzb(CPUPPCState *env, target_ulong high,
3291 target_ulong low, uint32_t update_Rc)
3297 for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
3298 if ((high & mask) == 0) {
3306 for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
3307 if ((low & mask) == 0) {
3320 env->xer = (env->xer & ~0x7F) | i;
3322 env->crf[0] |= xer_so;