/*
 * NOTE: this file was recovered from a git-blame web page for
 * qemu.git / target-ppc / int_helper.c (around the commit
 * "target-ppc: Altivec 2.07: Change VMUL_DO to Support 64-bit Integers").
 * The page header and per-line blame numbers have been stripped.
 */
1/*
2 * PowerPC integer and vector emulation helpers for QEMU.
3 *
4 * Copyright (c) 2003-2007 Jocelyn Mayer
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19#include "cpu.h"
20#include "qemu/host-utils.h"
21#include "helper.h"
22
23#include "helper_regs.h"
24/*****************************************************************************/
25/* Fixed point operations helpers */
26#if defined(TARGET_PPC64)
27
28uint64_t helper_mulldo(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
29{
30 int64_t th;
31 uint64_t tl;
32
33 muls64(&tl, (uint64_t *)&th, arg1, arg2);
34 /* If th != 0 && th != -1, then we had an overflow */
35 if (likely((uint64_t)(th + 1) <= 1)) {
36 env->ov = 0;
37 } else {
38 env->so = env->ov = 1;
39 }
40 return (int64_t)tl;
41}
42#endif
43
44target_ulong helper_divweu(CPUPPCState *env, target_ulong ra, target_ulong rb,
45 uint32_t oe)
46{
47 uint64_t rt = 0;
48 int overflow = 0;
49
50 uint64_t dividend = (uint64_t)ra << 32;
51 uint64_t divisor = (uint32_t)rb;
52
53 if (unlikely(divisor == 0)) {
54 overflow = 1;
55 } else {
56 rt = dividend / divisor;
57 overflow = rt > UINT32_MAX;
58 }
59
60 if (unlikely(overflow)) {
61 rt = 0; /* Undefined */
62 }
63
64 if (oe) {
65 if (unlikely(overflow)) {
66 env->so = env->ov = 1;
67 } else {
68 env->ov = 0;
69 }
70 }
71
72 return (target_ulong)rt;
73}
74
75target_ulong helper_divwe(CPUPPCState *env, target_ulong ra, target_ulong rb,
76 uint32_t oe)
77{
78 int64_t rt = 0;
79 int overflow = 0;
80
81 int64_t dividend = (int64_t)ra << 32;
82 int64_t divisor = (int64_t)((int32_t)rb);
83
84 if (unlikely((divisor == 0) ||
85 ((divisor == -1ull) && (dividend == INT64_MIN)))) {
86 overflow = 1;
87 } else {
88 rt = dividend / divisor;
89 overflow = rt != (int32_t)rt;
90 }
91
92 if (unlikely(overflow)) {
93 rt = 0; /* Undefined */
94 }
95
96 if (oe) {
97 if (unlikely(overflow)) {
98 env->so = env->ov = 1;
99 } else {
100 env->ov = 0;
101 }
102 }
103
104 return (target_ulong)rt;
105}
106
107#if defined(TARGET_PPC64)
108
109uint64_t helper_divdeu(CPUPPCState *env, uint64_t ra, uint64_t rb, uint32_t oe)
110{
111 uint64_t rt = 0;
112 int overflow = 0;
113
114 overflow = divu128(&rt, &ra, rb);
115
116 if (unlikely(overflow)) {
117 rt = 0; /* Undefined */
118 }
119
120 if (oe) {
121 if (unlikely(overflow)) {
122 env->so = env->ov = 1;
123 } else {
124 env->ov = 0;
125 }
126 }
127
128 return rt;
129}
130
131uint64_t helper_divde(CPUPPCState *env, uint64_t rau, uint64_t rbu, uint32_t oe)
132{
133 int64_t rt = 0;
134 int64_t ra = (int64_t)rau;
135 int64_t rb = (int64_t)rbu;
136 int overflow = divs128(&rt, &ra, rb);
137
138 if (unlikely(overflow)) {
139 rt = 0; /* Undefined */
140 }
141
142 if (oe) {
143
144 if (unlikely(overflow)) {
145 env->so = env->ov = 1;
146 } else {
147 env->ov = 0;
148 }
149 }
150
151 return rt;
152}
153
154#endif
155
156
/* cntlzw: count leading zeros of the low 32 bits of t.  Delegates to
 * clz32() from qemu/host-utils.h; any upper bits of a 64-bit
 * target_ulong are discarded by the uint32_t parameter conversion. */
target_ulong helper_cntlzw(target_ulong t)
{
    return clz32(t);
}
161
162#if defined(TARGET_PPC64)
/* cntlzd: count leading zeros of the full 64-bit value (clz64 from
 * qemu/host-utils.h). */
target_ulong helper_cntlzd(target_ulong t)
{
    return clz64(t);
}
167#endif
168
169#if defined(TARGET_PPC64)
170
/* bpermd: Bit Permute Doubleword.  Byte i of rs holds a bit index into
 * rb (big-endian bit numbering: 0 selects the MSB).  Bit i of the result
 * is the selected bit of rb, or 0 when the index is out of range (>= 64). */
uint64_t helper_bpermd(uint64_t rs, uint64_t rb)
{
    uint64_t ra = 0;
    int byte;

    for (byte = 0; byte < 8; byte++) {
        unsigned sel = (rs >> (byte * 8)) & 0xFF;

        if (sel < 64 && (rb & (1ull << (63 - sel)))) {
            ra |= 1u << byte;
        }
    }
    return ra;
}
186
187#endif
188
189target_ulong helper_cmpb(target_ulong rs, target_ulong rb)
190{
191 target_ulong mask = 0xff;
192 target_ulong ra = 0;
193 int i;
194
195 for (i = 0; i < sizeof(target_ulong); i++) {
196 if ((rs & mask) == (rb & mask)) {
197 ra |= mask;
198 }
199 mask <<= 8;
200 }
201 return ra;
202}
203
204/* shift right arithmetic helper */
205target_ulong helper_sraw(CPUPPCState *env, target_ulong value,
206 target_ulong shift)
207{
208 int32_t ret;
209
210 if (likely(!(shift & 0x20))) {
211 if (likely((uint32_t)shift != 0)) {
212 shift &= 0x1f;
213 ret = (int32_t)value >> shift;
214 if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) {
215 env->ca = 0;
216 } else {
217 env->ca = 1;
218 }
219 } else {
220 ret = (int32_t)value;
221 env->ca = 0;
222 }
223 } else {
224 ret = (int32_t)value >> 31;
225 env->ca = (ret != 0);
226 }
227 return (target_long)ret;
228}
229
230#if defined(TARGET_PPC64)
231target_ulong helper_srad(CPUPPCState *env, target_ulong value,
232 target_ulong shift)
233{
234 int64_t ret;
235
236 if (likely(!(shift & 0x40))) {
237 if (likely((uint64_t)shift != 0)) {
238 shift &= 0x3f;
239 ret = (int64_t)value >> shift;
240 if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) {
241 env->ca = 0;
242 } else {
243 env->ca = 1;
244 }
245 } else {
246 ret = (int64_t)value;
247 env->ca = 0;
248 }
249 } else {
250 ret = (int64_t)value >> 63;
251 env->ca = (ret != 0);
252 }
253 return ret;
254}
255#endif
256
#if defined(TARGET_PPC64)
/* popcntb: SWAR population count stopped at the 4-bit stage, which
 * leaves an independent bit count in every byte of val. */
target_ulong helper_popcntb(target_ulong val)
{
    val = (val & 0x5555555555555555ULL) + ((val >> 1) &
                                          0x5555555555555555ULL);
    val = (val & 0x3333333333333333ULL) + ((val >> 2) &
                                          0x3333333333333333ULL);
    val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >> 4) &
                                          0x0f0f0f0f0f0f0f0fULL);
    return val;
}

/* popcntw: same ladder continued to the 16-bit stage, leaving an
 * independent count in each 32-bit word. */
target_ulong helper_popcntw(target_ulong val)
{
    val = (val & 0x5555555555555555ULL) + ((val >> 1) &
                                          0x5555555555555555ULL);
    val = (val & 0x3333333333333333ULL) + ((val >> 2) &
                                          0x3333333333333333ULL);
    val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >> 4) &
                                          0x0f0f0f0f0f0f0f0fULL);
    val = (val & 0x00ff00ff00ff00ffULL) + ((val >> 8) &
                                          0x00ff00ff00ff00ffULL);
    val = (val & 0x0000ffff0000ffffULL) + ((val >> 16) &
                                          0x0000ffff0000ffffULL);
    return val;
}

/* popcntd: full 64-bit population count via host-utils. */
target_ulong helper_popcntd(target_ulong val)
{
    return ctpop64(val);
}
#else
/* 32-bit target_ulong variants of the same per-byte count... */
target_ulong helper_popcntb(target_ulong val)
{
    val = (val & 0x55555555) + ((val >> 1) & 0x55555555);
    val = (val & 0x33333333) + ((val >> 2) & 0x33333333);
    val = (val & 0x0f0f0f0f) + ((val >> 4) & 0x0f0f0f0f);
    return val;
}

/* ...and the full 32-bit word count. */
target_ulong helper_popcntw(target_ulong val)
{
    val = (val & 0x55555555) + ((val >> 1) & 0x55555555);
    val = (val & 0x33333333) + ((val >> 2) & 0x33333333);
    val = (val & 0x0f0f0f0f) + ((val >> 4) & 0x0f0f0f0f);
    val = (val & 0x00ff00ff) + ((val >> 8) & 0x00ff00ff);
    val = (val & 0x0000ffff) + ((val >> 16) & 0x0000ffff);
    return val;
}
#endif
307
308/*****************************************************************************/
309/* PowerPC 601 specific instructions (POWER bridge) */
/* POWER (601 bridge) "div": divides the 64-bit value arg1:MQ by the low
 * 32 bits of arg2; the quotient is returned and the remainder is left
 * in MQ.  Divide by zero and INT32_MIN / -1 return INT32_MIN with MQ=0. */
target_ulong helper_div(CPUPPCState *env, target_ulong arg1, target_ulong arg2)
{
    uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];

    /* NOTE(review): the overflow test inspects only the low word of the
     * dividend, and the % below is an unsigned 64-bit operation while
     * the / mixes uint64_t with (int32_t) -- presumably this mirrors
     * 601 bridge behaviour; confirm against hardware before changing. */
    if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->spr[SPR_MQ] = tmp % arg2;
        return tmp / (int32_t)arg2;
    }
}
323
/* POWER "divo": as helper_div but also maintains OV/SO; overflow is
 * additionally flagged when the quotient does not fit in 32 bits. */
target_ulong helper_divo(CPUPPCState *env, target_ulong arg1,
                         target_ulong arg2)
{
    uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];

    if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        /* Divide by zero / INT32_MIN div -1: flag overflow, MQ = 0. */
        env->so = env->ov = 1;
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        /* NOTE(review): mixed signed/unsigned arithmetic kept as-is;
         * see the matching note on helper_div. */
        env->spr[SPR_MQ] = tmp % arg2;
        tmp /= (int32_t)arg2;
        if ((int32_t)tmp != tmp) {
            /* Quotient does not fit in 32 bits. */
            env->so = env->ov = 1;
        } else {
            env->ov = 0;
        }
        return tmp;
    }
}
345
346target_ulong helper_divs(CPUPPCState *env, target_ulong arg1,
347 target_ulong arg2)
348{
349 if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
350 (int32_t)arg2 == 0) {
351 env->spr[SPR_MQ] = 0;
352 return INT32_MIN;
353 } else {
354 env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
355 return (int32_t)arg1 / (int32_t)arg2;
356 }
357}
358
359target_ulong helper_divso(CPUPPCState *env, target_ulong arg1,
360 target_ulong arg2)
361{
362 if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
363 (int32_t)arg2 == 0) {
364 env->so = env->ov = 1;
365 env->spr[SPR_MQ] = 0;
366 return INT32_MIN;
367 } else {
368 env->ov = 0;
369 env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
370 return (int32_t)arg1 / (int32_t)arg2;
371 }
372}
373
374/*****************************************************************************/
375/* 602 specific instructions */
376/* mfrom is the most crazy instruction ever seen, imho ! */
377/* Real implementation uses a ROM table. Do the same */
378/* Extremely decomposed:
379 * -arg / 256
380 * return 256 * log10(10 + 1.0) + 0.5
381 */
382#if !defined(CONFIG_USER_ONLY)
/* mfrom: table lookup of 256 * log10(arg/256 + 1.0) + 0.5, matching the
 * ROM table of real 602 hardware; out-of-range inputs return 0. */
target_ulong helper_602_mfrom(target_ulong arg)
{
    if (likely(arg < 602)) {
        /* Pulls in the static mfrom_ROM_table[] definition in-place. */
#include "mfrom_table.c"
        return mfrom_ROM_table[arg];
    } else {
        return 0;
    }
}
392#endif
393
394/*****************************************************************************/
395/* Altivec extension helpers */
/* Index of the architecturally high/low u64 half of a ppc_avr_t; the
 * two halves are stored swapped on little-endian hosts. */
#if defined(HOST_WORDS_BIGENDIAN)
#define HI_IDX 0
#define LO_IDX 1
#else
#define HI_IDX 1
#define LO_IDX 0
#endif

/* Iterate over r's elements in architectural (big-endian) order
 * regardless of host byte order. */
#if defined(HOST_WORDS_BIGENDIAN)
#define VECTOR_FOR_INORDER_I(index, element)                    \
    for (index = 0; index < ARRAY_SIZE(r->element); index++)
#else
#define VECTOR_FOR_INORDER_I(index, element)                    \
    for (index = ARRAY_SIZE(r->element)-1; index >= 0; index--)
#endif

/* Saturating arithmetic helpers.  Each generated cvt##from##to()
 * clamps x into [min, max] and sets *sat when clamping occurred;
 * *sat is never cleared, so one flag can accumulate over a vector. */
#define SATCVT(from, to, from_type, to_type, min, max)          \
    static inline to_type cvt##from##to(from_type x, int *sat)  \
    {                                                           \
        to_type r;                                              \
                                                                \
        if (x < (from_type)min) {                               \
            r = min;                                            \
            *sat = 1;                                           \
        } else if (x > (from_type)max) {                        \
            r = max;                                            \
            *sat = 1;                                           \
        } else {                                                \
            r = x;                                              \
        }                                                       \
        return r;                                               \
    }
/* Unsigned variant: only the upper bound can saturate. */
#define SATCVTU(from, to, from_type, to_type, min, max)         \
    static inline to_type cvt##from##to(from_type x, int *sat)  \
    {                                                           \
        to_type r;                                              \
                                                                \
        if (x > (from_type)max) {                               \
            r = max;                                            \
            *sat = 1;                                           \
        } else {                                                \
            r = x;                                              \
        }                                                       \
        return r;                                               \
    }
SATCVT(sh, sb, int16_t, int8_t, INT8_MIN, INT8_MAX)
SATCVT(sw, sh, int32_t, int16_t, INT16_MIN, INT16_MAX)
SATCVT(sd, sw, int64_t, int32_t, INT32_MIN, INT32_MAX)

SATCVTU(uh, ub, uint16_t, uint8_t, 0, UINT8_MAX)
SATCVTU(uw, uh, uint32_t, uint16_t, 0, UINT16_MAX)
SATCVTU(ud, uw, uint64_t, uint32_t, 0, UINT32_MAX)
SATCVT(sh, ub, int16_t, uint8_t, 0, UINT8_MAX)
SATCVT(sw, uh, int32_t, uint16_t, 0, UINT16_MAX)
SATCVT(sd, uw, int64_t, uint32_t, 0, UINT32_MAX)
#undef SATCVT
#undef SATCVTU
454
/* lvsl: build the permute control vector for an unaligned load; bytes
 * are sh, sh+1, ..., sh+15 in architectural element order. */
void helper_lvsl(ppc_avr_t *r, target_ulong sh)
{
    int i, j = (sh & 0xf);

    VECTOR_FOR_INORDER_I(i, u8) {
        r->u8[i] = j++;
    }
}

/* lvsr: as lvsl but counting from 16 - sh (the shift-right pairing). */
void helper_lvsr(ppc_avr_t *r, target_ulong sh)
{
    int i, j = 0x10 - (sh & 0xf);

    VECTOR_FOR_INORDER_I(i, u8) {
        r->u8[i] = j++;
    }
}
472
/* mtvscr: copy architectural word 3 of r into VSCR and propagate the
 * non-Java (NJ) bit into the softfloat flush-to-zero setting. */
void helper_mtvscr(CPUPPCState *env, ppc_avr_t *r)
{
#if defined(HOST_WORDS_BIGENDIAN)
    env->vscr = r->u32[3];
#else
    env->vscr = r->u32[0];
#endif
    set_flush_to_zero(vscr_nj, &env->vec_status);
}

/* vaddcuw: per-word carry-out of a + b.  The addition carries exactly
 * when b > ~a (i.e. b > UINT32_MAX - a), so no widening is needed. */
void helper_vaddcuw(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
        r->u32[i] = ~a->u32[i] < b->u32[i];
    }
}
491
/* Element-wise modulo add/sub for each integer element width. */
#define VARITH_DO(name, op, element)                                    \
    void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)       \
    {                                                                   \
        int i;                                                          \
                                                                        \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            r->element[i] = a->element[i] op b->element[i];             \
        }                                                               \
    }
#define VARITH(suffix, element)                 \
    VARITH_DO(add##suffix, +, element)          \
    VARITH_DO(sub##suffix, -, element)
VARITH(ubm, u8)
VARITH(uhm, u16)
VARITH(uwm, u32)
VARITH(udm, u64)
#undef VARITH_DO
#undef VARITH

/* Element-wise single-precision float ops through env->vec_status. */
#define VARITHFP(suffix, func)                                          \
    void helper_v##suffix(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, \
                          ppc_avr_t *b)                                 \
    {                                                                   \
        int i;                                                          \
                                                                        \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            r->f[i] = func(a->f[i], b->f[i], &env->vec_status);         \
        }                                                               \
    }
VARITHFP(addfp, float32_add)
VARITHFP(subfp, float32_sub)
VARITHFP(minfp, float32_min)
VARITHFP(maxfp, float32_max)
#undef VARITHFP

/* Fused multiply-add: r = a * c + b with the given softfloat negation
 * flags (nmsubfp negates both c and the final result). */
#define VARITHFPFMA(suffix, type)                                       \
    void helper_v##suffix(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, \
                          ppc_avr_t *b, ppc_avr_t *c)                   \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            r->f[i] = float32_muladd(a->f[i], c->f[i], b->f[i],         \
                                     type, &env->vec_status);           \
        }                                                               \
    }
VARITHFPFMA(maddfp, 0);
VARITHFPFMA(nmsubfp, float_muladd_negate_result | float_muladd_negate_c);
#undef VARITHFPFMA

/* One saturating lane: compute in the wider optype, then clamp. */
#define VARITHSAT_CASE(type, op, cvt, element)                          \
    {                                                                   \
        type result = (type)a->element[i] op (type)b->element[i];       \
        r->element[i] = cvt(result, &sat);                              \
    }

/* Saturating add/sub; VSCR[SAT] is set (sticky) when any lane clamps.
 * NOTE: the switch is vestigial -- all three cases expand identically. */
#define VARITHSAT_DO(name, op, optype, cvt, element)                    \
    void helper_v##name(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,   \
                        ppc_avr_t *b)                                   \
    {                                                                   \
        int sat = 0;                                                    \
        int i;                                                          \
                                                                        \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            switch (sizeof(r->element[0])) {                            \
            case 1:                                                     \
                VARITHSAT_CASE(optype, op, cvt, element);               \
                break;                                                  \
            case 2:                                                     \
                VARITHSAT_CASE(optype, op, cvt, element);               \
                break;                                                  \
            case 4:                                                     \
                VARITHSAT_CASE(optype, op, cvt, element);               \
                break;                                                  \
            }                                                           \
        }                                                               \
        if (sat) {                                                      \
            env->vscr |= (1 << VSCR_SAT);                               \
        }                                                               \
    }
#define VARITHSAT_SIGNED(suffix, element, optype, cvt)          \
    VARITHSAT_DO(adds##suffix##s, +, optype, cvt, element)      \
    VARITHSAT_DO(subs##suffix##s, -, optype, cvt, element)
#define VARITHSAT_UNSIGNED(suffix, element, optype, cvt)        \
    VARITHSAT_DO(addu##suffix##s, +, optype, cvt, element)      \
    VARITHSAT_DO(subu##suffix##s, -, optype, cvt, element)
VARITHSAT_SIGNED(b, s8, int16_t, cvtshsb)
VARITHSAT_SIGNED(h, s16, int32_t, cvtswsh)
VARITHSAT_SIGNED(w, s32, int64_t, cvtsdsw)
VARITHSAT_UNSIGNED(b, u8, uint16_t, cvtshub)
VARITHSAT_UNSIGNED(h, u16, uint32_t, cvtswuh)
VARITHSAT_UNSIGNED(w, u32, uint64_t, cvtsduw)
#undef VARITHSAT_CASE
#undef VARITHSAT_DO
#undef VARITHSAT_SIGNED
#undef VARITHSAT_UNSIGNED

/* Rounded average: (a + b + 1) >> 1 computed in a type one step wider
 * so the +1 cannot overflow. */
#define VAVG_DO(name, element, etype)                                   \
    void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)       \
    {                                                                   \
        int i;                                                          \
                                                                        \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            etype x = (etype)a->element[i] + (etype)b->element[i] + 1;  \
            r->element[i] = x >> 1;                                     \
        }                                                               \
    }

#define VAVG(type, signed_element, signed_type, unsigned_element,       \
             unsigned_type)                                             \
    VAVG_DO(avgs##type, signed_element, signed_type)                    \
    VAVG_DO(avgu##type, unsigned_element, unsigned_type)
VAVG(b, s8, int16_t, u8, uint16_t)
VAVG(h, s16, int32_t, u16, uint32_t)
VAVG(w, s32, int64_t, u32, uint64_t)
#undef VAVG_DO
#undef VAVG
608
/* vcfux/vcfsx: convert integer words to float32, then scale by 2^-uim. */
#define VCF(suffix, cvt, element)                                       \
    void helper_vcf##suffix(CPUPPCState *env, ppc_avr_t *r,             \
                            ppc_avr_t *b, uint32_t uim)                 \
    {                                                                   \
        int i;                                                          \
                                                                        \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            float32 t = cvt(b->element[i], &env->vec_status);           \
            r->f[i] = float32_scalbn(t, -uim, &env->vec_status);        \
        }                                                               \
    }
VCF(ux, uint32_to_float32, u32)
VCF(sx, int32_to_float32, s32)
#undef VCF

/* Integer vector compares: each lane becomes all-ones or all-zeros.
 * With record set, CR6 gets bit 3 = "all lanes true" and bit 1 =
 * "all lanes false" ("none" actually ORs the results, so none == 0
 * means every lane compared false). */
#define VCMP_DO(suffix, compare, element, record)                       \
    void helper_vcmp##suffix(CPUPPCState *env, ppc_avr_t *r,            \
                             ppc_avr_t *a, ppc_avr_t *b)                \
    {                                                                   \
        uint32_t ones = (uint32_t)-1;                                   \
        uint32_t all = ones;                                            \
        uint32_t none = 0;                                              \
        int i;                                                          \
                                                                        \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            uint32_t result = (a->element[i] compare b->element[i] ?    \
                               ones : 0x0);                             \
            switch (sizeof(a->element[0])) {                            \
            case 4:                                                     \
                r->u32[i] = result;                                     \
                break;                                                  \
            case 2:                                                     \
                r->u16[i] = result;                                     \
                break;                                                  \
            case 1:                                                     \
                r->u8[i] = result;                                      \
                break;                                                  \
            }                                                           \
            all &= result;                                              \
            none |= result;                                             \
        }                                                               \
        if (record) {                                                   \
            env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1);       \
        }                                                               \
    }
#define VCMP(suffix, compare, element)          \
    VCMP_DO(suffix, compare, element, 0)        \
    VCMP_DO(suffix##_dot, compare, element, 1)
VCMP(equb, ==, u8)
VCMP(equh, ==, u16)
VCMP(equw, ==, u32)
VCMP(gtub, >, u8)
VCMP(gtuh, >, u16)
VCMP(gtuw, >, u32)
VCMP(gtsb, >, s8)
VCMP(gtsh, >, s16)
VCMP(gtsw, >, s32)
#undef VCMP_DO
#undef VCMP

/* Float vector compares; unordered (NaN) lanes are always false.
 * gefp is expressed as "rel != less", which is true for equal or
 * greater -- i.e. greater-than-or-equal. */
#define VCMPFP_DO(suffix, compare, order, record)                       \
    void helper_vcmp##suffix(CPUPPCState *env, ppc_avr_t *r,            \
                             ppc_avr_t *a, ppc_avr_t *b)                \
    {                                                                   \
        uint32_t ones = (uint32_t)-1;                                   \
        uint32_t all = ones;                                            \
        uint32_t none = 0;                                              \
        int i;                                                          \
                                                                        \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            uint32_t result;                                            \
            int rel = float32_compare_quiet(a->f[i], b->f[i],           \
                                            &env->vec_status);          \
            if (rel == float_relation_unordered) {                      \
                result = 0;                                             \
            } else if (rel compare order) {                             \
                result = ones;                                          \
            } else {                                                    \
                result = 0;                                             \
            }                                                           \
            r->u32[i] = result;                                         \
            all &= result;                                              \
            none |= result;                                             \
        }                                                               \
        if (record) {                                                   \
            env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1);       \
        }                                                               \
    }
#define VCMPFP(suffix, compare, order)          \
    VCMPFP_DO(suffix, compare, order, 0)        \
    VCMPFP_DO(suffix##_dot, compare, order, 1)
VCMPFP(eqfp, ==, float_relation_equal)
VCMPFP(gefp, !=, float_relation_less)
VCMPFP(gtfp, ==, float_relation_greater)
#undef VCMPFP_DO
#undef VCMPFP
705
/* vcmpbfp: bounds compare.  For each lane, bit 31 of the result is set
 * when a > b ("not <= b") and bit 30 when a < -b ("not >= -b"); NaN in
 * either operand sets both.  When record is set, CR6 bit 1 reports
 * "all lanes fully within bounds". */
static inline void vcmpbfp_internal(CPUPPCState *env, ppc_avr_t *r,
                                    ppc_avr_t *a, ppc_avr_t *b, int record)
{
    int i;
    int all_in = 0;

    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        int le_rel = float32_compare_quiet(a->f[i], b->f[i], &env->vec_status);
        if (le_rel == float_relation_unordered) {
            r->u32[i] = 0xc0000000;
            /* ALL_IN does not need to be updated here. */
        } else {
            float32 bneg = float32_chs(b->f[i]);
            int ge_rel = float32_compare_quiet(a->f[i], bneg, &env->vec_status);
            int le = le_rel != float_relation_greater;
            int ge = ge_rel != float_relation_less;

            r->u32[i] = ((!le) << 31) | ((!ge) << 30);
            all_in |= (!le | !ge);
        }
    }
    if (record) {
        env->crf[6] = (all_in == 0) << 1;
    }
}

/* Non-recording form of vcmpbfp. */
void helper_vcmpbfp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    vcmpbfp_internal(env, r, a, b, 0);
}

/* Recording (".") form: also updates CR6. */
void helper_vcmpbfp_dot(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
                        ppc_avr_t *b)
{
    vcmpbfp_internal(env, r, a, b, 1);
}
742
743#define VCT(suffix, satcvt, element) \
744 void helper_vct##suffix(CPUPPCState *env, ppc_avr_t *r, \
745 ppc_avr_t *b, uint32_t uim) \
746 { \
747 int i; \
748 int sat = 0; \
749 float_status s = env->vec_status; \
750 \
751 set_float_rounding_mode(float_round_to_zero, &s); \
752 for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
753 if (float32_is_any_nan(b->f[i])) { \
754 r->element[i] = 0; \
755 } else { \
756 float64 t = float32_to_float64(b->f[i], &s); \
757 int64_t j; \
758 \
759 t = float64_scalbn(t, uim, &s); \
760 j = float64_to_int64(t, &s); \
761 r->element[i] = satcvt(j, &sat); \
762 } \
763 } \
764 if (sat) { \
765 env->vscr |= (1 << VSCR_SAT); \
766 } \
767 }
768VCT(uxs, cvtsduw, u32)
769VCT(sxs, cvtsdsw, s32)
770#undef VCT
771
/* vmhaddshs: per halfword, (a*b >> 15) + c, saturated to int16 with
 * VSCR[SAT] made sticky on any clamp. */
void helper_vmhaddshs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
                      ppc_avr_t *b, ppc_avr_t *c)
{
    int sat = 0;
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        int32_t prod = a->s16[i] * b->s16[i];
        int32_t t = (int32_t)c->s16[i] + (prod >> 15);

        r->s16[i] = cvtswsh(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}

/* vmhraddshs: as vmhaddshs but the product is rounded by adding
 * 0x4000 before the >> 15. */
void helper_vmhraddshs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
                       ppc_avr_t *b, ppc_avr_t *c)
{
    int sat = 0;
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        int32_t prod = a->s16[i] * b->s16[i] + 0x00004000;
        int32_t t = (int32_t)c->s16[i] + (prod >> 15);
        r->s16[i] = cvtswsh(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
806
/* Element-wise min/max: min uses ">" (keep b when a > b), max uses "<". */
#define VMINMAX_DO(name, compare, element)                              \
    void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)       \
    {                                                                   \
        int i;                                                          \
                                                                        \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            if (a->element[i] compare b->element[i]) {                  \
                r->element[i] = b->element[i];                          \
            } else {                                                    \
                r->element[i] = a->element[i];                          \
            }                                                           \
        }                                                               \
    }
#define VMINMAX(suffix, element)                \
    VMINMAX_DO(min##suffix, >, element)         \
    VMINMAX_DO(max##suffix, <, element)
VMINMAX(sb, s8)
VMINMAX(sh, s16)
VMINMAX(sw, s32)
VMINMAX(ub, u8)
VMINMAX(uh, u16)
VMINMAX(uw, u32)
#undef VMINMAX_DO
#undef VMINMAX

/* vmladduhm: per halfword multiply-low then add c, modulo 2^16. */
void helper_vmladduhm(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        int32_t prod = a->s16[i] * b->s16[i];
        r->s16[i] = (int16_t) (prod + c->s16[i]);
    }
}
841
/* Merge (interleave) elements from a and b into r, building the result
 * in a temporary so r may alias a or b. */
#define VMRG_DO(name, element, highp)                                   \
    void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)       \
    {                                                                   \
        ppc_avr_t result;                                               \
        int i;                                                          \
        size_t n_elems = ARRAY_SIZE(r->element);                        \
                                                                        \
        for (i = 0; i < n_elems / 2; i++) {                             \
            if (highp) {                                                \
                result.element[i*2+HI_IDX] = a->element[i];             \
                result.element[i*2+LO_IDX] = b->element[i];             \
            } else {                                                    \
                result.element[n_elems - i * 2 - (1 + HI_IDX)] =        \
                    b->element[n_elems - i - 1];                        \
                result.element[n_elems - i * 2 - (1 + LO_IDX)] =        \
                    a->element[n_elems - i - 1];                        \
            }                                                           \
        }                                                               \
        *r = result;                                                    \
    }
#if defined(HOST_WORDS_BIGENDIAN)
#define MRGHI 0
#define MRGLO 1
#else
#define MRGHI 1
#define MRGLO 0
#endif
/* NOTE(review): mrgl deliberately expands with MRGHI and mrgh with
 * MRGLO; together with the host-order element storage this produces
 * the architectural merge-high/merge-low results -- confirm against
 * the ISA before reorganizing this pairing. */
#define VMRG(suffix, element)                   \
    VMRG_DO(mrgl##suffix, element, MRGHI)       \
    VMRG_DO(mrgh##suffix, element, MRGLO)
VMRG(b, u8)
VMRG(h, u16)
VMRG(w, u32)
#undef VMRG_DO
#undef VMRG
#undef MRGHI
#undef MRGLO
879
/* vmsummbm: multiply signed bytes of a by unsigned bytes of b, then per
 * word sum the four products and add c (modulo arithmetic). */
void helper_vmsummbm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
                     ppc_avr_t *b, ppc_avr_t *c)
{
    int32_t prod[16];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s8); i++) {
        prod[i] = (int32_t)a->s8[i] * b->u8[i];
    }

    VECTOR_FOR_INORDER_I(i, s32) {
        r->s32[i] = c->s32[i] + prod[4 * i] + prod[4 * i + 1] +
            prod[4 * i + 2] + prod[4 * i + 3];
    }
}

/* vmsumshm: signed halfword multiply-sum per word, modulo. */
void helper_vmsumshm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
                     ppc_avr_t *b, ppc_avr_t *c)
{
    int32_t prod[8];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        prod[i] = a->s16[i] * b->s16[i];
    }

    VECTOR_FOR_INORDER_I(i, s32) {
        r->s32[i] = c->s32[i] + prod[2 * i] + prod[2 * i + 1];
    }
}

/* vmsumshs: as vmsumshm but the per-word sum is formed in 64 bits and
 * saturated to int32, setting VSCR[SAT] on clamp. */
void helper_vmsumshs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
                     ppc_avr_t *b, ppc_avr_t *c)
{
    int32_t prod[8];
    int i;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        prod[i] = (int32_t)a->s16[i] * b->s16[i];
    }

    VECTOR_FOR_INORDER_I(i, s32) {
        int64_t t = (int64_t)c->s32[i] + prod[2 * i] + prod[2 * i + 1];

        r->u32[i] = cvtsdsw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}

/* vmsumubm: unsigned byte multiply-sum per word, modulo. */
void helper_vmsumubm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
                     ppc_avr_t *b, ppc_avr_t *c)
{
    uint16_t prod[16];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
        prod[i] = a->u8[i] * b->u8[i];
    }

    VECTOR_FOR_INORDER_I(i, u32) {
        r->u32[i] = c->u32[i] + prod[4 * i] + prod[4 * i + 1] +
            prod[4 * i + 2] + prod[4 * i + 3];
    }
}

/* vmsumuhm: unsigned halfword multiply-sum per word, modulo. */
void helper_vmsumuhm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
                     ppc_avr_t *b, ppc_avr_t *c)
{
    uint32_t prod[8];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
        prod[i] = a->u16[i] * b->u16[i];
    }

    VECTOR_FOR_INORDER_I(i, u32) {
        r->u32[i] = c->u32[i] + prod[2 * i] + prod[2 * i + 1];
    }
}

/* vmsumuhs: as vmsumuhm but summed in 64 bits and saturated to
 * uint32, setting VSCR[SAT] on clamp. */
void helper_vmsumuhs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
                     ppc_avr_t *b, ppc_avr_t *c)
{
    uint32_t prod[8];
    int i;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
        prod[i] = a->u16[i] * b->u16[i];
    }

    VECTOR_FOR_INORDER_I(i, s32) {
        uint64_t t = (uint64_t)c->u32[i] + prod[2 * i] + prod[2 * i + 1];

        r->u32[i] = cvtuduw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
985
/* Widening even/odd multiplies: each product lane multiplies the even
 * (evenp != 0) or odd pair of mul_elements, with both operands widened
 * to the cast (product) type before multiplying. */
#define VMUL_DO(name, mul_element, prod_element, cast, evenp)           \
    void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)       \
    {                                                                   \
        int i;                                                          \
                                                                        \
        VECTOR_FOR_INORDER_I(i, prod_element) {                         \
            if (evenp) {                                                \
                r->prod_element[i] =                                    \
                    (cast)a->mul_element[i * 2 + HI_IDX] *              \
                    (cast)b->mul_element[i * 2 + HI_IDX];               \
            } else {                                                    \
                r->prod_element[i] =                                    \
                    (cast)a->mul_element[i * 2 + LO_IDX] *              \
                    (cast)b->mul_element[i * 2 + LO_IDX];               \
            }                                                           \
        }                                                               \
    }
#define VMUL(suffix, mul_element, prod_element, cast)                   \
    VMUL_DO(mule##suffix, mul_element, prod_element, cast, 1)           \
    VMUL_DO(mulo##suffix, mul_element, prod_element, cast, 0)
VMUL(sb, s8, s16, int16_t)
VMUL(sh, s16, s32, int32_t)
VMUL(ub, u8, u16, uint16_t)
VMUL(uh, u16, u32, uint32_t)
#undef VMUL_DO
#undef VMUL
1012
/* vperm: each result byte selects one of 32 source bytes -- a's 16
 * then b's 16 -- per the low 5 bits of the matching control byte in c.
 * The byte index is mirrored on little-endian hosts so architectural
 * byte numbering is preserved; a temporary allows r to alias a/b/c. */
void helper_vperm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b,
                  ppc_avr_t *c)
{
    ppc_avr_t result;
    int i;

    VECTOR_FOR_INORDER_I(i, u8) {
        int s = c->u8[i] & 0x1f;
#if defined(HOST_WORDS_BIGENDIAN)
        int index = s & 0xf;
#else
        int index = 15 - (s & 0xf);
#endif

        if (s & 0x10) {
            /* Bit 4 set: take the byte from b. */
            result.u8[i] = b->u8[index];
        } else {
            result.u8[i] = a->u8[index];
        }
    }
    *r = result;
}
1035
/* PKBIG: nonzero when operand order already matches architectural
 * (big-endian) pack order on this host. */
#if defined(HOST_WORDS_BIGENDIAN)
#define PKBIG 1
#else
#define PKBIG 0
#endif
/* vpkpx: pack each 8:8:8:8 pixel word down to a 1:5:5:5 halfword
 * (bit 15 plus the top 5 bits of each colour channel). */
void helper_vpkpx(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j;
    ppc_avr_t result;
#if defined(HOST_WORDS_BIGENDIAN)
    const ppc_avr_t *x[2] = { a, b };
#else
    const ppc_avr_t *x[2] = { b, a };
#endif

    VECTOR_FOR_INORDER_I(i, u64) {
        VECTOR_FOR_INORDER_I(j, u32) {
            uint32_t e = x[i]->u32[j];

            result.u16[4*i+j] = (((e >> 9) & 0xfc00) |
                                 ((e >> 6) & 0x3e0)  |
                                 ((e >> 3) & 0x1f));
        }
    }
    *r = result;
}

/* Generic pack: narrow a's then b's elements into one result vector,
 * through a converter that either saturates (SATCVT helpers, with
 * VSCR[SAT] update when dosat) or truncates (the identity macro I). */
#define VPK(suffix, from, to, cvt, dosat)                               \
    void helper_vpk##suffix(CPUPPCState *env, ppc_avr_t *r,             \
                            ppc_avr_t *a, ppc_avr_t *b)                 \
    {                                                                   \
        int i;                                                          \
        int sat = 0;                                                    \
        ppc_avr_t result;                                               \
        ppc_avr_t *a0 = PKBIG ? a : b;                                  \
        ppc_avr_t *a1 = PKBIG ? b : a;                                  \
                                                                        \
        VECTOR_FOR_INORDER_I(i, from) {                                 \
            result.to[i] = cvt(a0->from[i], &sat);                      \
            result.to[i+ARRAY_SIZE(r->from)] = cvt(a1->from[i], &sat);  \
        }                                                               \
        *r = result;                                                    \
        if (dosat && sat) {                                             \
            env->vscr |= (1 << VSCR_SAT);                               \
        }                                                               \
    }
/* Identity "conversion" used by the modulo (non-saturating) packs. */
#define I(x, y) (x)
VPK(shss, s16, s8, cvtshsb, 1)
VPK(shus, s16, u8, cvtshub, 1)
VPK(swss, s32, s16, cvtswsh, 1)
VPK(swus, s32, u16, cvtswuh, 1)
VPK(uhus, u16, u8, cvtuhub, 1)
VPK(uwus, u32, u16, cvtuwuh, 1)
VPK(uhum, u16, u8, I, 0)
VPK(uwum, u32, u16, I, 0)
#undef I
#undef VPK
#undef PKBIG
1094
/* vrefp: reciprocal "estimate", implemented as an exact 1.0 / b. */
void helper_vrefp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *b)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        r->f[i] = float32_div(float32_one, b->f[i], &env->vec_status);
    }
}

/* vrfin/vrfim/vrfip/vrfiz: round to integral float with the given
 * rounding mode, applied on a local copy of the float status. */
#define VRFI(suffix, rounding)                                          \
    void helper_vrfi##suffix(CPUPPCState *env, ppc_avr_t *r,            \
                             ppc_avr_t *b)                              \
    {                                                                   \
        int i;                                                          \
        float_status s = env->vec_status;                               \
                                                                        \
        set_float_rounding_mode(rounding, &s);                          \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            r->f[i] = float32_round_to_int (b->f[i], &s);               \
        }                                                               \
    }
VRFI(n, float_round_nearest_even)
VRFI(m, float_round_down)
VRFI(p, float_round_up)
VRFI(z, float_round_to_zero)
#undef VRFI
1121
/* Element-wise rotate left; the rotate count is taken modulo the
 * element width (mask = 7/15/31 derived from sizeof the element). */
#define VROTATE(suffix, element)                                        \
    void helper_vrl##suffix(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)   \
    {                                                                   \
        int i;                                                          \
                                                                        \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            unsigned int mask = ((1 <<                                  \
                                  (3 + (sizeof(a->element[0]) >> 1)))   \
                                 - 1);                                  \
            unsigned int shift = b->element[i] & mask;                  \
            r->element[i] = (a->element[i] << shift) |                  \
                (a->element[i] >> (sizeof(a->element[0]) * 8 - shift)); \
        }                                                               \
    }
VROTATE(b, u8)
VROTATE(h, u16)
VROTATE(w, u32)
#undef VROTATE

/* vrsqrtefp: reciprocal square-root estimate, computed exactly as
 * 1.0 / sqrt(b). */
void helper_vrsqrtefp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *b)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        float32 t = float32_sqrt(b->f[i], &env->vec_status);

        r->f[i] = float32_div(float32_one, t, &env->vec_status);
    }
}
1151
/* vsel: bitwise select -- where a mask bit in c is 1 take the bit from
 * b, otherwise from a; done per 64-bit half. */
void helper_vsel(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b,
                 ppc_avr_t *c)
{
    r->u64[0] = (a->u64[0] & ~c->u64[0]) | (b->u64[0] & c->u64[0]);
    r->u64[1] = (a->u64[1] & ~c->u64[1]) | (b->u64[1] & c->u64[1]);
}

/* vexptefp: 2^x per element via softfloat's float32_exp2. */
void helper_vexptefp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *b)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        r->f[i] = float32_exp2(b->f[i], &env->vec_status);
    }
}

/* vlogefp: log2(x) per element via softfloat's float32_log2. */
void helper_vlogefp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *b)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        r->f[i] = float32_log2(b->f[i], &env->vec_status);
    }
}
1176
#if defined(HOST_WORDS_BIGENDIAN)
#define LEFT 0
#define RIGHT 1
#else
#define LEFT 1
#define RIGHT 0
#endif
/* The specification says that the results are undefined if all of the
 * shift counts are not identical. We check to make sure that they are
 * to conform to what real hardware appears to do. */
/* vsl/vsr: shift the whole 128-bit vector by 0-7 bits, count taken
 * from the low 3 bits of every byte of b.  If the per-byte counts
 * disagree, r is left unmodified (undefined per the ISA). */
#define VSHIFT(suffix, leftp)                                           \
    void helper_vs##suffix(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)    \
    {                                                                   \
        int shift = b->u8[LO_IDX*15] & 0x7;                             \
        int doit = 1;                                                   \
        int i;                                                          \
                                                                        \
        for (i = 0; i < ARRAY_SIZE(r->u8); i++) {                       \
            doit = doit && ((b->u8[i] & 0x7) == shift);                 \
        }                                                               \
        if (doit) {                                                     \
            if (shift == 0) {                                           \
                *r = *a;                                                \
            } else if (leftp) {                                         \
                /* Carry the bits that cross the u64 boundary. */       \
                uint64_t carry = a->u64[LO_IDX] >> (64 - shift);        \
                                                                        \
                r->u64[HI_IDX] = (a->u64[HI_IDX] << shift) | carry;     \
                r->u64[LO_IDX] = a->u64[LO_IDX] << shift;               \
            } else {                                                    \
                uint64_t carry = a->u64[HI_IDX] << (64 - shift);        \
                                                                        \
                r->u64[LO_IDX] = (a->u64[LO_IDX] >> shift) | carry;     \
                r->u64[HI_IDX] = a->u64[HI_IDX] >> shift;               \
            }                                                           \
        }                                                               \
    }
VSHIFT(l, LEFT)
VSHIFT(r, RIGHT)
#undef VSHIFT
#undef LEFT
#undef RIGHT
1218
/* Element-wise logical shift left, count masked to the element width
 * (mask = 7/15/31 derived from sizeof the element). */
#define VSL(suffix, element)                                            \
    void helper_vsl##suffix(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)   \
    {                                                                   \
        int i;                                                          \
                                                                        \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            unsigned int mask = ((1 <<                                  \
                                  (3 + (sizeof(a->element[0]) >> 1)))   \
                                 - 1);                                  \
            unsigned int shift = b->element[i] & mask;                  \
                                                                        \
            r->element[i] = a->element[i] << shift;                     \
        }                                                               \
    }
VSL(b, u8)
VSL(h, u16)
VSL(w, u32)
#undef VSL
1237
/* vsldoi: shift the 32-byte concatenation of a and b left by 'shift'
 * (0-15) bytes and return the most-significant 16 bytes. */
void helper_vsldoi(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t shift)
{
    int sh = shift & 0xf;
    int i;
    ppc_avr_t result;   /* built locally so r may alias a or b */

#if defined(HOST_WORDS_BIGENDIAN)
    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
        int index = sh + i;
        if (index > 0xf) {
            /* past the end of a: take from the start of b */
            result.u8[i] = b->u8[index - 0x10];
        } else {
            result.u8[i] = a->u8[index];
        }
    }
#else
    /* Little-endian hosts store the vector bytes in reverse order, so
     * walk the concatenation from the other end with roles swapped. */
    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
        int index = (16 - sh) + i;
        if (index > 0xf) {
            result.u8[i] = a->u8[index - 0x10];
        } else {
            result.u8[i] = b->u8[index];
        }
    }
#endif
    *r = result;
}
1265
/* vslo: shift the whole vector left by octets.  The 4-bit count comes
 * from bits 3-6 of one byte of b; low-order bytes are zero-filled. */
void helper_vslo(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;

#if defined(HOST_WORDS_BIGENDIAN)
    /* memmove, not memcpy: r may alias a */
    memmove(&r->u8[0], &a->u8[sh], 16 - sh);
    memset(&r->u8[16-sh], 0, sh);
#else
    memmove(&r->u8[sh], &a->u8[0], 16 - sh);
    memset(&r->u8[0], 0, sh);
#endif
}
1278
/* Experimental testing shows that hardware masks the immediate. */
#define _SPLAT_MASKED(element) (splat & (ARRAY_SIZE(r->element) - 1))
#if defined(HOST_WORDS_BIGENDIAN)
#define SPLAT_ELEMENT(element) _SPLAT_MASKED(element)
#else
/* Little-endian hosts store elements in reverse order, so mirror the
 * index across the vector. */
#define SPLAT_ELEMENT(element) \
    (ARRAY_SIZE(r->element) - 1 - _SPLAT_MASKED(element))
#endif
/* vsplt[bhw]: replicate the element of b selected by the (masked)
 * immediate into every element of r. */
#define VSPLT(suffix, element)                                          \
    void helper_vsplt##suffix(ppc_avr_t *r, ppc_avr_t *b, uint32_t splat) \
    {                                                                   \
        uint32_t s = b->element[SPLAT_ELEMENT(element)];                \
        int i;                                                          \
                                                                        \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            r->element[i] = s;                                          \
        }                                                               \
    }
VSPLT(b, u8)
VSPLT(h, u16)
VSPLT(w, u32)
#undef VSPLT
#undef SPLAT_ELEMENT
#undef _SPLAT_MASKED
1303
/* vspltis[bhw]: splat a sign-extended 5-bit signed immediate into
 * every element.  Shifting left by 3 inside an int8_t and then
 * arithmetic-shifting back right by 3 sign-extends the 5-bit value. */
#define VSPLTI(suffix, element, splat_type)                     \
    void helper_vspltis##suffix(ppc_avr_t *r, uint32_t splat)   \
    {                                                           \
        splat_type x = (int8_t)(splat << 3) >> 3;               \
        int i;                                                  \
                                                                \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {          \
            r->element[i] = x;                                  \
        }                                                       \
    }
VSPLTI(b, s8, int8_t)
VSPLTI(h, s16, int16_t)
VSPLTI(w, s32, int32_t)
#undef VSPLTI
1318
/* vsr/vsra: per-element right shift.  Instantiated with signed element
 * types this yields an arithmetic shift (vsra[bhw]); with unsigned
 * types a logical shift (vsr[bhw]).  (Right-shifting a negative value
 * is implementation-defined in C, but arithmetic on supported hosts.)
 * The mask limits the count to the element width: 0x7, 0xf or 0x1f. */
#define VSR(suffix, element)                                            \
    void helper_vsr##suffix(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)   \
    {                                                                   \
        int i;                                                          \
                                                                        \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            unsigned int mask = ((1 <<                                  \
                                  (3 + (sizeof(a->element[0]) >> 1)))   \
                                 - 1);                                  \
            unsigned int shift = b->element[i] & mask;                  \
                                                                        \
            r->element[i] = a->element[i] >> shift;                     \
        }                                                               \
    }
VSR(ab, s8)
VSR(ah, s16)
VSR(aw, s32)
VSR(b, u8)
VSR(h, u16)
VSR(w, u32)
#undef VSR
1340
/* vsro: shift the whole vector right by octets, mirroring helper_vslo.
 * The 4-bit count comes from bits 3-6 of one byte of b; high-order
 * bytes are zero-filled. */
void helper_vsro(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int sh = (b->u8[LO_IDX * 0xf] >> 3) & 0xf;

#if defined(HOST_WORDS_BIGENDIAN)
    /* memmove, not memcpy: r may alias a */
    memmove(&r->u8[sh], &a->u8[0], 16 - sh);
    memset(&r->u8[0], 0, sh);
#else
    memmove(&r->u8[0], &a->u8[sh], 16 - sh);
    memset(&r->u8[16 - sh], 0, sh);
#endif
}
1353
1354void helper_vsubcuw(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
1355{
1356 int i;
1357
1358 for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
1359 r->u32[i] = a->u32[i] >= b->u32[i];
1360 }
1361}
1362
/* vsumsws: sum all four signed words of a together with the last
 * signed word of b, saturate to 32 bits, and place the result in the
 * last word of r (the other words are zeroed).  Sets VSCR[SAT] if the
 * sum saturated. */
void helper_vsumsws(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int64_t t;
    int i, upper;
    ppc_avr_t result;   /* built locally so r may alias a or b */
    int sat = 0;

#if defined(HOST_WORDS_BIGENDIAN)
    upper = ARRAY_SIZE(r->s32)-1;
#else
    upper = 0;
#endif
    t = (int64_t)b->s32[upper];
    for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
        t += a->s32[i];
        result.s32[i] = 0;
    }
    /* cvtsdsw saturates the 64-bit sum to int32 and records overflow */
    result.s32[upper] = cvtsdsw(t, &sat);
    *r = result;

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
1387
/* vsum2sws: for each doubleword, sum its two signed words from a with
 * one signed word of b's matching doubleword, saturating to 32 bits.
 * Sets VSCR[SAT] if any sum saturated. */
void helper_vsum2sws(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j, upper;
    ppc_avr_t result;   /* built locally so r may alias a or b */
    int sat = 0;

#if defined(HOST_WORDS_BIGENDIAN)
    upper = 1;
#else
    upper = 0;
#endif
    for (i = 0; i < ARRAY_SIZE(r->u64); i++) {
        int64_t t = (int64_t)b->s32[upper + i * 2];

        result.u64[i] = 0;
        /* NOTE: the bound ARRAY_SIZE(r->u64) happens to equal 2, the
         * number of words per doubleword being summed. */
        for (j = 0; j < ARRAY_SIZE(r->u64); j++) {
            t += a->s32[2 * i + j];
        }
        result.s32[upper + i * 2] = cvtsdsw(t, &sat);
    }

    *r = result;
    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
1414
/* vsum4sbs: for each word, sum the four signed bytes of a in that word
 * with the matching signed word of b, saturating the result to 32 bits.
 * Sets VSCR[SAT] if any element saturated. */
void helper_vsum4sbs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
        int64_t t = (int64_t)b->s32[i];

        /* NOTE: the bound ARRAY_SIZE(r->s32) happens to equal 4, the
         * number of bytes per word being summed. */
        for (j = 0; j < ARRAY_SIZE(r->s32); j++) {
            t += a->s8[4 * i + j];
        }
        r->s32[i] = cvtsdsw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
1433
1434void helper_vsum4shs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
1435{
1436 int sat = 0;
1437 int i;
1438
1439 for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
1440 int64_t t = (int64_t)b->s32[i];
1441
1442 t += a->s16[2 * i] + a->s16[2 * i + 1];
1443 r->s32[i] = cvtsdsw(t, &sat);
1444 }
1445
1446 if (sat) {
1447 env->vscr |= (1 << VSCR_SAT);
1448 }
1449}
1450
/* vsum4ubs: for each word, sum the four unsigned bytes of a in that
 * word with the matching unsigned word of b, saturating the result to
 * 32 bits (unsigned).  Sets VSCR[SAT] if any element saturated. */
void helper_vsum4ubs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
        uint64_t t = (uint64_t)b->u32[i];

        /* NOTE: the bound ARRAY_SIZE(r->u32) happens to equal 4, the
         * number of bytes per word being summed. */
        for (j = 0; j < ARRAY_SIZE(r->u32); j++) {
            t += a->u8[4 * i + j];
        }
        /* cvtuduw saturates the 64-bit sum to uint32 and records overflow */
        r->u32[i] = cvtuduw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
1469
1470#if defined(HOST_WORDS_BIGENDIAN)
1471#define UPKHI 1
1472#define UPKLO 0
1473#else
1474#define UPKHI 0
1475#define UPKLO 1
1476#endif
1477#define VUPKPX(suffix, hi) \
1478 void helper_vupk##suffix(ppc_avr_t *r, ppc_avr_t *b) \
1479 { \
1480 int i; \
1481 ppc_avr_t result; \
1482 \
1483 for (i = 0; i < ARRAY_SIZE(r->u32); i++) { \
1484 uint16_t e = b->u16[hi ? i : i+4]; \
1485 uint8_t a = (e >> 15) ? 0xff : 0; \
1486 uint8_t r = (e >> 10) & 0x1f; \
1487 uint8_t g = (e >> 5) & 0x1f; \
1488 uint8_t b = e & 0x1f; \
1489 \
1490 result.u32[i] = (a << 24) | (r << 16) | (g << 8) | b; \
1491 } \
1492 *r = result; \
1493 }
1494VUPKPX(lpx, UPKLO)
1495VUPKPX(hpx, UPKHI)
1496#undef VUPKPX
1497
/* vupk[hl]s[bh]: sign-extend each element of the high (hi=1) or low
 * (hi=0) half of b into a double-width signed element of r.  The
 * implicit conversion in the assignment performs the sign extension. */
#define VUPK(suffix, unpacked, packee, hi)                              \
    void helper_vupk##suffix(ppc_avr_t *r, ppc_avr_t *b)                \
    {                                                                   \
        int i;                                                          \
        ppc_avr_t result;   /* local copy: r may alias b */             \
                                                                        \
        if (hi) {                                                       \
            for (i = 0; i < ARRAY_SIZE(r->unpacked); i++) {             \
                result.unpacked[i] = b->packee[i];                      \
            }                                                           \
        } else {                                                        \
            for (i = ARRAY_SIZE(r->unpacked); i < ARRAY_SIZE(r->packee); \
                 i++) {                                                 \
                result.unpacked[i - ARRAY_SIZE(r->unpacked)] = b->packee[i]; \
            }                                                           \
        }                                                               \
        *r = result;                                                    \
    }
VUPK(hsb, s16, s8, UPKHI)
VUPK(hsh, s32, s16, UPKHI)
VUPK(lsb, s16, s8, UPKLO)
VUPK(lsh, s32, s16, UPKLO)
#undef VUPK
#undef UPKHI
#undef UPKLO
1523
1524#undef VECTOR_FOR_INORDER_I
1525#undef HI_IDX
1526#undef LO_IDX
1527
1528/*****************************************************************************/
1529/* SPE extension helpers */
1530/* Use a table to make this quicker */
/* Use a table to make this quicker: hbrev[n] is the 4-bit reversal of n. */
static const uint8_t hbrev[16] = {
    0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE,
    0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF,
};

/* Reverse the bit order of a byte via two nibble-table lookups. */
static inline uint8_t byte_reverse(uint8_t val)
{
    return hbrev[val >> 4] | (hbrev[val & 0xF] << 4);
}

/* Reverse the bit order of a 32-bit word. */
static inline uint32_t word_reverse(uint32_t val)
{
    /* The uint32_t cast matters: byte_reverse() promotes to int, and
     * left-shifting a byte with bit 7 set by 24 would overflow the
     * sign bit of a 32-bit int — undefined behavior (C99 6.5.7). */
    return byte_reverse(val >> 24) | (byte_reverse(val >> 16) << 8) |
        (byte_reverse(val >> 8) << 16) |
        ((uint32_t)byte_reverse(val) << 24);
}
1546
#define MASKBITS 16 /* Random value - to be fixed (implementation dependent) */
/* brinc (SPE): bit-reversed increment, used for FFT-style addressing.
 * The bits of arg1 selected by the mask in arg2 are bit-reversed,
 * incremented, reversed back, and merged into arg1; bits outside the
 * mask pass through unchanged.  Only the low MASKBITS bits of the mask
 * are honored here (implementation-dependent on real hardware). */
target_ulong helper_brinc(target_ulong arg1, target_ulong arg2)
{
    uint32_t a, b, d, mask;

    mask = UINT32_MAX >> (32 - MASKBITS);
    a = arg1 & mask;
    b = arg2 & mask;
    /* ~b forces carries to propagate only through masked bit positions */
    d = word_reverse(1 + word_reverse(a | ~b));
    return (arg1 & ~mask) | (d & b);
}
1558
1559uint32_t helper_cntlsw32(uint32_t val)
1560{
1561 if (val & 0x80000000) {
1562 return clz32(~val);
1563 } else {
1564 return clz32(val);
1565 }
1566}
1567
/* Count the leading zero bits of a 32-bit value (SPE cntlzw). */
uint32_t helper_cntlzw32(uint32_t val)
{
    return clz32(val);
}
1572
1573/* 440 specific */
1574target_ulong helper_dlmzb(CPUPPCState *env, target_ulong high,
1575 target_ulong low, uint32_t update_Rc)
1576{
1577 target_ulong mask;
1578 int i;
1579
1580 i = 1;
1581 for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
1582 if ((high & mask) == 0) {
1583 if (update_Rc) {
1584 env->crf[0] = 0x4;
1585 }
1586 goto done;
1587 }
1588 i++;
1589 }
1590 for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
1591 if ((low & mask) == 0) {
1592 if (update_Rc) {
1593 env->crf[0] = 0x8;
1594 }
1595 goto done;
1596 }
1597 i++;
1598 }
1599 if (update_Rc) {
1600 env->crf[0] = 0x2;
1601 }
1602 done:
1603 env->xer = (env->xer & ~0x7F) | i;
1604 if (update_Rc) {
1605 env->crf[0] |= xer_so;
1606 }
1607 return i;
1608}
This page took 0.036459 seconds and 4 git commands to generate.