1 /*
2  *  PowerPC integer and vector emulation helpers for QEMU.
3  *
4  *  Copyright (c) 2003-2007 Jocelyn Mayer
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #include "cpu.h"
20 #include "qemu/host-utils.h"
21 #include "helper.h"
22
23 #include "helper_regs.h"
24 /*****************************************************************************/
25 /* Fixed point operations helpers */
26 #if defined(TARGET_PPC64)
27
28 uint64_t helper_mulldo(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
29 {
30     int64_t th;
31     uint64_t tl;
32
33     muls64(&tl, (uint64_t *)&th, arg1, arg2);
34     /* If th is not the sign extension of tl's top bit, we overflowed */
35     if (likely(th == ((int64_t)tl >> 63))) {
36         env->ov = 0;
37     } else {
38         env->so = env->ov = 1;
39     }
40     return (int64_t)tl;
41 }
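/* Worked example for the check above (illustrative): 0x8000000000000000 * 2
 * produces th = -1 and tl = 0.  Since tl's sign bit is clear, th is not the
 * sign extension of tl, so the product does not fit in 64 bits and OV/SO
 * are set.  */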
42 #endif
43
44 target_ulong helper_cntlzw(target_ulong t)
45 {
46     return clz32(t);
47 }
48
49 #if defined(TARGET_PPC64)
50 target_ulong helper_cntlzd(target_ulong t)
51 {
52     return clz64(t);
53 }
54 #endif
55
56 target_ulong helper_cmpb(target_ulong rs, target_ulong rb)
57 {
58     target_ulong mask = 0xff;
59     target_ulong ra = 0;
60     int i;
61
62     for (i = 0; i < sizeof(target_ulong); i++) {
63         if ((rs & mask) == (rb & mask)) {
64             ra |= mask;
65         }
66         mask <<= 8;
67     }
68     return ra;
69 }
70
71 /* shift right arithmetic helper */
72 target_ulong helper_sraw(CPUPPCState *env, target_ulong value,
73                          target_ulong shift)
74 {
75     int32_t ret;
76
77     if (likely(!(shift & 0x20))) {
78         if (likely((uint32_t)shift != 0)) {
79             shift &= 0x1f;
80             ret = (int32_t)value >> shift;
81             if (likely(ret >= 0 || (value & ((1U << shift) - 1)) == 0)) {
82                 env->ca = 0;
83             } else {
84                 env->ca = 1;
85             }
86         } else {
87             ret = (int32_t)value;
88             env->ca = 0;
89         }
90     } else {
91         ret = (int32_t)value >> 31;
92         env->ca = (ret != 0);
93     }
94     return (target_long)ret;
95 }
96
97 #if defined(TARGET_PPC64)
98 target_ulong helper_srad(CPUPPCState *env, target_ulong value,
99                          target_ulong shift)
100 {
101     int64_t ret;
102
103     if (likely(!(shift & 0x40))) {
104         if (likely((uint64_t)shift != 0)) {
105             shift &= 0x3f;
106             ret = (int64_t)value >> shift;
107             if (likely(ret >= 0 || (value & ((1ULL << shift) - 1)) == 0)) {
108                 env->ca = 0;
109             } else {
110                 env->ca = 1;
111             }
112         } else {
113             ret = (int64_t)value;
114             env->ca = 0;
115         }
116     } else {
117         ret = (int64_t)value >> 63;
118         env->ca = (ret != 0);
119     }
120     return ret;
121 }
122 #endif
123
124 #if defined(TARGET_PPC64)
125 target_ulong helper_popcntb(target_ulong val)
126 {
127     val = (val & 0x5555555555555555ULL) + ((val >>  1) &
128                                            0x5555555555555555ULL);
129     val = (val & 0x3333333333333333ULL) + ((val >>  2) &
130                                            0x3333333333333333ULL);
131     val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >>  4) &
132                                            0x0f0f0f0f0f0f0f0fULL);
133     return val;
134 }
135
136 target_ulong helper_popcntw(target_ulong val)
137 {
138     val = (val & 0x5555555555555555ULL) + ((val >>  1) &
139                                            0x5555555555555555ULL);
140     val = (val & 0x3333333333333333ULL) + ((val >>  2) &
141                                            0x3333333333333333ULL);
142     val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >>  4) &
143                                            0x0f0f0f0f0f0f0f0fULL);
144     val = (val & 0x00ff00ff00ff00ffULL) + ((val >>  8) &
145                                            0x00ff00ff00ff00ffULL);
146     val = (val & 0x0000ffff0000ffffULL) + ((val >> 16) &
147                                            0x0000ffff0000ffffULL);
148     return val;
149 }
150
151 target_ulong helper_popcntd(target_ulong val)
152 {
153     return ctpop64(val);
154 }
155 #else
156 target_ulong helper_popcntb(target_ulong val)
157 {
158     val = (val & 0x55555555) + ((val >>  1) & 0x55555555);
159     val = (val & 0x33333333) + ((val >>  2) & 0x33333333);
160     val = (val & 0x0f0f0f0f) + ((val >>  4) & 0x0f0f0f0f);
161     return val;
162 }
163
164 target_ulong helper_popcntw(target_ulong val)
165 {
166     val = (val & 0x55555555) + ((val >>  1) & 0x55555555);
167     val = (val & 0x33333333) + ((val >>  2) & 0x33333333);
168     val = (val & 0x0f0f0f0f) + ((val >>  4) & 0x0f0f0f0f);
169     val = (val & 0x00ff00ff) + ((val >>  8) & 0x00ff00ff);
170     val = (val & 0x0000ffff) + ((val >> 16) & 0x0000ffff);
171     return val;
172 }
173 #endif
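/* Note on the popcnt helpers above: they use the classic parallel ("SWAR")
 * bit count.  Each step adds adjacent groups of bits in place, so after the
 * 0x0f0f... step every byte holds its own population count (the popcntb
 * semantics), and after the 0x0000ffff... step every 32-bit word holds its
 * own count (the popcntw semantics).  For example, popcntb(0x0000FF01)
 * yields 0x00000801: 8 set bits in the 0xFF byte, 1 in the 0x01 byte.  */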
174
175 /*****************************************************************************/
176 /* PowerPC 601 specific instructions (POWER bridge) */
177 target_ulong helper_div(CPUPPCState *env, target_ulong arg1, target_ulong arg2)
178 {
179     uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];
180
181     if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
182         (int32_t)arg2 == 0) {
183         env->spr[SPR_MQ] = 0;
184         return INT32_MIN;
185     } else {
186         env->spr[SPR_MQ] = tmp % arg2;
187         return tmp / (int32_t)arg2;
188     }
189 }
190
191 target_ulong helper_divo(CPUPPCState *env, target_ulong arg1,
192                          target_ulong arg2)
193 {
194     uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];
195
196     if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
197         (int32_t)arg2 == 0) {
198         env->so = env->ov = 1;
199         env->spr[SPR_MQ] = 0;
200         return INT32_MIN;
201     } else {
202         env->spr[SPR_MQ] = tmp % arg2;
203         tmp /= (int32_t)arg2;
204         if ((int32_t)tmp != tmp) {
205             env->so = env->ov = 1;
206         } else {
207             env->ov = 0;
208         }
209         return tmp;
210     }
211 }
212
213 target_ulong helper_divs(CPUPPCState *env, target_ulong arg1,
214                          target_ulong arg2)
215 {
216     if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
217         (int32_t)arg2 == 0) {
218         env->spr[SPR_MQ] = 0;
219         return INT32_MIN;
220     } else {
221         env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
222         return (int32_t)arg1 / (int32_t)arg2;
223     }
224 }
225
226 target_ulong helper_divso(CPUPPCState *env, target_ulong arg1,
227                           target_ulong arg2)
228 {
229     if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
230         (int32_t)arg2 == 0) {
231         env->so = env->ov = 1;
232         env->spr[SPR_MQ] = 0;
233         return INT32_MIN;
234     } else {
235         env->ov = 0;
236         env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
237         return (int32_t)arg1 / (int32_t)arg2;
238     }
239 }
240
241 /*****************************************************************************/
242 /* 602 specific instructions */
243 /* mfrom is the craziest instruction ever seen, imho! */
244 /* The real implementation uses a ROM table; do the same. */
245 /* Extremely decomposed:
246  *
247  * return 256 * log10(10^(-arg / 256) + 1.0) + 0.5
248  */
249 #if !defined(CONFIG_USER_ONLY)
250 target_ulong helper_602_mfrom(target_ulong arg)
251 {
252     if (likely(arg < 602)) {
253 #include "mfrom_table.c"
254         return mfrom_ROM_table[arg];
255     } else {
256         return 0;
257     }
258 }
259 #endif
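/* For reference, mfrom_ROM_table[] is assumed to be generated from the
 * formula above; e.g. for arg = 0 it should hold roughly
 * 256 * log10(2) + 0.5 ~= 77.  */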
260
261 /*****************************************************************************/
262 /* Altivec extension helpers */
263 #if defined(HOST_WORDS_BIGENDIAN)
264 #define HI_IDX 0
265 #define LO_IDX 1
266 #else
267 #define HI_IDX 1
268 #define LO_IDX 0
269 #endif
270
271 #if defined(HOST_WORDS_BIGENDIAN)
272 #define VECTOR_FOR_INORDER_I(index, element)                    \
273     for (index = 0; index < ARRAY_SIZE(r->element); index++)
274 #else
275 #define VECTOR_FOR_INORDER_I(index, element)                    \
276     for (index = ARRAY_SIZE(r->element)-1; index >= 0; index--)
277 #endif
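/* Note: VECTOR_FOR_INORDER_I() walks the elements in PowerPC (big-endian)
 * order regardless of host byte order: on a little-endian host, element 0
 * of the union is the last architectural element, so the loop simply runs
 * backwards.  */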
278
279 /* Saturating arithmetic helpers.  */
280 #define SATCVT(from, to, from_type, to_type, min, max)          \
281     static inline to_type cvt##from##to(from_type x, int *sat)  \
282     {                                                           \
283         to_type r;                                              \
284                                                                 \
285         if (x < (from_type)min) {                               \
286             r = min;                                            \
287             *sat = 1;                                           \
288         } else if (x > (from_type)max) {                        \
289             r = max;                                            \
290             *sat = 1;                                           \
291         } else {                                                \
292             r = x;                                              \
293         }                                                       \
294         return r;                                               \
295     }
296 #define SATCVTU(from, to, from_type, to_type, min, max)         \
297     static inline to_type cvt##from##to(from_type x, int *sat)  \
298     {                                                           \
299         to_type r;                                              \
300                                                                 \
301         if (x > (from_type)max) {                               \
302             r = max;                                            \
303             *sat = 1;                                           \
304         } else {                                                \
305             r = x;                                              \
306         }                                                       \
307         return r;                                               \
308     }
309 SATCVT(sh, sb, int16_t, int8_t, INT8_MIN, INT8_MAX)
310 SATCVT(sw, sh, int32_t, int16_t, INT16_MIN, INT16_MAX)
311 SATCVT(sd, sw, int64_t, int32_t, INT32_MIN, INT32_MAX)
312
313 SATCVTU(uh, ub, uint16_t, uint8_t, 0, UINT8_MAX)
314 SATCVTU(uw, uh, uint32_t, uint16_t, 0, UINT16_MAX)
315 SATCVTU(ud, uw, uint64_t, uint32_t, 0, UINT32_MAX)
316 SATCVT(sh, ub, int16_t, uint8_t, 0, UINT8_MAX)
317 SATCVT(sw, uh, int32_t, uint16_t, 0, UINT16_MAX)
318 SATCVT(sd, uw, int64_t, uint32_t, 0, UINT32_MAX)
319 #undef SATCVT
320 #undef SATCVTU
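/* Example of the generated converters: cvtshsb() clamps an int16_t into
 * int8_t range, so cvtshsb(300, &sat) returns 127 and sets *sat, while
 * cvtshub(-5, &sat) returns 0 and sets *sat.  */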
321
322 void helper_lvsl(ppc_avr_t *r, target_ulong sh)
323 {
324     int i, j = (sh & 0xf);
325
326     VECTOR_FOR_INORDER_I(i, u8) {
327         r->u8[i] = j++;
328     }
329 }
330
331 void helper_lvsr(ppc_avr_t *r, target_ulong sh)
332 {
333     int i, j = 0x10 - (sh & 0xf);
334
335     VECTOR_FOR_INORDER_I(i, u8) {
336         r->u8[i] = j++;
337     }
338 }
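/* Note: lvsl/lvsr build the permute control vector typically used with
 * vperm for unaligned accesses: lvsl with sh = 3 produces the bytes
 * 0x03 0x04 ... 0x12 in architectural order, lvsr the bytes
 * 0x0d 0x0e ... 0x1c.  */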
339
340 void helper_mtvscr(CPUPPCState *env, ppc_avr_t *r)
341 {
342 #if defined(HOST_WORDS_BIGENDIAN)
343     env->vscr = r->u32[3];
344 #else
345     env->vscr = r->u32[0];
346 #endif
347     set_flush_to_zero(vscr_nj, &env->vec_status);
348 }
349
350 void helper_vaddcuw(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
351 {
352     int i;
353
354     for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
355         r->u32[i] = ~a->u32[i] < b->u32[i];
356     }
357 }
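/* The carry-out test above relies on ~a < b being equivalent to
 * a + b > 0xffffffff: e.g. a = 0xffffffff, b = 1 gives ~a = 0 < 1,
 * so a carry of 1 is recorded.  */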
358
359 #define VARITH_DO(name, op, element)                                    \
360     void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)       \
361     {                                                                   \
362         int i;                                                          \
363                                                                         \
364         for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
365             r->element[i] = a->element[i] op b->element[i];             \
366         }                                                               \
367     }
368 #define VARITH(suffix, element)                 \
369     VARITH_DO(add##suffix, +, element)          \
370     VARITH_DO(sub##suffix, -, element)
371 VARITH(ubm, u8)
372 VARITH(uhm, u16)
373 VARITH(uwm, u32)
374 #undef VARITH_DO
375 #undef VARITH
376
377 #define VARITHFP(suffix, func)                                          \
378     void helper_v##suffix(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, \
379                           ppc_avr_t *b)                                 \
380     {                                                                   \
381         int i;                                                          \
382                                                                         \
383         for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
384             r->f[i] = func(a->f[i], b->f[i], &env->vec_status);         \
385         }                                                               \
386     }
387 VARITHFP(addfp, float32_add)
388 VARITHFP(subfp, float32_sub)
389 VARITHFP(minfp, float32_min)
390 VARITHFP(maxfp, float32_max)
391 #undef VARITHFP
392
393 #define VARITHFPFMA(suffix, type)                                       \
394     void helper_v##suffix(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, \
395                            ppc_avr_t *b, ppc_avr_t *c)                  \
396     {                                                                   \
397         int i;                                                          \
398         for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
399             r->f[i] = float32_muladd(a->f[i], c->f[i], b->f[i],         \
400                                      type, &env->vec_status);           \
401         }                                                               \
402     }
403 VARITHFPFMA(maddfp, 0);
404 VARITHFPFMA(nmsubfp, float_muladd_negate_result | float_muladd_negate_c);
405 #undef VARITHFPFMA
406
407 #define VARITHSAT_CASE(type, op, cvt, element)                          \
408     {                                                                   \
409         type result = (type)a->element[i] op (type)b->element[i];       \
410         r->element[i] = cvt(result, &sat);                              \
411     }
412
413 #define VARITHSAT_DO(name, op, optype, cvt, element)                    \
414     void helper_v##name(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,   \
415                         ppc_avr_t *b)                                   \
416     {                                                                   \
417         int sat = 0;                                                    \
418         int i;                                                          \
419                                                                         \
420         for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
421             switch (sizeof(r->element[0])) {                            \
422             case 1:                                                     \
423                 VARITHSAT_CASE(optype, op, cvt, element);               \
424                 break;                                                  \
425             case 2:                                                     \
426                 VARITHSAT_CASE(optype, op, cvt, element);               \
427                 break;                                                  \
428             case 4:                                                     \
429                 VARITHSAT_CASE(optype, op, cvt, element);               \
430                 break;                                                  \
431             }                                                           \
432         }                                                               \
433         if (sat) {                                                      \
434             env->vscr |= (1 << VSCR_SAT);                               \
435         }                                                               \
436     }
437 #define VARITHSAT_SIGNED(suffix, element, optype, cvt)          \
438     VARITHSAT_DO(adds##suffix##s, +, optype, cvt, element)      \
439     VARITHSAT_DO(subs##suffix##s, -, optype, cvt, element)
440 #define VARITHSAT_UNSIGNED(suffix, element, optype, cvt)        \
441     VARITHSAT_DO(addu##suffix##s, +, optype, cvt, element)      \
442     VARITHSAT_DO(subu##suffix##s, -, optype, cvt, element)
443 VARITHSAT_SIGNED(b, s8, int16_t, cvtshsb)
444 VARITHSAT_SIGNED(h, s16, int32_t, cvtswsh)
445 VARITHSAT_SIGNED(w, s32, int64_t, cvtsdsw)
446 VARITHSAT_UNSIGNED(b, u8, uint16_t, cvtshub)
447 VARITHSAT_UNSIGNED(h, u16, uint32_t, cvtswuh)
448 VARITHSAT_UNSIGNED(w, u32, uint64_t, cvtsduw)
449 #undef VARITHSAT_CASE
450 #undef VARITHSAT_DO
451 #undef VARITHSAT_SIGNED
452 #undef VARITHSAT_UNSIGNED
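/* Sketch of the saturating forms above: the operation is done in the wider
 * optype and then clamped, e.g. vaddsbs on 100 + 100 computes 200 as an
 * int16_t, cvtshsb() clamps it to 127 and VSCR[SAT] is set.  */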
453
454 #define VAVG_DO(name, element, etype)                                   \
455     void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)       \
456     {                                                                   \
457         int i;                                                          \
458                                                                         \
459         for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
460             etype x = (etype)a->element[i] + (etype)b->element[i] + 1;  \
461             r->element[i] = x >> 1;                                     \
462         }                                                               \
463     }
464
465 #define VAVG(type, signed_element, signed_type, unsigned_element,       \
466              unsigned_type)                                             \
467     VAVG_DO(avgs##type, signed_element, signed_type)                    \
468     VAVG_DO(avgu##type, unsigned_element, unsigned_type)
469 VAVG(b, s8, int16_t, u8, uint16_t)
470 VAVG(h, s16, int32_t, u16, uint32_t)
471 VAVG(w, s32, int64_t, u32, uint64_t)
472 #undef VAVG_DO
473 #undef VAVG
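/* The averages above round up: e.g. vavgub on 1 and 2 computes
 * (1 + 2 + 1) >> 1 = 2.  The sum is formed in a wider type so the
 * intermediate value cannot overflow.  */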
474
475 #define VCF(suffix, cvt, element)                                       \
476     void helper_vcf##suffix(CPUPPCState *env, ppc_avr_t *r,             \
477                             ppc_avr_t *b, uint32_t uim)                 \
478     {                                                                   \
479         int i;                                                          \
480                                                                         \
481         for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
482             float32 t = cvt(b->element[i], &env->vec_status);           \
483             r->f[i] = float32_scalbn(t, -uim, &env->vec_status);        \
484         }                                                               \
485     }
486 VCF(ux, uint32_to_float32, u32)
487 VCF(sx, int32_to_float32, s32)
488 #undef VCF
489
490 #define VCMP_DO(suffix, compare, element, record)                       \
491     void helper_vcmp##suffix(CPUPPCState *env, ppc_avr_t *r,            \
492                              ppc_avr_t *a, ppc_avr_t *b)                \
493     {                                                                   \
494         uint32_t ones = (uint32_t)-1;                                   \
495         uint32_t all = ones;                                            \
496         uint32_t none = 0;                                              \
497         int i;                                                          \
498                                                                         \
499         for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
500             uint32_t result = (a->element[i] compare b->element[i] ?    \
501                                ones : 0x0);                             \
502             switch (sizeof(a->element[0])) {                            \
503             case 4:                                                     \
504                 r->u32[i] = result;                                     \
505                 break;                                                  \
506             case 2:                                                     \
507                 r->u16[i] = result;                                     \
508                 break;                                                  \
509             case 1:                                                     \
510                 r->u8[i] = result;                                      \
511                 break;                                                  \
512             }                                                           \
513             all &= result;                                              \
514             none |= result;                                             \
515         }                                                               \
516         if (record) {                                                   \
517             env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1);       \
518         }                                                               \
519     }
520 #define VCMP(suffix, compare, element)          \
521     VCMP_DO(suffix, compare, element, 0)        \
522     VCMP_DO(suffix##_dot, compare, element, 1)
523 VCMP(equb, ==, u8)
524 VCMP(equh, ==, u16)
525 VCMP(equw, ==, u32)
526 VCMP(gtub, >, u8)
527 VCMP(gtuh, >, u16)
528 VCMP(gtuw, >, u32)
529 VCMP(gtsb, >, s8)
530 VCMP(gtsh, >, s16)
531 VCMP(gtsw, >, s32)
532 #undef VCMP_DO
533 #undef VCMP
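/* For the dot (record) forms, CR6 is set from the comparison summary:
 * bit 3 (value 8) when the predicate held for every element and
 * bit 1 (value 2) when it held for none.  */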
534
535 #define VCMPFP_DO(suffix, compare, order, record)                       \
536     void helper_vcmp##suffix(CPUPPCState *env, ppc_avr_t *r,            \
537                              ppc_avr_t *a, ppc_avr_t *b)                \
538     {                                                                   \
539         uint32_t ones = (uint32_t)-1;                                   \
540         uint32_t all = ones;                                            \
541         uint32_t none = 0;                                              \
542         int i;                                                          \
543                                                                         \
544         for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
545             uint32_t result;                                            \
546             int rel = float32_compare_quiet(a->f[i], b->f[i],           \
547                                             &env->vec_status);          \
548             if (rel == float_relation_unordered) {                      \
549                 result = 0;                                             \
550             } else if (rel compare order) {                             \
551                 result = ones;                                          \
552             } else {                                                    \
553                 result = 0;                                             \
554             }                                                           \
555             r->u32[i] = result;                                         \
556             all &= result;                                              \
557             none |= result;                                             \
558         }                                                               \
559         if (record) {                                                   \
560             env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1);       \
561         }                                                               \
562     }
563 #define VCMPFP(suffix, compare, order)          \
564     VCMPFP_DO(suffix, compare, order, 0)        \
565     VCMPFP_DO(suffix##_dot, compare, order, 1)
566 VCMPFP(eqfp, ==, float_relation_equal)
567 VCMPFP(gefp, !=, float_relation_less)
568 VCMPFP(gtfp, ==, float_relation_greater)
569 #undef VCMPFP_DO
570 #undef VCMPFP
571
572 static inline void vcmpbfp_internal(CPUPPCState *env, ppc_avr_t *r,
573                                     ppc_avr_t *a, ppc_avr_t *b, int record)
574 {
575     int i;
576     int all_in = 0;
577
578     for (i = 0; i < ARRAY_SIZE(r->f); i++) {
579         int le_rel = float32_compare_quiet(a->f[i], b->f[i], &env->vec_status);
580         if (le_rel == float_relation_unordered) {
581             r->u32[i] = 0xc0000000;
582             /* ALL_IN does not need to be updated here.  */
583         } else {
584             float32 bneg = float32_chs(b->f[i]);
585             int ge_rel = float32_compare_quiet(a->f[i], bneg, &env->vec_status);
586             int le = le_rel != float_relation_greater;
587             int ge = ge_rel != float_relation_less;
588
589             r->u32[i] = ((!le) << 31) | ((!ge) << 30);
590             all_in |= (!le | !ge);
591         }
592     }
593     if (record) {
594         env->crf[6] = (all_in == 0) << 1;
595     }
596 }
597
598 void helper_vcmpbfp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
599 {
600     vcmpbfp_internal(env, r, a, b, 0);
601 }
602
603 void helper_vcmpbfp_dot(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
604                         ppc_avr_t *b)
605 {
606     vcmpbfp_internal(env, r, a, b, 1);
607 }
608
609 #define VCT(suffix, satcvt, element)                                    \
610     void helper_vct##suffix(CPUPPCState *env, ppc_avr_t *r,             \
611                             ppc_avr_t *b, uint32_t uim)                 \
612     {                                                                   \
613         int i;                                                          \
614         int sat = 0;                                                    \
615         float_status s = env->vec_status;                               \
616                                                                         \
617         set_float_rounding_mode(float_round_to_zero, &s);               \
618         for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
619             if (float32_is_any_nan(b->f[i])) {                          \
620                 r->element[i] = 0;                                      \
621             } else {                                                    \
622                 float64 t = float32_to_float64(b->f[i], &s);            \
623                 int64_t j;                                              \
624                                                                         \
625                 t = float64_scalbn(t, uim, &s);                         \
626                 j = float64_to_int64(t, &s);                            \
627                 r->element[i] = satcvt(j, &sat);                        \
628             }                                                           \
629         }                                                               \
630         if (sat) {                                                      \
631             env->vscr |= (1 << VSCR_SAT);                               \
632         }                                                               \
633     }
634 VCT(uxs, cvtsduw, u32)
635 VCT(sxs, cvtsdsw, s32)
636 #undef VCT
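/* Sketch of vctuxs/vctsxs as generated above: each float is scaled by
 * 2^uim via float64_scalbn(), truncated toward zero and saturated, with
 * NaN inputs mapped to 0.  For example, vctsxs with uim = 1 converts
 * 2.5 to 5.  */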
637
638 void helper_vmhaddshs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
639                       ppc_avr_t *b, ppc_avr_t *c)
640 {
641     int sat = 0;
642     int i;
643
644     for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
645         int32_t prod = a->s16[i] * b->s16[i];
646         int32_t t = (int32_t)c->s16[i] + (prod >> 15);
647
648         r->s16[i] = cvtswsh(t, &sat);
649     }
650
651     if (sat) {
652         env->vscr |= (1 << VSCR_SAT);
653     }
654 }
655
656 void helper_vmhraddshs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
657                        ppc_avr_t *b, ppc_avr_t *c)
658 {
659     int sat = 0;
660     int i;
661
662     for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
663         int32_t prod = a->s16[i] * b->s16[i] + 0x00004000;
664         int32_t t = (int32_t)c->s16[i] + (prod >> 15);
665         r->s16[i] = cvtswsh(t, &sat);
666     }
667
668     if (sat) {
669         env->vscr |= (1 << VSCR_SAT);
670     }
671 }
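/* Both helpers above treat the s16 elements as Q15 fixed point: the 32-bit
 * product is shifted right by 15 (vmhraddshs adds 0x4000 first to round),
 * c is added and the result saturated.  E.g. 0x4000 * 0x4000 (0.5 * 0.5)
 * gives prod = 0x10000000 and prod >> 15 = 0x2000 (0.25).  */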
672
673 #define VMINMAX_DO(name, compare, element)                              \
674     void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)       \
675     {                                                                   \
676         int i;                                                          \
677                                                                         \
678         for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
679             if (a->element[i] compare b->element[i]) {                  \
680                 r->element[i] = b->element[i];                          \
681             } else {                                                    \
682                 r->element[i] = a->element[i];                          \
683             }                                                           \
684         }                                                               \
685     }
686 #define VMINMAX(suffix, element)                \
687     VMINMAX_DO(min##suffix, >, element)         \
688     VMINMAX_DO(max##suffix, <, element)
689 VMINMAX(sb, s8)
690 VMINMAX(sh, s16)
691 VMINMAX(sw, s32)
692 VMINMAX(ub, u8)
693 VMINMAX(uh, u16)
694 VMINMAX(uw, u32)
695 #undef VMINMAX_DO
696 #undef VMINMAX
697
698 void helper_vmladduhm(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
699 {
700     int i;
701
702     for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
703         int32_t prod = a->s16[i] * b->s16[i];
704         r->s16[i] = (int16_t) (prod + c->s16[i]);
705     }
706 }
707
708 #define VMRG_DO(name, element, highp)                                   \
709     void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)       \
710     {                                                                   \
711         ppc_avr_t result;                                               \
712         int i;                                                          \
713         size_t n_elems = ARRAY_SIZE(r->element);                        \
714                                                                         \
715         for (i = 0; i < n_elems / 2; i++) {                             \
716             if (highp) {                                                \
717                 result.element[i*2+HI_IDX] = a->element[i];             \
718                 result.element[i*2+LO_IDX] = b->element[i];             \
719             } else {                                                    \
720                 result.element[n_elems - i * 2 - (1 + HI_IDX)] =        \
721                     b->element[n_elems - i - 1];                        \
722                 result.element[n_elems - i * 2 - (1 + LO_IDX)] =        \
723                     a->element[n_elems - i - 1];                        \
724             }                                                           \
725         }                                                               \
726         *r = result;                                                    \
727     }
728 #if defined(HOST_WORDS_BIGENDIAN)
729 #define MRGHI 0
730 #define MRGLO 1
731 #else
732 #define MRGHI 1
733 #define MRGLO 0
734 #endif
735 #define VMRG(suffix, element)                   \
736     VMRG_DO(mrgl##suffix, element, MRGHI)       \
737     VMRG_DO(mrgh##suffix, element, MRGLO)
738 VMRG(b, u8)
739 VMRG(h, u16)
740 VMRG(w, u32)
741 #undef VMRG_DO
742 #undef VMRG
743 #undef MRGHI
744 #undef MRGLO
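/* Note: vmrgh* interleaves the elements from the most-significant halves
 * of a and b (a0, b0, a1, b1, ...), vmrgl* the least-significant halves;
 * the MRGHI/MRGLO swap above compensates for host byte order.  */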
745
746 void helper_vmsummbm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
747                      ppc_avr_t *b, ppc_avr_t *c)
748 {
749     int32_t prod[16];
750     int i;
751
752     for (i = 0; i < ARRAY_SIZE(r->s8); i++) {
753         prod[i] = (int32_t)a->s8[i] * b->u8[i];
754     }
755
756     VECTOR_FOR_INORDER_I(i, s32) {
757         r->s32[i] = c->s32[i] + prod[4 * i] + prod[4 * i + 1] +
758             prod[4 * i + 2] + prod[4 * i + 3];
759     }
760 }
761
762 void helper_vmsumshm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
763                      ppc_avr_t *b, ppc_avr_t *c)
764 {
765     int32_t prod[8];
766     int i;
767
768     for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
769         prod[i] = a->s16[i] * b->s16[i];
770     }
771
772     VECTOR_FOR_INORDER_I(i, s32) {
773         r->s32[i] = c->s32[i] + prod[2 * i] + prod[2 * i + 1];
774     }
775 }
776
777 void helper_vmsumshs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
778                      ppc_avr_t *b, ppc_avr_t *c)
779 {
780     int32_t prod[8];
781     int i;
782     int sat = 0;
783
784     for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
785         prod[i] = (int32_t)a->s16[i] * b->s16[i];
786     }
787
788     VECTOR_FOR_INORDER_I(i, s32) {
789         int64_t t = (int64_t)c->s32[i] + prod[2 * i] + prod[2 * i + 1];
790
791         r->u32[i] = cvtsdsw(t, &sat);
792     }
793
794     if (sat) {
795         env->vscr |= (1 << VSCR_SAT);
796     }
797 }
798
799 void helper_vmsumubm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
800                      ppc_avr_t *b, ppc_avr_t *c)
801 {
802     uint16_t prod[16];
803     int i;
804
805     for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
806         prod[i] = a->u8[i] * b->u8[i];
807     }
808
809     VECTOR_FOR_INORDER_I(i, u32) {
810         r->u32[i] = c->u32[i] + prod[4 * i] + prod[4 * i + 1] +
811             prod[4 * i + 2] + prod[4 * i + 3];
812     }
813 }
814
815 void helper_vmsumuhm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
816                      ppc_avr_t *b, ppc_avr_t *c)
817 {
818     uint32_t prod[8];
819     int i;
820
821     for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
822         prod[i] = a->u16[i] * b->u16[i];
823     }
824
825     VECTOR_FOR_INORDER_I(i, u32) {
826         r->u32[i] = c->u32[i] + prod[2 * i] + prod[2 * i + 1];
827     }
828 }
829
830 void helper_vmsumuhs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
831                      ppc_avr_t *b, ppc_avr_t *c)
832 {
833     uint32_t prod[8];
834     int i;
835     int sat = 0;
836
837     for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
838         prod[i] = a->u16[i] * b->u16[i];
839     }
840
841     VECTOR_FOR_INORDER_I(i, s32) {
842         uint64_t t = (uint64_t)c->u32[i] + prod[2 * i] + prod[2 * i + 1];
843
844         r->u32[i] = cvtuduw(t, &sat);
845     }
846
847     if (sat) {
848         env->vscr |= (1 << VSCR_SAT);
849     }
850 }
851
852 #define VMUL_DO(name, mul_element, prod_element, evenp)                 \
853     void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)       \
854     {                                                                   \
855         int i;                                                          \
856                                                                         \
857         VECTOR_FOR_INORDER_I(i, prod_element) {                         \
858             if (evenp) {                                                \
859                 r->prod_element[i] = a->mul_element[i * 2 + HI_IDX] *   \
860                     b->mul_element[i * 2 + HI_IDX];                     \
861             } else {                                                    \
862                 r->prod_element[i] = a->mul_element[i * 2 + LO_IDX] *   \
863                     b->mul_element[i * 2 + LO_IDX];                     \
864             }                                                           \
865         }                                                               \
866     }
867 #define VMUL(suffix, mul_element, prod_element)         \
868     VMUL_DO(mule##suffix, mul_element, prod_element, 1) \
869     VMUL_DO(mulo##suffix, mul_element, prod_element, 0)
870 VMUL(sb, s8, s16)
871 VMUL(sh, s16, s32)
872 VMUL(ub, u8, u16)
873 VMUL(uh, u16, u32)
874 #undef VMUL_DO
875 #undef VMUL
876
877 void helper_vperm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b,
878                   ppc_avr_t *c)
879 {
880     ppc_avr_t result;
881     int i;
882
883     VECTOR_FOR_INORDER_I(i, u8) {
884         int s = c->u8[i] & 0x1f;
885 #if defined(HOST_WORDS_BIGENDIAN)
886         int index = s & 0xf;
887 #else
888         int index = 15 - (s & 0xf);
889 #endif
890
891         if (s & 0x10) {
892             result.u8[i] = b->u8[index];
893         } else {
894             result.u8[i] = a->u8[index];
895         }
896     }
897     *r = result;
898 }
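/* Each control byte of c selects one byte out of the 32-byte concatenation
 * a:b in architectural numbering: bit 0x10 chooses b, the low nibble is the
 * byte index.  E.g. a control byte of 0x13 copies architectural byte 3
 * of b.  */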
899
900 #if defined(HOST_WORDS_BIGENDIAN)
901 #define PKBIG 1
902 #else
903 #define PKBIG 0
904 #endif
905 void helper_vpkpx(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
906 {
907     int i, j;
908     ppc_avr_t result;
909 #if defined(HOST_WORDS_BIGENDIAN)
910     const ppc_avr_t *x[2] = { a, b };
911 #else
912     const ppc_avr_t *x[2] = { b, a };
913 #endif
914
915     VECTOR_FOR_INORDER_I(i, u64) {
916         VECTOR_FOR_INORDER_I(j, u32) {
917             uint32_t e = x[i]->u32[j];
918
919             result.u16[4*i+j] = (((e >> 9) & 0xfc00) |
920                                  ((e >> 6) & 0x3e0) |
921                                  ((e >> 3) & 0x1f));
922         }
923     }
924     *r = result;
925 }
926
927 #define VPK(suffix, from, to, cvt, dosat)                               \
928     void helper_vpk##suffix(CPUPPCState *env, ppc_avr_t *r,             \
929                             ppc_avr_t *a, ppc_avr_t *b)                 \
930     {                                                                   \
931         int i;                                                          \
932         int sat = 0;                                                    \
933         ppc_avr_t result;                                               \
934         ppc_avr_t *a0 = PKBIG ? a : b;                                  \
935         ppc_avr_t *a1 = PKBIG ? b : a;                                  \
936                                                                         \
937         VECTOR_FOR_INORDER_I(i, from) {                                 \
938             result.to[i] = cvt(a0->from[i], &sat);                      \
939             result.to[i+ARRAY_SIZE(r->from)] = cvt(a1->from[i], &sat);  \
940         }                                                               \
941         *r = result;                                                    \
942         if (dosat && sat) {                                             \
943             env->vscr |= (1 << VSCR_SAT);                               \
944         }                                                               \
945     }
946 #define I(x, y) (x)
947 VPK(shss, s16, s8, cvtshsb, 1)
948 VPK(shus, s16, u8, cvtshub, 1)
949 VPK(swss, s32, s16, cvtswsh, 1)
950 VPK(swus, s32, u16, cvtswuh, 1)
951 VPK(uhus, u16, u8, cvtuhub, 1)
952 VPK(uwus, u32, u16, cvtuwuh, 1)
953 VPK(uhum, u16, u8, I, 0)
954 VPK(uwum, u32, u16, I, 0)
955 #undef I
956 #undef VPK
957 #undef PKBIG
958
959 void helper_vrefp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *b)
960 {
961     int i;
962
963     for (i = 0; i < ARRAY_SIZE(r->f); i++) {
964         r->f[i] = float32_div(float32_one, b->f[i], &env->vec_status);
965     }
966 }
967
968 #define VRFI(suffix, rounding)                                  \
969     void helper_vrfi##suffix(CPUPPCState *env, ppc_avr_t *r,    \
970                              ppc_avr_t *b)                      \
971     {                                                           \
972         int i;                                                  \
973         float_status s = env->vec_status;                       \
974                                                                 \
975         set_float_rounding_mode(rounding, &s);                  \
976         for (i = 0; i < ARRAY_SIZE(r->f); i++) {                \
977             r->f[i] = float32_round_to_int (b->f[i], &s);       \
978         }                                                       \
979     }
980 VRFI(n, float_round_nearest_even)
981 VRFI(m, float_round_down)
982 VRFI(p, float_round_up)
983 VRFI(z, float_round_to_zero)
984 #undef VRFI
985
986 #define VROTATE(suffix, element)                                        \
987     void helper_vrl##suffix(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)   \
988     {                                                                   \
989         int i;                                                          \
990                                                                         \
991         for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
992             unsigned int mask = ((1 <<                                  \
993                                   (3 + (sizeof(a->element[0]) >> 1)))   \
994                                  - 1);                                  \
995             unsigned int shift = b->element[i] & mask;                  \
996             r->element[i] = (a->element[i] << shift) |                  \
997                 (a->element[i] >> (sizeof(a->element[0]) * 8 - shift)); \
998         }                                                               \
999     }
1000 VROTATE(b, u8)
1001 VROTATE(h, u16)
1002 VROTATE(w, u32)
1003 #undef VROTATE
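/* The mask computed above evaluates to 7, 15 or 31 for 1-, 2- or 4-byte
 * elements: (1 << (3 + (sizeof >> 1))) - 1, since sizeof >> 1 is 0, 1 or 2.
 * The same idiom is reused by the VSL and VSR helpers below.  */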
1004
1005 void helper_vrsqrtefp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *b)
1006 {
1007     int i;
1008
1009     for (i = 0; i < ARRAY_SIZE(r->f); i++) {
1010         float32 t = float32_sqrt(b->f[i], &env->vec_status);
1011
1012         r->f[i] = float32_div(float32_one, t, &env->vec_status);
1013     }
1014 }
1015
1016 void helper_vsel(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b,
1017                  ppc_avr_t *c)
1018 {
1019     r->u64[0] = (a->u64[0] & ~c->u64[0]) | (b->u64[0] & c->u64[0]);
1020     r->u64[1] = (a->u64[1] & ~c->u64[1]) | (b->u64[1] & c->u64[1]);
1021 }
1022
1023 void helper_vexptefp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *b)
1024 {
1025     int i;
1026
1027     for (i = 0; i < ARRAY_SIZE(r->f); i++) {
1028         r->f[i] = float32_exp2(b->f[i], &env->vec_status);
1029     }
1030 }
1031
1032 void helper_vlogefp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *b)
1033 {
1034     int i;
1035
1036     for (i = 0; i < ARRAY_SIZE(r->f); i++) {
1037         r->f[i] = float32_log2(b->f[i], &env->vec_status);
1038     }
1039 }
1040
1041 #if defined(HOST_WORDS_BIGENDIAN)
1042 #define LEFT 0
1043 #define RIGHT 1
1044 #else
1045 #define LEFT 1
1046 #define RIGHT 0
1047 #endif
1048 /* The specification says that the results are undefined unless all of the
1049  * shift counts are identical.  We check that they are, to match what real
1050  * hardware appears to do.  */
1051 #define VSHIFT(suffix, leftp)                                           \
1052     void helper_vs##suffix(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)    \
1053     {                                                                   \
1054         int shift = b->u8[LO_IDX*15] & 0x7;                             \
1055         int doit = 1;                                                   \
1056         int i;                                                          \
1057                                                                         \
1058         for (i = 0; i < ARRAY_SIZE(r->u8); i++) {                       \
1059             doit = doit && ((b->u8[i] & 0x7) == shift);                 \
1060         }                                                               \
1061         if (doit) {                                                     \
1062             if (shift == 0) {                                           \
1063                 *r = *a;                                                \
1064             } else if (leftp) {                                         \
1065                 uint64_t carry = a->u64[LO_IDX] >> (64 - shift);        \
1066                                                                         \
1067                 r->u64[HI_IDX] = (a->u64[HI_IDX] << shift) | carry;     \
1068                 r->u64[LO_IDX] = a->u64[LO_IDX] << shift;               \
1069             } else {                                                    \
1070                 uint64_t carry = a->u64[HI_IDX] << (64 - shift);        \
1071                                                                         \
1072                 r->u64[LO_IDX] = (a->u64[LO_IDX] >> shift) | carry;     \
1073                 r->u64[HI_IDX] = a->u64[HI_IDX] >> shift;               \
1074             }                                                           \
1075         }                                                               \
1076     }
1077 VSHIFT(l, LEFT)
1078 VSHIFT(r, RIGHT)
1079 #undef VSHIFT
1080 #undef LEFT
1081 #undef RIGHT
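/* vsl/vsr shift the whole 128-bit register by 0-7 bits.  The count is taken
 * from the low three bits of the last architectural byte of b, and the bits
 * that cross the middle are carried between the two 64-bit halves; if the
 * per-byte counts disagree the destination is simply left unchanged here.  */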
1082
1083 #define VSL(suffix, element)                                            \
1084     void helper_vsl##suffix(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)   \
1085     {                                                                   \
1086         int i;                                                          \
1087                                                                         \
1088         for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
1089             unsigned int mask = ((1 <<                                  \
1090                                   (3 + (sizeof(a->element[0]) >> 1)))   \
1091                                  - 1);                                  \
1092             unsigned int shift = b->element[i] & mask;                  \
1093                                                                         \
1094             r->element[i] = a->element[i] << shift;                     \
1095         }                                                               \
1096     }
1097 VSL(b, u8)
1098 VSL(h, u16)
1099 VSL(w, u32)
1100 #undef VSL
1101
1102 void helper_vsldoi(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t shift)
1103 {
1104     int sh = shift & 0xf;
1105     int i;
1106     ppc_avr_t result;
1107
1108 #if defined(HOST_WORDS_BIGENDIAN)
1109     for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
1110         int index = sh + i;
1111         if (index > 0xf) {
1112             result.u8[i] = b->u8[index - 0x10];
1113         } else {
1114             result.u8[i] = a->u8[index];
1115         }
1116     }
1117 #else
1118     for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
1119         int index = (16 - sh) + i;
1120         if (index > 0xf) {
1121             result.u8[i] = a->u8[index - 0x10];
1122         } else {
1123             result.u8[i] = b->u8[index];
1124         }
1125     }
1126 #endif
1127     *r = result;
1128 }
1129
1130 void helper_vslo(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
1131 {
1132     int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;
1133
1134 #if defined(HOST_WORDS_BIGENDIAN)
1135     memmove(&r->u8[0], &a->u8[sh], 16 - sh);
1136     memset(&r->u8[16-sh], 0, sh);
1137 #else
1138     memmove(&r->u8[sh], &a->u8[0], 16 - sh);
1139     memset(&r->u8[0], 0, sh);
1140 #endif
1141 }
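/* vslo shifts by whole bytes: the four-bit count is extracted from the last
 * architectural byte of b ((x >> 3) & 0xf above) and the vacated bytes are
 * zero filled.  helper_vsro() below is the mirror image.  */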
1142
1143 /* Experimental testing shows that hardware masks the immediate.  */
1144 #define _SPLAT_MASKED(element) (splat & (ARRAY_SIZE(r->element) - 1))
1145 #if defined(HOST_WORDS_BIGENDIAN)
1146 #define SPLAT_ELEMENT(element) _SPLAT_MASKED(element)
1147 #else
1148 #define SPLAT_ELEMENT(element)                                  \
1149     (ARRAY_SIZE(r->element) - 1 - _SPLAT_MASKED(element))
1150 #endif
1151 #define VSPLT(suffix, element)                                          \
1152     void helper_vsplt##suffix(ppc_avr_t *r, ppc_avr_t *b, uint32_t splat) \
1153     {                                                                   \
1154         uint32_t s = b->element[SPLAT_ELEMENT(element)];                \
1155         int i;                                                          \
1156                                                                         \
1157         for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
1158             r->element[i] = s;                                          \
1159         }                                                               \
1160     }
1161 VSPLT(b, u8)
1162 VSPLT(h, u16)
1163 VSPLT(w, u32)
1164 #undef VSPLT
1165 #undef SPLAT_ELEMENT
1166 #undef _SPLAT_MASKED
1167
1168 #define VSPLTI(suffix, element, splat_type)                     \
1169     void helper_vspltis##suffix(ppc_avr_t *r, uint32_t splat)   \
1170     {                                                           \
1171         splat_type x = (int8_t)(splat << 3) >> 3;               \
1172         int i;                                                  \
1173                                                                 \
1174         for (i = 0; i < ARRAY_SIZE(r->element); i++) {          \
1175             r->element[i] = x;                                  \
1176         }                                                       \
1177     }
1178 VSPLTI(b, s8, int8_t)
1179 VSPLTI(h, s16, int16_t)
1180 VSPLTI(w, s32, int32_t)
1181 #undef VSPLTI
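/* The 5-bit SIMM is sign extended by (int8_t)(splat << 3) >> 3:
 * e.g. splat = 0x1f becomes -1 and splat = 0x10 becomes -16.  */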
1182
1183 #define VSR(suffix, element)                                            \
1184     void helper_vsr##suffix(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)   \
1185     {                                                                   \
1186         int i;                                                          \
1187                                                                         \
1188         for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
1189             unsigned int mask = ((1 <<                                  \
1190                                   (3 + (sizeof(a->element[0]) >> 1)))   \
1191                                  - 1);                                  \
1192             unsigned int shift = b->element[i] & mask;                  \
1193                                                                         \
1194             r->element[i] = a->element[i] >> shift;                     \
1195         }                                                               \
1196     }
1197 VSR(ab, s8)
1198 VSR(ah, s16)
1199 VSR(aw, s32)
1200 VSR(b, u8)
1201 VSR(h, u16)
1202 VSR(w, u32)
1203 #undef VSR
1204
1205 void helper_vsro(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
1206 {
1207     int sh = (b->u8[LO_IDX * 0xf] >> 3) & 0xf;
1208
1209 #if defined(HOST_WORDS_BIGENDIAN)
1210     memmove(&r->u8[sh], &a->u8[0], 16 - sh);
1211     memset(&r->u8[0], 0, sh);
1212 #else
1213     memmove(&r->u8[0], &a->u8[sh], 16 - sh);
1214     memset(&r->u8[16 - sh], 0, sh);
1215 #endif
1216 }
1217
1218 void helper_vsubcuw(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
1219 {
1220     int i;
1221
1222     for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
1223         r->u32[i] = a->u32[i] >= b->u32[i];
1224     }
1225 }
1226
1227 void helper_vsumsws(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
1228 {
1229     int64_t t;
1230     int i, upper;
1231     ppc_avr_t result;
1232     int sat = 0;
1233
1234 #if defined(HOST_WORDS_BIGENDIAN)
1235     upper = ARRAY_SIZE(r->s32)-1;
1236 #else
1237     upper = 0;
1238 #endif
1239     t = (int64_t)b->s32[upper];
1240     for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
1241         t += a->s32[i];
1242         result.s32[i] = 0;
1243     }
1244     result.s32[upper] = cvtsdsw(t, &sat);
1245     *r = result;
1246
1247     if (sat) {
1248         env->vscr |= (1 << VSCR_SAT);
1249     }
1250 }
1251
1252 void helper_vsum2sws(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
1253 {
1254     int i, j, upper;
1255     ppc_avr_t result;
1256     int sat = 0;
1257
1258 #if defined(HOST_WORDS_BIGENDIAN)
1259     upper = 1;
1260 #else
1261     upper = 0;
1262 #endif
1263     for (i = 0; i < ARRAY_SIZE(r->u64); i++) {
1264         int64_t t = (int64_t)b->s32[upper + i * 2];
1265
1266         result.u64[i] = 0;
1267         for (j = 0; j < ARRAY_SIZE(r->u64); j++) {
1268             t += a->s32[2 * i + j];
1269         }
1270         result.s32[upper + i * 2] = cvtsdsw(t, &sat);
1271     }
1272
1273     *r = result;
1274     if (sat) {
1275         env->vscr |= (1 << VSCR_SAT);
1276     }
1277 }
1278
1279 void helper_vsum4sbs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
1280 {
1281     int i, j;
1282     int sat = 0;
1283
1284     for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
1285         int64_t t = (int64_t)b->s32[i];
1286
1287         for (j = 0; j < ARRAY_SIZE(r->s32); j++) {
1288             t += a->s8[4 * i + j];
1289         }
1290         r->s32[i] = cvtsdsw(t, &sat);
1291     }
1292
1293     if (sat) {
1294         env->vscr |= (1 << VSCR_SAT);
1295     }
1296 }
1297
1298 void helper_vsum4shs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
1299 {
1300     int sat = 0;
1301     int i;
1302
1303     for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
1304         int64_t t = (int64_t)b->s32[i];
1305
1306         t += a->s16[2 * i] + a->s16[2 * i + 1];
1307         r->s32[i] = cvtsdsw(t, &sat);
1308     }
1309
1310     if (sat) {
1311         env->vscr |= (1 << VSCR_SAT);
1312     }
1313 }
1314
1315 void helper_vsum4ubs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
1316 {
1317     int i, j;
1318     int sat = 0;
1319
1320     for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
1321         uint64_t t = (uint64_t)b->u32[i];
1322
1323         for (j = 0; j < ARRAY_SIZE(r->u32); j++) {
1324             t += a->u8[4 * i + j];
1325         }
1326         r->u32[i] = cvtuduw(t, &sat);
1327     }
1328
1329     if (sat) {
1330         env->vscr |= (1 << VSCR_SAT);
1331     }
1332 }
1333
1334 #if defined(HOST_WORDS_BIGENDIAN)
1335 #define UPKHI 1
1336 #define UPKLO 0
1337 #else
1338 #define UPKHI 0
1339 #define UPKLO 1
1340 #endif
1341 #define VUPKPX(suffix, hi)                                              \
1342     void helper_vupk##suffix(ppc_avr_t *r, ppc_avr_t *b)                \
1343     {                                                                   \
1344         int i;                                                          \
1345         ppc_avr_t result;                                               \
1346                                                                         \
1347         for (i = 0; i < ARRAY_SIZE(r->u32); i++) {                      \
1348             uint16_t e = b->u16[hi ? i : i+4];                          \
1349             uint8_t a = (e >> 15) ? 0xff : 0;                           \
1350             uint8_t r = (e >> 10) & 0x1f;                               \
1351             uint8_t g = (e >> 5) & 0x1f;                                \
1352             uint8_t b = e & 0x1f;                                       \
1353                                                                         \
1354             result.u32[i] = (a << 24) | (r << 16) | (g << 8) | b;       \
1355         }                                                               \
1356         *r = result;                                                    \
1357     }
1358 VUPKPX(lpx, UPKLO)
1359 VUPKPX(hpx, UPKHI)
1360 #undef VUPKPX
1361
1362 #define VUPK(suffix, unpacked, packee, hi)                              \
1363     void helper_vupk##suffix(ppc_avr_t *r, ppc_avr_t *b)                \
1364     {                                                                   \
1365         int i;                                                          \
1366         ppc_avr_t result;                                               \
1367                                                                         \
1368         if (hi) {                                                       \
1369             for (i = 0; i < ARRAY_SIZE(r->unpacked); i++) {             \
1370                 result.unpacked[i] = b->packee[i];                      \
1371             }                                                           \
1372         } else {                                                        \
1373             for (i = ARRAY_SIZE(r->unpacked); i < ARRAY_SIZE(r->packee); \
1374                  i++) {                                                 \
1375                 result.unpacked[i - ARRAY_SIZE(r->unpacked)] = b->packee[i]; \
1376             }                                                           \
1377         }                                                               \
1378         *r = result;                                                    \
1379     }
1380 VUPK(hsb, s16, s8, UPKHI)
1381 VUPK(hsh, s32, s16, UPKHI)
1382 VUPK(lsb, s16, s8, UPKLO)
1383 VUPK(lsh, s32, s16, UPKLO)
1384 #undef VUPK
1385 #undef UPKHI
1386 #undef UPKLO
1387
1388 #undef VECTOR_FOR_INORDER_I
1389 #undef HI_IDX
1390 #undef LO_IDX
1391
1392 /*****************************************************************************/
1393 /* SPE extension helpers */
1394 /* Use a table to make this quicker */
1395 static const uint8_t hbrev[16] = {
1396     0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE,
1397     0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF,
1398 };
1399
1400 static inline uint8_t byte_reverse(uint8_t val)
1401 {
1402     return hbrev[val >> 4] | (hbrev[val & 0xF] << 4);
1403 }
1404
1405 static inline uint32_t word_reverse(uint32_t val)
1406 {
1407     return byte_reverse(val >> 24) | (byte_reverse(val >> 16) << 8) |
1408         (byte_reverse(val >> 8) << 16) | (byte_reverse(val) << 24);
1409 }
1410
1411 #define MASKBITS 16 /* Random value - to be fixed (implementation dependent) */
1412 target_ulong helper_brinc(target_ulong arg1, target_ulong arg2)
1413 {
1414     uint32_t a, b, d, mask;
1415
1416     mask = UINT32_MAX >> (32 - MASKBITS);
1417     a = arg1 & mask;
1418     b = arg2 & mask;
1419     d = word_reverse(1 + word_reverse(a | ~b));
1420     return (arg1 & ~mask) | (d & b);
1421 }
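/* brinc performs a bit-reversed increment of the low MASKBITS bits of arg1
 * (the style of address update used by FFT kernels): reversing the masked
 * bits, adding 1 and reversing back is the same as incrementing the
 * bit-reversed index.  */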
1422
1423 uint32_t helper_cntlsw32(uint32_t val)
1424 {
1425     if (val & 0x80000000) {
1426         return clz32(~val);
1427     } else {
1428         return clz32(val);
1429     }
1430 }
1431
1432 uint32_t helper_cntlzw32(uint32_t val)
1433 {
1434     return clz32(val);
1435 }
1436
1437 /* 440 specific */
1438 target_ulong helper_dlmzb(CPUPPCState *env, target_ulong high,
1439                           target_ulong low, uint32_t update_Rc)
1440 {
1441     target_ulong mask;
1442     int i;
1443
1444     i = 1;
1445     for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
1446         if ((high & mask) == 0) {
1447             if (update_Rc) {
1448                 env->crf[0] = 0x4;
1449             }
1450             goto done;
1451         }
1452         i++;
1453     }
1454     for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
1455         if ((low & mask) == 0) {
1456             if (update_Rc) {
1457                 env->crf[0] = 0x8;
1458             }
1459             goto done;
1460         }
1461         i++;
1462     }
1463     if (update_Rc) {
1464         env->crf[0] = 0x2;
1465     }
1466  done:
1467     env->xer = (env->xer & ~0x7F) | i;
1468     if (update_Rc) {
1469         env->crf[0] |= xer_so;
1470     }
1471     return i;
1472 }
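/* As implemented above, dlmzb returns (in the low seven bits of XER and in
 * the return value) the count of bytes up to and including the leftmost
 * zero byte of the high:low pair; with Rc set, CR0 records whether that
 * byte was found in high (0x4), in low (0x8) or not at all (0x2),
 * plus SO.  */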