/*
 * ARM NEON vector operations.
 *
 * Copyright (c) 2007, 2008 CodeSourcery.
 * Written by Paul Brook
 *
 * This code is licensed under the GNU GPL v2.
 */
#define SIGNBIT (uint32_t)0x80000000
#define SIGNBIT64 ((uint64_t)1 << 63)

#define SET_QC() env->vfp.xregs[ARM_VFP_FPSCR] |= CPSR_Q

#define NEON_TYPE1(name, type) \
#ifdef HOST_WORDS_BIGENDIAN
#define NEON_TYPE2(name, type) \
#define NEON_TYPE4(name, type) \
#else
#define NEON_TYPE2(name, type) \
#define NEON_TYPE4(name, type) \
#endif
NEON_TYPE4(s8, int8_t)
NEON_TYPE4(u8, uint8_t)
NEON_TYPE2(s16, int16_t)
NEON_TYPE2(u16, uint16_t)
NEON_TYPE1(s32, int32_t)
NEON_TYPE1(u32, uint32_t)
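
/* For reference (illustrative, reconstructing the elided macro bodies): on a
 * little-endian host, NEON_TYPE4(s8, int8_t) is expected to expand to roughly
 *
 *     typedef struct { int8_t v1; int8_t v2; int8_t v3; int8_t v4; } neon_s8;
 *
 * with v1 the least significant lane of the uint32_t it overlays; the
 * big-endian variants above reverse the member order so lane numbering
 * still matches the guest's view. */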
/* Copy from a uint32_t to a vector structure type.  */
#define NEON_UNPACK(vtype, dest, val) do { \

/* Copy from a vector structure type to a uint32_t.  */
#define NEON_PACK(vtype, dest, val) do { \

    NEON_FN(vdest.v1, vsrc1.v1, vsrc2.v1);

    NEON_FN(vdest.v1, vsrc1.v1, vsrc2.v1); \
    NEON_FN(vdest.v2, vsrc1.v2, vsrc2.v2);

    NEON_FN(vdest.v1, vsrc1.v1, vsrc2.v1); \
    NEON_FN(vdest.v2, vsrc1.v2, vsrc2.v2); \
    NEON_FN(vdest.v3, vsrc1.v3, vsrc2.v3); \
    NEON_FN(vdest.v4, vsrc1.v4, vsrc2.v4);
#define NEON_VOP_BODY(vtype, n) \
    NEON_UNPACK(vtype, vsrc1, arg1); \
    NEON_UNPACK(vtype, vsrc2, arg2); \
    NEON_PACK(vtype, res, vdest); \

#define NEON_VOP(name, vtype, n) \
uint32_t HELPER(glue(neon_,name))(uint32_t arg1, uint32_t arg2) \
NEON_VOP_BODY(vtype, n)

#define NEON_VOP_ENV(name, vtype, n) \
uint32_t HELPER(glue(neon_,name))(CPUARMState *env, uint32_t arg1, uint32_t arg2) \
NEON_VOP_BODY(vtype, n)
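
/* Illustrative sketch (not part of the original file): what a generated
 * two-operand helper effectively computes.  With NEON_FN(dest, src1, src2)
 * defined as "dest = src1 + src2", NEON_VOP(add_u8, neon_u8, 4) behaves like
 * this hand-written model (hypothetical name): */
static inline uint32_t neon_vop_u8_model(uint32_t arg1, uint32_t arg2)
{
    uint32_t res = 0;
    int lane;
    for (lane = 0; lane < 4; lane++) {
        uint8_t s1 = arg1 >> (lane * 8);   /* NEON_UNPACK: split the lanes */
        uint8_t s2 = arg2 >> (lane * 8);
        uint8_t d = s1 + s2;               /* NEON_FN applied per lane */
        res |= (uint32_t)d << (lane * 8);  /* NEON_PACK: reassemble */
    }
    return res;
}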
/* Pairwise operations.  */
/* For 32-bit elements each segment only contains a single element, so
   the elementwise and pairwise operations are the same.  */

    NEON_FN(vdest.v1, vsrc1.v1, vsrc1.v2); \
    NEON_FN(vdest.v2, vsrc2.v1, vsrc2.v2);

    NEON_FN(vdest.v1, vsrc1.v1, vsrc1.v2); \
    NEON_FN(vdest.v2, vsrc1.v3, vsrc1.v4); \
    NEON_FN(vdest.v3, vsrc2.v1, vsrc2.v2); \
    NEON_FN(vdest.v4, vsrc2.v3, vsrc2.v4); \

#define NEON_POP(name, vtype, n) \
uint32_t HELPER(glue(neon_,name))(uint32_t arg1, uint32_t arg2) \
    NEON_UNPACK(vtype, vsrc1, arg1); \
    NEON_UNPACK(vtype, vsrc2, arg2); \
    NEON_PACK(vtype, res, vdest); \
/* Unary operators.  */
#define NEON_VOP1(name, vtype, n) \
uint32_t HELPER(glue(neon_,name))(uint32_t arg) \
    NEON_UNPACK(vtype, vsrc1, arg); \
    NEON_PACK(vtype, arg, vdest); \

#define NEON_USAT(dest, src1, src2, type) do { \
    uint32_t tmp = (uint32_t)src1 + (uint32_t)src2; \
    if (tmp != (type)tmp) { \
#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint8_t)
NEON_VOP_ENV(qadd_u8, neon_u8, 4)

#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint16_t)
NEON_VOP_ENV(qadd_u16, neon_u16, 2)
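
/* Reference model for one lane of the saturating unsigned add above
 * (illustrative; the NEON_USAT body is abridged).  The sum is formed in a
 * wider type; if it no longer fits the lane type, the real helper calls
 * SET_QC() and clamps to the lane maximum. */
static inline uint8_t qadd_u8_lane_model(uint8_t a, uint8_t b, int *sat)
{
    uint32_t tmp = (uint32_t)a + (uint32_t)b;
    if (tmp != (uint8_t)tmp) {  /* result does not fit in 8 bits */
        *sat = 1;               /* stands in for SET_QC() */
        tmp = 0xff;             /* saturate to UINT8_MAX */
    }
    return tmp;
}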
uint32_t HELPER(neon_qadd_u32)(CPUARMState *env, uint32_t a, uint32_t b)
    uint32_t res = a + b;

uint64_t HELPER(neon_qadd_u64)(CPUARMState *env, uint64_t src1, uint64_t src2)

#define NEON_SSAT(dest, src1, src2, type) do { \
    int32_t tmp = (uint32_t)src1 + (uint32_t)src2; \
    if (tmp != (type)tmp) { \
            tmp = (1 << (sizeof(type) * 8 - 1)) - 1; \
            tmp = 1 << (sizeof(type) * 8 - 1); \

#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int8_t)
NEON_VOP_ENV(qadd_s8, neon_s8, 4)

#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int16_t)
NEON_VOP_ENV(qadd_s16, neon_s16, 2)

uint32_t HELPER(neon_qadd_s32)(CPUARMState *env, uint32_t a, uint32_t b)
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);

uint64_t HELPER(neon_qadd_s64)(CPUARMState *env, uint64_t src1, uint64_t src2)
    if (((res ^ src1) & SIGNBIT64) && !((src1 ^ src2) & SIGNBIT64)) {
        res = ((int64_t)src1 >> 63) ^ ~SIGNBIT64;
#define NEON_USAT(dest, src1, src2, type) do { \
    uint32_t tmp = (uint32_t)src1 - (uint32_t)src2; \
    if (tmp != (type)tmp) { \

#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint8_t)
NEON_VOP_ENV(qsub_u8, neon_u8, 4)

#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint16_t)
NEON_VOP_ENV(qsub_u16, neon_u16, 2)

uint32_t HELPER(neon_qsub_u32)(CPUARMState *env, uint32_t a, uint32_t b)
    uint32_t res = a - b;

uint64_t HELPER(neon_qsub_u64)(CPUARMState *env, uint64_t src1, uint64_t src2)

#define NEON_SSAT(dest, src1, src2, type) do { \
    int32_t tmp = (uint32_t)src1 - (uint32_t)src2; \
    if (tmp != (type)tmp) { \
            tmp = (1 << (sizeof(type) * 8 - 1)) - 1; \
            tmp = 1 << (sizeof(type) * 8 - 1); \

#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int8_t)
NEON_VOP_ENV(qsub_s8, neon_s8, 4)

#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int16_t)
NEON_VOP_ENV(qsub_s16, neon_s16, 2)

uint32_t HELPER(neon_qsub_s32)(CPUARMState *env, uint32_t a, uint32_t b)
    uint32_t res = a - b;
    if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);

uint64_t HELPER(neon_qsub_s64)(CPUARMState *env, uint64_t src1, uint64_t src2)
    if (((res ^ src1) & SIGNBIT64) && ((src1 ^ src2) & SIGNBIT64)) {
        res = ((int64_t)src1 >> 63) ^ ~SIGNBIT64;
#define NEON_FN(dest, src1, src2) dest = (src1 + src2) >> 1
NEON_VOP(hadd_s8, neon_s8, 4)
NEON_VOP(hadd_u8, neon_u8, 4)
NEON_VOP(hadd_s16, neon_s16, 2)
NEON_VOP(hadd_u16, neon_u16, 2)

int32_t HELPER(neon_hadd_s32)(int32_t src1, int32_t src2)
    dest = (src1 >> 1) + (src2 >> 1);

uint32_t HELPER(neon_hadd_u32)(uint32_t src1, uint32_t src2)
    dest = (src1 >> 1) + (src2 >> 1);
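
/* Why the 32-bit halving adds avoid (src1 + src2) >> 1: the intermediate sum
 * can overflow.  Since a + b == 2*((a >> 1) + (b >> 1)) + (a & 1) + (b & 1),
 * the halved sum is recoverable without a wide intermediate.  Illustrative
 * model for the unsigned case (hypothetical name): */
static inline uint32_t hadd_u32_model(uint32_t a, uint32_t b)
{
    return (a >> 1) + (b >> 1) + (a & b & 1);  /* carry of the two low bits */
}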
#define NEON_FN(dest, src1, src2) dest = (src1 + src2 + 1) >> 1
NEON_VOP(rhadd_s8, neon_s8, 4)
NEON_VOP(rhadd_u8, neon_u8, 4)
NEON_VOP(rhadd_s16, neon_s16, 2)
NEON_VOP(rhadd_u16, neon_u16, 2)

int32_t HELPER(neon_rhadd_s32)(int32_t src1, int32_t src2)
    dest = (src1 >> 1) + (src2 >> 1);
    if ((src1 | src2) & 1)

uint32_t HELPER(neon_rhadd_u32)(uint32_t src1, uint32_t src2)
    dest = (src1 >> 1) + (src2 >> 1);
    if ((src1 | src2) & 1)

#define NEON_FN(dest, src1, src2) dest = (src1 - src2) >> 1
NEON_VOP(hsub_s8, neon_s8, 4)
NEON_VOP(hsub_u8, neon_u8, 4)
NEON_VOP(hsub_s16, neon_s16, 2)
NEON_VOP(hsub_u16, neon_u16, 2)

int32_t HELPER(neon_hsub_s32)(int32_t src1, int32_t src2)
    dest = (src1 >> 1) - (src2 >> 1);
    if ((~src1) & src2 & 1)

uint32_t HELPER(neon_hsub_u32)(uint32_t src1, uint32_t src2)
    dest = (src1 >> 1) - (src2 >> 1);
    if ((~src1) & src2 & 1)
#define NEON_FN(dest, src1, src2) dest = (src1 > src2) ? ~0 : 0
NEON_VOP(cgt_s8, neon_s8, 4)
NEON_VOP(cgt_u8, neon_u8, 4)
NEON_VOP(cgt_s16, neon_s16, 2)
NEON_VOP(cgt_u16, neon_u16, 2)
NEON_VOP(cgt_s32, neon_s32, 1)
NEON_VOP(cgt_u32, neon_u32, 1)

#define NEON_FN(dest, src1, src2) dest = (src1 >= src2) ? ~0 : 0
NEON_VOP(cge_s8, neon_s8, 4)
NEON_VOP(cge_u8, neon_u8, 4)
NEON_VOP(cge_s16, neon_s16, 2)
NEON_VOP(cge_u16, neon_u16, 2)
NEON_VOP(cge_s32, neon_s32, 1)
NEON_VOP(cge_u32, neon_u32, 1)
#define NEON_FN(dest, src1, src2) dest = (src1 < src2) ? src1 : src2
NEON_VOP(min_s8, neon_s8, 4)
NEON_VOP(min_u8, neon_u8, 4)
NEON_VOP(min_s16, neon_s16, 2)
NEON_VOP(min_u16, neon_u16, 2)
NEON_VOP(min_s32, neon_s32, 1)
NEON_VOP(min_u32, neon_u32, 1)
NEON_POP(pmin_s8, neon_s8, 4)
NEON_POP(pmin_u8, neon_u8, 4)
NEON_POP(pmin_s16, neon_s16, 2)
NEON_POP(pmin_u16, neon_u16, 2)

#define NEON_FN(dest, src1, src2) dest = (src1 > src2) ? src1 : src2
NEON_VOP(max_s8, neon_s8, 4)
NEON_VOP(max_u8, neon_u8, 4)
NEON_VOP(max_s16, neon_s16, 2)
NEON_VOP(max_u16, neon_u16, 2)
NEON_VOP(max_s32, neon_s32, 1)
NEON_VOP(max_u32, neon_u32, 1)
NEON_POP(pmax_s8, neon_s8, 4)
NEON_POP(pmax_u8, neon_u8, 4)
NEON_POP(pmax_s16, neon_s16, 2)
NEON_POP(pmax_u16, neon_u16, 2)
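
/* Pairwise semantics, as an illustrative model (hypothetical name): pmin_u8
 * reduces adjacent lanes within each operand, so the result's low lanes come
 * from arg1 and its high lanes from arg2, matching the pairwise expansion
 * shown earlier. */
static inline uint32_t pmin_u8_model(uint32_t arg1, uint32_t arg2)
{
    uint8_t r[4];
    uint32_t res = 0;
    int i;
    for (i = 0; i < 2; i++) {
        uint8_t x = arg1 >> (i * 16);
        uint8_t y = arg1 >> (i * 16 + 8);
        r[i] = x < y ? x : y;          /* pairs from arg1 -> lanes 0, 1 */
        x = arg2 >> (i * 16);
        y = arg2 >> (i * 16 + 8);
        r[i + 2] = x < y ? x : y;      /* pairs from arg2 -> lanes 2, 3 */
    }
    for (i = 0; i < 4; i++) {
        res |= (uint32_t)r[i] << (i * 8);
    }
    return res;
}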
#define NEON_FN(dest, src1, src2) \
    dest = (src1 > src2) ? (src1 - src2) : (src2 - src1)
NEON_VOP(abd_s8, neon_s8, 4)
NEON_VOP(abd_u8, neon_u8, 4)
NEON_VOP(abd_s16, neon_s16, 2)
NEON_VOP(abd_u16, neon_u16, 2)
NEON_VOP(abd_s32, neon_s32, 1)
NEON_VOP(abd_u32, neon_u32, 1)

#define NEON_FN(dest, src1, src2) do { \
    tmp = (int8_t)src2; \
    if (tmp >= (ssize_t)sizeof(src1) * 8 || \
        tmp <= -(ssize_t)sizeof(src1) * 8) { \
    } else if (tmp < 0) { \
        dest = src1 >> -tmp; \
        dest = src1 << tmp; \
NEON_VOP(shl_u8, neon_u8, 4)
NEON_VOP(shl_u16, neon_u16, 2)
NEON_VOP(shl_u32, neon_u32, 1)
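
/* VSHL takes its shift count from the bottom byte of the second operand, as
 * a signed quantity: positive counts shift left, negative counts shift
 * right, and a magnitude of the lane width or more yields 0 for the unsigned
 * variants.  Illustrative scalar model for one u32 lane (hypothetical
 * name): */
static inline uint32_t shl_u32_model(uint32_t val, uint32_t shiftop)
{
    int8_t shift = (int8_t)shiftop;     /* only the low byte is significant */
    if (shift >= 32 || shift <= -32) {
        return 0;                       /* shifted completely out */
    } else if (shift < 0) {
        return val >> -shift;           /* negative count: right shift */
    }
    return val << shift;
}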
uint64_t HELPER(neon_shl_u64)(uint64_t val, uint64_t shiftop)
    int8_t shift = (int8_t)shiftop;
    if (shift >= 64 || shift <= -64) {
    } else if (shift < 0) {

#define NEON_FN(dest, src1, src2) do { \
    tmp = (int8_t)src2; \
    if (tmp >= (ssize_t)sizeof(src1) * 8) { \
    } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \
        dest = src1 >> (sizeof(src1) * 8 - 1); \
    } else if (tmp < 0) { \
        dest = src1 >> -tmp; \
        dest = src1 << tmp; \
NEON_VOP(shl_s8, neon_s8, 4)
NEON_VOP(shl_s16, neon_s16, 2)
NEON_VOP(shl_s32, neon_s32, 1)

uint64_t HELPER(neon_shl_s64)(uint64_t valop, uint64_t shiftop)
    int8_t shift = (int8_t)shiftop;
    } else if (shift <= -64) {
    } else if (shift < 0) {
#define NEON_FN(dest, src1, src2) do { \
    tmp = (int8_t)src2; \
    if ((tmp >= (ssize_t)sizeof(src1) * 8) \
        || (tmp <= -(ssize_t)sizeof(src1) * 8)) { \
    } else if (tmp < 0) { \
        dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \
        dest = src1 << tmp; \
NEON_VOP(rshl_s8, neon_s8, 4)
NEON_VOP(rshl_s16, neon_s16, 2)

/* The addition of the rounding constant may overflow, so we use an
 * intermediate 64 bit accumulator.  */
uint32_t HELPER(neon_rshl_s32)(uint32_t valop, uint32_t shiftop)
    int32_t val = (int32_t)valop;
    int8_t shift = (int8_t)shiftop;
    if ((shift >= 32) || (shift <= -32)) {
    } else if (shift < 0) {
        int64_t big_dest = ((int64_t)val + (1 << (-1 - shift)));
        dest = big_dest >> -shift;

/* Handling addition overflow with 64 bit input values is more
 * tricky than with 32 bit values.  */
uint64_t HELPER(neon_rshl_s64)(uint64_t valop, uint64_t shiftop)
    int8_t shift = (int8_t)shiftop;
    if ((shift >= 64) || (shift <= -64)) {
    } else if (shift < 0) {
        val >>= (-shift - 1);
        if (val == INT64_MAX) {
            /* In this case, it means that the rounding constant is 1,
             * and the addition would overflow. Return the actual
             * result directly.  */
            val = 0x4000000000000000LL;
#define NEON_FN(dest, src1, src2) do { \
    tmp = (int8_t)src2; \
    if (tmp >= (ssize_t)sizeof(src1) * 8 || \
        tmp < -(ssize_t)sizeof(src1) * 8) { \
    } else if (tmp == -(ssize_t)sizeof(src1) * 8) { \
        dest = src1 >> (-tmp - 1); \
    } else if (tmp < 0) { \
        dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \
        dest = src1 << tmp; \
NEON_VOP(rshl_u8, neon_u8, 4)
NEON_VOP(rshl_u16, neon_u16, 2)

/* The addition of the rounding constant may overflow, so we use an
 * intermediate 64 bit accumulator.  */
uint32_t HELPER(neon_rshl_u32)(uint32_t val, uint32_t shiftop)
    int8_t shift = (int8_t)shiftop;
    if (shift >= 32 || shift < -32) {
    } else if (shift == -32) {
    } else if (shift < 0) {
        uint64_t big_dest = ((uint64_t)val + (1 << (-1 - shift)));
        dest = big_dest >> -shift;

/* Handling addition overflow with 64 bit input values is more
 * tricky than with 32 bit values.  */
uint64_t HELPER(neon_rshl_u64)(uint64_t val, uint64_t shiftop)
    int8_t shift = (uint8_t)shiftop;
    if (shift >= 64 || shift < -64) {
    } else if (shift == -64) {
        /* Rounding a 1-bit result just preserves that bit.  */
    } else if (shift < 0) {
        val >>= (-shift - 1);
        if (val == UINT64_MAX) {
            /* In this case, it means that the rounding constant is 1,
             * and the addition would overflow. Return the actual
             * result directly.  */
            val = 0x8000000000000000ULL;
#define NEON_FN(dest, src1, src2) do { \
    tmp = (int8_t)src2; \
    if (tmp >= (ssize_t)sizeof(src1) * 8) { \
    } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \
    } else if (tmp < 0) { \
        dest = src1 >> -tmp; \
        dest = src1 << tmp; \
        if ((dest >> tmp) != src1) { \
NEON_VOP_ENV(qshl_u8, neon_u8, 4)
NEON_VOP_ENV(qshl_u16, neon_u16, 2)
NEON_VOP_ENV(qshl_u32, neon_u32, 1)

uint64_t HELPER(neon_qshl_u64)(CPUARMState *env, uint64_t val, uint64_t shiftop)
    int8_t shift = (int8_t)shiftop;
    } else if (shift <= -64) {
    } else if (shift < 0) {
        if ((val >> shift) != tmp) {

#define NEON_FN(dest, src1, src2) do { \
    tmp = (int8_t)src2; \
    if (tmp >= (ssize_t)sizeof(src1) * 8) { \
            dest = (uint32_t)(1 << (sizeof(src1) * 8 - 1)); \
    } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \
    } else if (tmp < 0) { \
        dest = src1 >> -tmp; \
        dest = src1 << tmp; \
        if ((dest >> tmp) != src1) { \
            dest = (uint32_t)(1 << (sizeof(src1) * 8 - 1)); \
NEON_VOP_ENV(qshl_s8, neon_s8, 4)
NEON_VOP_ENV(qshl_s16, neon_s16, 2)
NEON_VOP_ENV(qshl_s32, neon_s32, 1)

uint64_t HELPER(neon_qshl_s64)(CPUARMState *env, uint64_t valop, uint64_t shiftop)
    int8_t shift = (uint8_t)shiftop;
            val = (val >> 63) ^ ~SIGNBIT64;
    } else if (shift <= -64) {
    } else if (shift < 0) {
        if ((val >> shift) != tmp) {
            val = (tmp >> 63) ^ ~SIGNBIT64;
#define NEON_FN(dest, src1, src2) do { \
    if (src1 & (1 << (sizeof(src1) * 8 - 1))) { \
        tmp = (int8_t)src2; \
        if (tmp >= (ssize_t)sizeof(src1) * 8) { \
        } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \
        } else if (tmp < 0) { \
            dest = src1 >> -tmp; \
            dest = src1 << tmp; \
            if ((dest >> tmp) != src1) { \
NEON_VOP_ENV(qshlu_s8, neon_u8, 4)
NEON_VOP_ENV(qshlu_s16, neon_u16, 2)

uint32_t HELPER(neon_qshlu_s32)(CPUARMState *env, uint32_t valop, uint32_t shiftop)
    if ((int32_t)valop < 0) {
    return helper_neon_qshl_u32(env, valop, shiftop);

uint64_t HELPER(neon_qshlu_s64)(CPUARMState *env, uint64_t valop, uint64_t shiftop)
    if ((int64_t)valop < 0) {
    return helper_neon_qshl_u64(env, valop, shiftop);
#define NEON_FN(dest, src1, src2) do { \
    tmp = (int8_t)src2; \
    if (tmp >= (ssize_t)sizeof(src1) * 8) { \
    } else if (tmp < -(ssize_t)sizeof(src1) * 8) { \
    } else if (tmp == -(ssize_t)sizeof(src1) * 8) { \
        dest = src1 >> (sizeof(src1) * 8 - 1); \
    } else if (tmp < 0) { \
        dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \
        dest = src1 << tmp; \
        if ((dest >> tmp) != src1) { \
NEON_VOP_ENV(qrshl_u8, neon_u8, 4)
NEON_VOP_ENV(qrshl_u16, neon_u16, 2)

/* The addition of the rounding constant may overflow, so we use an
 * intermediate 64 bit accumulator.  */
uint32_t HELPER(neon_qrshl_u32)(CPUARMState *env, uint32_t val, uint32_t shiftop)
    int8_t shift = (int8_t)shiftop;
    } else if (shift < -32) {
    } else if (shift == -32) {
    } else if (shift < 0) {
        uint64_t big_dest = ((uint64_t)val + (1 << (-1 - shift)));
        dest = big_dest >> -shift;
        if ((dest >> shift) != val) {

/* Handling addition overflow with 64 bit input values is more
 * tricky than with 32 bit values.  */
uint64_t HELPER(neon_qrshl_u64)(CPUARMState *env, uint64_t val, uint64_t shiftop)
    int8_t shift = (int8_t)shiftop;
    } else if (shift < -64) {
    } else if (shift == -64) {
    } else if (shift < 0) {
        val >>= (-shift - 1);
        if (val == UINT64_MAX) {
            /* In this case, it means that the rounding constant is 1,
             * and the addition would overflow. Return the actual
             * result directly.  */
            val = 0x8000000000000000ULL;
        if ((val >> shift) != tmp) {
#define NEON_FN(dest, src1, src2) do { \
    tmp = (int8_t)src2; \
    if (tmp >= (ssize_t)sizeof(src1) * 8) { \
            dest = (1 << (sizeof(src1) * 8 - 1)); \
    } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \
    } else if (tmp < 0) { \
        dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \
        dest = src1 << tmp; \
        if ((dest >> tmp) != src1) { \
            dest = (uint32_t)(1 << (sizeof(src1) * 8 - 1)); \
NEON_VOP_ENV(qrshl_s8, neon_s8, 4)
NEON_VOP_ENV(qrshl_s16, neon_s16, 2)

/* The addition of the rounding constant may overflow, so we use an
 * intermediate 64 bit accumulator.  */
uint32_t HELPER(neon_qrshl_s32)(CPUARMState *env, uint32_t valop, uint32_t shiftop)
    int32_t val = (int32_t)valop;
    int8_t shift = (int8_t)shiftop;
            dest = (val >> 31) ^ ~SIGNBIT;
    } else if (shift <= -32) {
    } else if (shift < 0) {
        int64_t big_dest = ((int64_t)val + (1 << (-1 - shift)));
        dest = big_dest >> -shift;
        if ((dest >> shift) != val) {
            dest = (val >> 31) ^ ~SIGNBIT;

/* Handling addition overflow with 64 bit input values is more
 * tricky than with 32 bit values.  */
uint64_t HELPER(neon_qrshl_s64)(CPUARMState *env, uint64_t valop, uint64_t shiftop)
    int8_t shift = (uint8_t)shiftop;
            val = (val >> 63) ^ ~SIGNBIT64;
    } else if (shift <= -64) {
    } else if (shift < 0) {
        val >>= (-shift - 1);
        if (val == INT64_MAX) {
            /* In this case, it means that the rounding constant is 1,
             * and the addition would overflow. Return the actual
             * result directly.  */
            val = 0x4000000000000000ULL;
        if ((val >> shift) != tmp) {
            val = (tmp >> 63) ^ ~SIGNBIT64;
uint32_t HELPER(neon_add_u8)(uint32_t a, uint32_t b)
    mask = (a ^ b) & 0x80808080u;
    return (a + b) ^ mask;

uint32_t HELPER(neon_add_u16)(uint32_t a, uint32_t b)
    mask = (a ^ b) & 0x80008000u;
    return (a + b) ^ mask;
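
/* SWAR note (added commentary): the add_u8/add_u16 helpers clear the top bit
 * of every lane before adding, so a lane's carry can never ripple into its
 * neighbour, then XOR the (a ^ b) top-bit mask back in.  That XOR completes
 * the full-adder sum bit for each lane's top position: the masked add
 * already supplied the carry into it, and its own carry out is exactly what
 * must be discarded at a lane boundary. */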
#define NEON_FN(dest, src1, src2) dest = src1 + src2
NEON_POP(padd_u8, neon_u8, 4)
NEON_POP(padd_u16, neon_u16, 2)

#define NEON_FN(dest, src1, src2) dest = src1 - src2
NEON_VOP(sub_u8, neon_u8, 4)
NEON_VOP(sub_u16, neon_u16, 2)

#define NEON_FN(dest, src1, src2) dest = src1 * src2
NEON_VOP(mul_u8, neon_u8, 4)
NEON_VOP(mul_u16, neon_u16, 2)

/* Polynomial multiplication is like integer multiplication except the
   partial products are XORed, not added.  */
uint32_t HELPER(neon_mul_p8)(uint32_t op1, uint32_t op2)
            mask |= (0xff << 8);
        if (op1 & (1 << 16))
            mask |= (0xff << 16);
        if (op1 & (1 << 24))
            mask |= (0xff << 24);
        result ^= op2 & mask;
        op1 = (op1 >> 1) & 0x7f7f7f7f;
        op2 = (op2 << 1) & 0xfefefefe;
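
/* Scalar reference (illustrative, hypothetical name): carry-less multiply of
 * two 8-bit polynomials over GF(2), the per-lane operation the shift/XOR
 * loop above performs on four lanes at once (mul_p8 keeps only the low
 * 8 bits of each lane's product).  Example: pmul8_model(3, 3) == 5, i.e.
 * (x + 1)*(x + 1) == x^2 + 1 when partial products are XORed. */
static inline uint16_t pmul8_model(uint8_t a, uint8_t b)
{
    uint16_t result = 0;
    uint16_t partial = b;
    while (a) {
        if (a & 1) {
            result ^= partial;  /* XOR the partial product, never add */
        }
        a >>= 1;
        partial <<= 1;
    }
    return result;
}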
uint64_t HELPER(neon_mull_p8)(uint32_t op1, uint32_t op2)
    uint64_t result = 0;
    uint64_t op2ex = op2;
    op2ex = (op2ex & 0xff) |
        ((op2ex & 0xff00) << 8) |
        ((op2ex & 0xff0000) << 16) |
        ((op2ex & 0xff000000) << 24);
        if (op1 & (1 << 8)) {
            mask |= (0xffffU << 16);
        if (op1 & (1 << 16)) {
            mask |= (0xffffULL << 32);
        if (op1 & (1 << 24)) {
            mask |= (0xffffULL << 48);
        result ^= op2ex & mask;
        op1 = (op1 >> 1) & 0x7f7f7f7f;
#define NEON_FN(dest, src1, src2) dest = (src1 & src2) ? -1 : 0
NEON_VOP(tst_u8, neon_u8, 4)
NEON_VOP(tst_u16, neon_u16, 2)
NEON_VOP(tst_u32, neon_u32, 1)

#define NEON_FN(dest, src1, src2) dest = (src1 == src2) ? -1 : 0
NEON_VOP(ceq_u8, neon_u8, 4)
NEON_VOP(ceq_u16, neon_u16, 2)
NEON_VOP(ceq_u32, neon_u32, 1)

#define NEON_FN(dest, src, dummy) dest = (src < 0) ? -src : src
NEON_VOP1(abs_s8, neon_s8, 4)
NEON_VOP1(abs_s16, neon_s16, 2)
/* Count Leading Sign/Zero Bits.  */
static inline int do_clz8(uint8_t x)

static inline int do_clz16(uint16_t x)
    for (n = 16; x; n--)

#define NEON_FN(dest, src, dummy) dest = do_clz8(src)
NEON_VOP1(clz_u8, neon_u8, 4)

#define NEON_FN(dest, src, dummy) dest = do_clz16(src)
NEON_VOP1(clz_u16, neon_u16, 2)

#define NEON_FN(dest, src, dummy) dest = do_clz8((src < 0) ? ~src : src) - 1
NEON_VOP1(cls_s8, neon_s8, 4)

#define NEON_FN(dest, src, dummy) dest = do_clz16((src < 0) ? ~src : src) - 1
NEON_VOP1(cls_s16, neon_s16, 2)

uint32_t HELPER(neon_cls_s32)(uint32_t x)
    for (count = 32; x; count--)
uint32_t HELPER(neon_cnt_u8)(uint32_t x)
    x = (x & 0x55555555) + ((x >> 1) & 0x55555555);
    x = (x & 0x33333333) + ((x >> 2) & 0x33333333);
    x = (x & 0x0f0f0f0f) + ((x >> 4) & 0x0f0f0f0f);
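
/* Added commentary: this is the classic SWAR population count, run on all
 * four byte lanes at once.  Each step sums adjacent bit fields: 1-bit fields
 * into 2-bit fields, then 2-bit into 4-bit, then 4-bit into 8-bit, leaving
 * each byte holding its own bit count, as VCNT requires.  Worked example for
 * one byte: 0xb1 = 10110001 -> 0x61 (counts 1,2,0,1 per bit pair)
 * -> 0x31 (counts 3,1 per nibble) -> 0x04 (4 bits set). */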
#define NEON_QDMULH16(dest, src1, src2, round) do { \
    uint32_t tmp = (int32_t)(int16_t) src1 * (int16_t) src2; \
    if ((tmp ^ (tmp << 1)) & SIGNBIT) { \
        tmp = (tmp >> 31) ^ ~SIGNBIT; \
        int32_t old = tmp; \
        if ((int32_t)tmp < old) { \
            tmp = SIGNBIT - 1; \

#define NEON_FN(dest, src1, src2) NEON_QDMULH16(dest, src1, src2, 0)
NEON_VOP_ENV(qdmulh_s16, neon_s16, 2)

#define NEON_FN(dest, src1, src2) NEON_QDMULH16(dest, src1, src2, 1)
NEON_VOP_ENV(qrdmulh_s16, neon_s16, 2)

#undef NEON_QDMULH16

#define NEON_QDMULH32(dest, src1, src2, round) do { \
    uint64_t tmp = (int64_t)(int32_t) src1 * (int32_t) src2; \
    if ((tmp ^ (tmp << 1)) & SIGNBIT64) { \
        tmp = (tmp >> 63) ^ ~SIGNBIT64; \
        int64_t old = tmp; \
        tmp += (int64_t)1 << 31; \
        if ((int64_t)tmp < old) { \
            tmp = SIGNBIT64 - 1; \

#define NEON_FN(dest, src1, src2) NEON_QDMULH32(dest, src1, src2, 0)
NEON_VOP_ENV(qdmulh_s32, neon_s32, 1)

#define NEON_FN(dest, src1, src2) NEON_QDMULH32(dest, src1, src2, 1)
NEON_VOP_ENV(qrdmulh_s32, neon_s32, 1)

#undef NEON_QDMULH32
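
/* Added commentary on the overflow checks above: the product of two 16-bit
 * (or 32-bit) values only overflows when doubled if both inputs are the most
 * negative value, making the product 2^30 (resp. 2^62).  The expression
 * (tmp ^ (tmp << 1)) & SIGNBIT asks whether the top two bits of tmp differ,
 * which is exactly the condition under which doubling changes the sign; the
 * rounded (round != 0) variants re-check for overflow again after adding the
 * rounding constant. */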
uint32_t HELPER(neon_narrow_u8)(uint64_t x)
    return (x & 0xffu) | ((x >> 8) & 0xff00u) | ((x >> 16) & 0xff0000u)
           | ((x >> 24) & 0xff000000u);

uint32_t HELPER(neon_narrow_u16)(uint64_t x)
    return (x & 0xffffu) | ((x >> 16) & 0xffff0000u);

uint32_t HELPER(neon_narrow_high_u8)(uint64_t x)
    return ((x >> 8) & 0xff) | ((x >> 16) & 0xff00)
           | ((x >> 24) & 0xff0000) | ((x >> 32) & 0xff000000);

uint32_t HELPER(neon_narrow_high_u16)(uint64_t x)
    return ((x >> 16) & 0xffff) | ((x >> 32) & 0xffff0000);

uint32_t HELPER(neon_narrow_round_high_u8)(uint64_t x)
    x &= 0xff80ff80ff80ff80ull;
    x += 0x0080008000800080ull;
    return ((x >> 8) & 0xff) | ((x >> 16) & 0xff00)
           | ((x >> 24) & 0xff0000) | ((x >> 32) & 0xff000000);

uint32_t HELPER(neon_narrow_round_high_u16)(uint64_t x)
    x &= 0xffff8000ffff8000ull;
    x += 0x0000800000008000ull;
    return ((x >> 16) & 0xffff) | ((x >> 32) & 0xffff0000);
uint32_t HELPER(neon_unarrow_sat8)(CPUARMState *env, uint64_t x)
        res |= (uint32_t)d << (n / 2); \

uint32_t HELPER(neon_narrow_sat_u8)(CPUARMState *env, uint64_t x)
    res |= (uint32_t)d << (n / 2);

uint32_t HELPER(neon_narrow_sat_s8)(CPUARMState *env, uint64_t x)
    if (s != (int8_t)s) { \
        d = (s >> 15) ^ 0x7f; \
    res |= (uint32_t)d << (n / 2);

uint32_t HELPER(neon_unarrow_sat16)(CPUARMState *env, uint64_t x)
    if (low & 0x80000000) {
    } else if (low > 0xffff) {
    if (high & 0x80000000) {
    } else if (high > 0xffff) {
    return low | (high << 16);

uint32_t HELPER(neon_narrow_sat_u16)(CPUARMState *env, uint64_t x)
    if (high > 0xffff) {
    return low | (high << 16);

uint32_t HELPER(neon_narrow_sat_s16)(CPUARMState *env, uint64_t x)
    if (low != (int16_t)low) {
        low = (low >> 31) ^ 0x7fff;
    if (high != (int16_t)high) {
        high = (high >> 31) ^ 0x7fff;
    return (uint16_t)low | (high << 16);

uint32_t HELPER(neon_unarrow_sat32)(CPUARMState *env, uint64_t x)
    if (x & 0x8000000000000000ull) {
    if (x > 0xffffffffu) {

uint32_t HELPER(neon_narrow_sat_u32)(CPUARMState *env, uint64_t x)
    if (x > 0xffffffffu) {

uint32_t HELPER(neon_narrow_sat_s32)(CPUARMState *env, uint64_t x)
    if ((int64_t)x != (int32_t)x) {
        return ((int64_t)x >> 63) ^ 0x7fffffff;
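
/* Added commentary: the signed saturation idiom used throughout this file,
 * ((wide)x >> 63) ^ 0x7fffffff (and its SIGNBIT64 variants), is a branchless
 * clamp.  For a negative input the arithmetic shift yields all-ones, and
 * ~0 ^ 0x7fffffff == 0x80000000 == INT32_MIN; for a positive out-of-range
 * input the shift yields 0, giving INT32_MAX.  The sign of the wide value
 * thus selects the correct saturation bound. */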
uint64_t HELPER(neon_widen_u8)(uint32_t x)
    tmp = (uint8_t)(x >> 8);
    tmp = (uint8_t)(x >> 16);
    tmp = (uint8_t)(x >> 24);

uint64_t HELPER(neon_widen_s8)(uint32_t x)
    ret = (uint16_t)(int8_t)x;
    tmp = (uint16_t)(int8_t)(x >> 8);
    tmp = (uint16_t)(int8_t)(x >> 16);
    tmp = (uint16_t)(int8_t)(x >> 24);

uint64_t HELPER(neon_widen_u16)(uint32_t x)
    uint64_t high = (uint16_t)(x >> 16);
    return ((uint16_t)x) | (high << 32);

uint64_t HELPER(neon_widen_s16)(uint32_t x)
    uint64_t high = (int16_t)(x >> 16);
    return ((uint32_t)(int16_t)x) | (high << 32);

uint64_t HELPER(neon_addl_u16)(uint64_t a, uint64_t b)
    mask = (a ^ b) & 0x8000800080008000ull;
    a &= ~0x8000800080008000ull;
    b &= ~0x8000800080008000ull;
    return (a + b) ^ mask;

uint64_t HELPER(neon_addl_u32)(uint64_t a, uint64_t b)
    mask = (a ^ b) & 0x8000000080000000ull;
    a &= ~0x8000000080000000ull;
    b &= ~0x8000000080000000ull;
    return (a + b) ^ mask;
uint64_t HELPER(neon_paddl_u16)(uint64_t a, uint64_t b)
    tmp = a & 0x0000ffff0000ffffull;
    tmp += (a >> 16) & 0x0000ffff0000ffffull;
    tmp2 = b & 0xffff0000ffff0000ull;
    tmp2 += (b << 16) & 0xffff0000ffff0000ull;
    return ( tmp          & 0xffff)
         | ((tmp  >> 16)  & 0xffff0000ull)
         | ((tmp2 << 16)  & 0xffff00000000ull)
         | ( tmp2         & 0xffff000000000000ull);

uint64_t HELPER(neon_paddl_u32)(uint64_t a, uint64_t b)
    uint32_t low = a + (a >> 32);
    uint32_t high = b + (b >> 32);
    return low + ((uint64_t)high << 32);

uint64_t HELPER(neon_subl_u16)(uint64_t a, uint64_t b)
    mask = (a ^ ~b) & 0x8000800080008000ull;
    a |= 0x8000800080008000ull;
    b &= ~0x8000800080008000ull;
    return (a - b) ^ mask;

uint64_t HELPER(neon_subl_u32)(uint64_t a, uint64_t b)
    mask = (a ^ ~b) & 0x8000000080000000ull;
    a |= 0x8000000080000000ull;
    b &= ~0x8000000080000000ull;
    return (a - b) ^ mask;
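
/* Added commentary: subl_u16/subl_u32 are the subtraction counterpart of the
 * lane-wise add trick above.  Forcing every a-lane's top bit to 1 and every
 * b-lane's to 0 guarantees no borrow crosses a lane boundary; the computed
 * top bit is then 1 ^ borrow_in, and XORing in mask = (a ^ ~b) at the top
 * bit position restores the true difference bit a ^ b ^ borrow_in. */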
uint64_t HELPER(neon_addl_saturate_s32)(CPUARMState *env, uint64_t a, uint64_t b)
    if (((low ^ x) & SIGNBIT) && !((x ^ y) & SIGNBIT)) {
        low = ((int32_t)x >> 31) ^ ~SIGNBIT;
    if (((high ^ x) & SIGNBIT) && !((x ^ y) & SIGNBIT)) {
        high = ((int32_t)x >> 31) ^ ~SIGNBIT;
    return low | ((uint64_t)high << 32);

uint64_t HELPER(neon_addl_saturate_s64)(CPUARMState *env, uint64_t a, uint64_t b)
    if (((result ^ a) & SIGNBIT64) && !((a ^ b) & SIGNBIT64)) {
        result = ((int64_t)a >> 63) ^ ~SIGNBIT64;
/* We have to do the arithmetic in a larger type than
 * the input type, because for example with a signed 32 bit
 * op the absolute difference can overflow a signed 32 bit value.
 */
#define DO_ABD(dest, x, y, intype, arithtype) do {            \
    arithtype tmp_x = (intype)(x);                            \
    arithtype tmp_y = (intype)(y);                            \
    dest = ((tmp_x > tmp_y) ? tmp_x - tmp_y : tmp_y - tmp_x); \
uint64_t HELPER(neon_abdl_u16)(uint32_t a, uint32_t b)
    DO_ABD(result, a, b, uint8_t, uint32_t);
    DO_ABD(tmp, a >> 8, b >> 8, uint8_t, uint32_t);
    result |= tmp << 16;
    DO_ABD(tmp, a >> 16, b >> 16, uint8_t, uint32_t);
    result |= tmp << 32;
    DO_ABD(tmp, a >> 24, b >> 24, uint8_t, uint32_t);
    result |= tmp << 48;

uint64_t HELPER(neon_abdl_s16)(uint32_t a, uint32_t b)
    DO_ABD(result, a, b, int8_t, int32_t);
    DO_ABD(tmp, a >> 8, b >> 8, int8_t, int32_t);
    result |= tmp << 16;
    DO_ABD(tmp, a >> 16, b >> 16, int8_t, int32_t);
    result |= tmp << 32;
    DO_ABD(tmp, a >> 24, b >> 24, int8_t, int32_t);
    result |= tmp << 48;

uint64_t HELPER(neon_abdl_u32)(uint32_t a, uint32_t b)
    DO_ABD(result, a, b, uint16_t, uint32_t);
    DO_ABD(tmp, a >> 16, b >> 16, uint16_t, uint32_t);
    return result | (tmp << 32);

uint64_t HELPER(neon_abdl_s32)(uint32_t a, uint32_t b)
    DO_ABD(result, a, b, int16_t, int32_t);
    DO_ABD(tmp, a >> 16, b >> 16, int16_t, int32_t);
    return result | (tmp << 32);

uint64_t HELPER(neon_abdl_u64)(uint32_t a, uint32_t b)
    DO_ABD(result, a, b, uint32_t, uint64_t);

uint64_t HELPER(neon_abdl_s64)(uint32_t a, uint32_t b)
    DO_ABD(result, a, b, int32_t, int64_t);
/* Widening multiply. Named type is the source type.  */
#define DO_MULL(dest, x, y, type1, type2) do { \
    dest = (type2)((type2)tmp_x * (type2)tmp_y); \

uint64_t HELPER(neon_mull_u8)(uint32_t a, uint32_t b)
    DO_MULL(result, a, b, uint8_t, uint16_t);
    DO_MULL(tmp, a >> 8, b >> 8, uint8_t, uint16_t);
    result |= tmp << 16;
    DO_MULL(tmp, a >> 16, b >> 16, uint8_t, uint16_t);
    result |= tmp << 32;
    DO_MULL(tmp, a >> 24, b >> 24, uint8_t, uint16_t);
    result |= tmp << 48;

uint64_t HELPER(neon_mull_s8)(uint32_t a, uint32_t b)
    DO_MULL(result, a, b, int8_t, uint16_t);
    DO_MULL(tmp, a >> 8, b >> 8, int8_t, uint16_t);
    result |= tmp << 16;
    DO_MULL(tmp, a >> 16, b >> 16, int8_t, uint16_t);
    result |= tmp << 32;
    DO_MULL(tmp, a >> 24, b >> 24, int8_t, uint16_t);
    result |= tmp << 48;

uint64_t HELPER(neon_mull_u16)(uint32_t a, uint32_t b)
    DO_MULL(result, a, b, uint16_t, uint32_t);
    DO_MULL(tmp, a >> 16, b >> 16, uint16_t, uint32_t);
    return result | (tmp << 32);

uint64_t HELPER(neon_mull_s16)(uint32_t a, uint32_t b)
    DO_MULL(result, a, b, int16_t, uint32_t);
    DO_MULL(tmp, a >> 16, b >> 16, int16_t, uint32_t);
    return result | (tmp << 32);
uint64_t HELPER(neon_negl_u16)(uint64_t x)
    result = (uint16_t)-x;
    result |= (uint64_t)tmp << 16;
    result |= (uint64_t)tmp << 32;
    result |= (uint64_t)tmp << 48;

uint64_t HELPER(neon_negl_u32)(uint64_t x)
    uint32_t high = -(x >> 32);
    return low | ((uint64_t)high << 32);
/* Saturating sign manipulation.  */
/* ??? Make these use NEON_VOP1 */
#define DO_QABS8(x) do { \
    if (x == (int8_t)0x80) { \
    } else if (x < 0) { \
uint32_t HELPER(neon_qabs_s8)(CPUARMState *env, uint32_t x)
    NEON_UNPACK(neon_s8, vec, x);
    NEON_PACK(neon_s8, x, vec);

#define DO_QNEG8(x) do { \
    if (x == (int8_t)0x80) { \
uint32_t HELPER(neon_qneg_s8)(CPUARMState *env, uint32_t x)
    NEON_UNPACK(neon_s8, vec, x);
    NEON_PACK(neon_s8, x, vec);

#define DO_QABS16(x) do { \
    if (x == (int16_t)0x8000) { \
    } else if (x < 0) { \
uint32_t HELPER(neon_qabs_s16)(CPUARMState *env, uint32_t x)
    NEON_UNPACK(neon_s16, vec, x);
    NEON_PACK(neon_s16, x, vec);

#define DO_QNEG16(x) do { \
    if (x == (int16_t)0x8000) { \
uint32_t HELPER(neon_qneg_s16)(CPUARMState *env, uint32_t x)
    NEON_UNPACK(neon_s16, vec, x);
    NEON_PACK(neon_s16, x, vec);

uint32_t HELPER(neon_qabs_s32)(CPUARMState *env, uint32_t x)
    } else if ((int32_t)x < 0) {

uint32_t HELPER(neon_qneg_s32)(CPUARMState *env, uint32_t x)
/* NEON Float helpers.  */
uint32_t HELPER(neon_min_f32)(uint32_t a, uint32_t b, void *fpstp)
    float_status *fpst = fpstp;
    return float32_val(float32_min(make_float32(a), make_float32(b), fpst));

uint32_t HELPER(neon_max_f32)(uint32_t a, uint32_t b, void *fpstp)
    float_status *fpst = fpstp;
    return float32_val(float32_max(make_float32(a), make_float32(b), fpst));

uint32_t HELPER(neon_abd_f32)(uint32_t a, uint32_t b, void *fpstp)
    float_status *fpst = fpstp;
    float32 f0 = make_float32(a);
    float32 f1 = make_float32(b);
    return float32_val(float32_abs(float32_sub(f0, f1, fpst)));
/* Floating point comparisons produce an integer result.
 * Note that EQ doesn't signal InvalidOp for QNaNs but GE and GT do.
 * Softfloat routines return 0/1, which we convert to the 0/-1 Neon requires.
 */
uint32_t HELPER(neon_ceq_f32)(uint32_t a, uint32_t b, void *fpstp)
    float_status *fpst = fpstp;
    return -float32_eq_quiet(make_float32(a), make_float32(b), fpst);

uint32_t HELPER(neon_cge_f32)(uint32_t a, uint32_t b, void *fpstp)
    float_status *fpst = fpstp;
    return -float32_le(make_float32(b), make_float32(a), fpst);

uint32_t HELPER(neon_cgt_f32)(uint32_t a, uint32_t b, void *fpstp)
    float_status *fpst = fpstp;
    return -float32_lt(make_float32(b), make_float32(a), fpst);

uint32_t HELPER(neon_acge_f32)(uint32_t a, uint32_t b, void *fpstp)
    float_status *fpst = fpstp;
    float32 f0 = float32_abs(make_float32(a));
    float32 f1 = float32_abs(make_float32(b));
    return -float32_le(f1, f0, fpst);

uint32_t HELPER(neon_acgt_f32)(uint32_t a, uint32_t b, void *fpstp)
    float_status *fpst = fpstp;
    float32 f0 = float32_abs(make_float32(a));
    float32 f1 = float32_abs(make_float32(b));
    return -float32_lt(f1, f0, fpst);
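
/* Usage note (added): the softfloat predicates return 0 or 1, so the
 * negation above yields the all-zeroes/all-ones lane mask NEON compares must
 * produce.  VACGE/VACGT compare magnitudes, hence both inputs pass through
 * float32_abs before the operand-swapped le/lt test. */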
#define ELEM(V, N, SIZE) (((V) >> ((N) * (SIZE))) & ((1ull << (SIZE)) - 1))

void HELPER(neon_qunzip8)(CPUARMState *env, uint32_t rd, uint32_t rm)
    uint64_t zm0 = float64_val(env->vfp.regs[rm]);
    uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]);
    uint64_t zd0 = float64_val(env->vfp.regs[rd]);
    uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]);
    uint64_t d0 = ELEM(zd0, 0, 8) | (ELEM(zd0, 2, 8) << 8)
        | (ELEM(zd0, 4, 8) << 16) | (ELEM(zd0, 6, 8) << 24)
        | (ELEM(zd1, 0, 8) << 32) | (ELEM(zd1, 2, 8) << 40)
        | (ELEM(zd1, 4, 8) << 48) | (ELEM(zd1, 6, 8) << 56);
    uint64_t d1 = ELEM(zm0, 0, 8) | (ELEM(zm0, 2, 8) << 8)
        | (ELEM(zm0, 4, 8) << 16) | (ELEM(zm0, 6, 8) << 24)
        | (ELEM(zm1, 0, 8) << 32) | (ELEM(zm1, 2, 8) << 40)
        | (ELEM(zm1, 4, 8) << 48) | (ELEM(zm1, 6, 8) << 56);
    uint64_t m0 = ELEM(zd0, 1, 8) | (ELEM(zd0, 3, 8) << 8)
        | (ELEM(zd0, 5, 8) << 16) | (ELEM(zd0, 7, 8) << 24)
        | (ELEM(zd1, 1, 8) << 32) | (ELEM(zd1, 3, 8) << 40)
        | (ELEM(zd1, 5, 8) << 48) | (ELEM(zd1, 7, 8) << 56);
    uint64_t m1 = ELEM(zm0, 1, 8) | (ELEM(zm0, 3, 8) << 8)
        | (ELEM(zm0, 5, 8) << 16) | (ELEM(zm0, 7, 8) << 24)
        | (ELEM(zm1, 1, 8) << 32) | (ELEM(zm1, 3, 8) << 40)
        | (ELEM(zm1, 5, 8) << 48) | (ELEM(zm1, 7, 8) << 56);
    env->vfp.regs[rm] = make_float64(m0);
    env->vfp.regs[rm + 1] = make_float64(m1);
    env->vfp.regs[rd] = make_float64(d0);
    env->vfp.regs[rd + 1] = make_float64(d1);
void HELPER(neon_qunzip16)(CPUARMState *env, uint32_t rd, uint32_t rm)
    uint64_t zm0 = float64_val(env->vfp.regs[rm]);
    uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]);
    uint64_t zd0 = float64_val(env->vfp.regs[rd]);
    uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]);
    uint64_t d0 = ELEM(zd0, 0, 16) | (ELEM(zd0, 2, 16) << 16)
        | (ELEM(zd1, 0, 16) << 32) | (ELEM(zd1, 2, 16) << 48);
    uint64_t d1 = ELEM(zm0, 0, 16) | (ELEM(zm0, 2, 16) << 16)
        | (ELEM(zm1, 0, 16) << 32) | (ELEM(zm1, 2, 16) << 48);
    uint64_t m0 = ELEM(zd0, 1, 16) | (ELEM(zd0, 3, 16) << 16)
        | (ELEM(zd1, 1, 16) << 32) | (ELEM(zd1, 3, 16) << 48);
    uint64_t m1 = ELEM(zm0, 1, 16) | (ELEM(zm0, 3, 16) << 16)
        | (ELEM(zm1, 1, 16) << 32) | (ELEM(zm1, 3, 16) << 48);
    env->vfp.regs[rm] = make_float64(m0);
    env->vfp.regs[rm + 1] = make_float64(m1);
    env->vfp.regs[rd] = make_float64(d0);
    env->vfp.regs[rd + 1] = make_float64(d1);
void HELPER(neon_qunzip32)(CPUARMState *env, uint32_t rd, uint32_t rm)
    uint64_t zm0 = float64_val(env->vfp.regs[rm]);
    uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]);
    uint64_t zd0 = float64_val(env->vfp.regs[rd]);
    uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]);
    uint64_t d0 = ELEM(zd0, 0, 32) | (ELEM(zd1, 0, 32) << 32);
    uint64_t d1 = ELEM(zm0, 0, 32) | (ELEM(zm1, 0, 32) << 32);
    uint64_t m0 = ELEM(zd0, 1, 32) | (ELEM(zd1, 1, 32) << 32);
    uint64_t m1 = ELEM(zm0, 1, 32) | (ELEM(zm1, 1, 32) << 32);
    env->vfp.regs[rm] = make_float64(m0);
    env->vfp.regs[rm + 1] = make_float64(m1);
    env->vfp.regs[rd] = make_float64(d0);
    env->vfp.regs[rd + 1] = make_float64(d1);
void HELPER(neon_unzip8)(CPUARMState *env, uint32_t rd, uint32_t rm)
    uint64_t zm = float64_val(env->vfp.regs[rm]);
    uint64_t zd = float64_val(env->vfp.regs[rd]);
    uint64_t d0 = ELEM(zd, 0, 8) | (ELEM(zd, 2, 8) << 8)
        | (ELEM(zd, 4, 8) << 16) | (ELEM(zd, 6, 8) << 24)
        | (ELEM(zm, 0, 8) << 32) | (ELEM(zm, 2, 8) << 40)
        | (ELEM(zm, 4, 8) << 48) | (ELEM(zm, 6, 8) << 56);
    uint64_t m0 = ELEM(zd, 1, 8) | (ELEM(zd, 3, 8) << 8)
        | (ELEM(zd, 5, 8) << 16) | (ELEM(zd, 7, 8) << 24)
        | (ELEM(zm, 1, 8) << 32) | (ELEM(zm, 3, 8) << 40)
        | (ELEM(zm, 5, 8) << 48) | (ELEM(zm, 7, 8) << 56);
    env->vfp.regs[rm] = make_float64(m0);
    env->vfp.regs[rd] = make_float64(d0);
void HELPER(neon_unzip16)(CPUARMState *env, uint32_t rd, uint32_t rm)
    uint64_t zm = float64_val(env->vfp.regs[rm]);
    uint64_t zd = float64_val(env->vfp.regs[rd]);
    uint64_t d0 = ELEM(zd, 0, 16) | (ELEM(zd, 2, 16) << 16)
        | (ELEM(zm, 0, 16) << 32) | (ELEM(zm, 2, 16) << 48);
    uint64_t m0 = ELEM(zd, 1, 16) | (ELEM(zd, 3, 16) << 16)
        | (ELEM(zm, 1, 16) << 32) | (ELEM(zm, 3, 16) << 48);
    env->vfp.regs[rm] = make_float64(m0);
    env->vfp.regs[rd] = make_float64(d0);
void HELPER(neon_qzip8)(CPUARMState *env, uint32_t rd, uint32_t rm)
    uint64_t zm0 = float64_val(env->vfp.regs[rm]);
    uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]);
    uint64_t zd0 = float64_val(env->vfp.regs[rd]);
    uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]);
    uint64_t d0 = ELEM(zd0, 0, 8) | (ELEM(zm0, 0, 8) << 8)
        | (ELEM(zd0, 1, 8) << 16) | (ELEM(zm0, 1, 8) << 24)
        | (ELEM(zd0, 2, 8) << 32) | (ELEM(zm0, 2, 8) << 40)
        | (ELEM(zd0, 3, 8) << 48) | (ELEM(zm0, 3, 8) << 56);
    uint64_t d1 = ELEM(zd0, 4, 8) | (ELEM(zm0, 4, 8) << 8)
        | (ELEM(zd0, 5, 8) << 16) | (ELEM(zm0, 5, 8) << 24)
        | (ELEM(zd0, 6, 8) << 32) | (ELEM(zm0, 6, 8) << 40)
        | (ELEM(zd0, 7, 8) << 48) | (ELEM(zm0, 7, 8) << 56);
    uint64_t m0 = ELEM(zd1, 0, 8) | (ELEM(zm1, 0, 8) << 8)
        | (ELEM(zd1, 1, 8) << 16) | (ELEM(zm1, 1, 8) << 24)
        | (ELEM(zd1, 2, 8) << 32) | (ELEM(zm1, 2, 8) << 40)
        | (ELEM(zd1, 3, 8) << 48) | (ELEM(zm1, 3, 8) << 56);
    uint64_t m1 = ELEM(zd1, 4, 8) | (ELEM(zm1, 4, 8) << 8)
        | (ELEM(zd1, 5, 8) << 16) | (ELEM(zm1, 5, 8) << 24)
        | (ELEM(zd1, 6, 8) << 32) | (ELEM(zm1, 6, 8) << 40)
        | (ELEM(zd1, 7, 8) << 48) | (ELEM(zm1, 7, 8) << 56);
    env->vfp.regs[rm] = make_float64(m0);
    env->vfp.regs[rm + 1] = make_float64(m1);
    env->vfp.regs[rd] = make_float64(d0);
    env->vfp.regs[rd + 1] = make_float64(d1);
void HELPER(neon_qzip16)(CPUARMState *env, uint32_t rd, uint32_t rm)
    uint64_t zm0 = float64_val(env->vfp.regs[rm]);
    uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]);
    uint64_t zd0 = float64_val(env->vfp.regs[rd]);
    uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]);
    uint64_t d0 = ELEM(zd0, 0, 16) | (ELEM(zm0, 0, 16) << 16)
        | (ELEM(zd0, 1, 16) << 32) | (ELEM(zm0, 1, 16) << 48);
    uint64_t d1 = ELEM(zd0, 2, 16) | (ELEM(zm0, 2, 16) << 16)
        | (ELEM(zd0, 3, 16) << 32) | (ELEM(zm0, 3, 16) << 48);
    uint64_t m0 = ELEM(zd1, 0, 16) | (ELEM(zm1, 0, 16) << 16)
        | (ELEM(zd1, 1, 16) << 32) | (ELEM(zm1, 1, 16) << 48);
    uint64_t m1 = ELEM(zd1, 2, 16) | (ELEM(zm1, 2, 16) << 16)
        | (ELEM(zd1, 3, 16) << 32) | (ELEM(zm1, 3, 16) << 48);
    env->vfp.regs[rm] = make_float64(m0);
    env->vfp.regs[rm + 1] = make_float64(m1);
    env->vfp.regs[rd] = make_float64(d0);
    env->vfp.regs[rd + 1] = make_float64(d1);
void HELPER(neon_qzip32)(CPUARMState *env, uint32_t rd, uint32_t rm)
    uint64_t zm0 = float64_val(env->vfp.regs[rm]);
    uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]);
    uint64_t zd0 = float64_val(env->vfp.regs[rd]);
    uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]);
    uint64_t d0 = ELEM(zd0, 0, 32) | (ELEM(zm0, 0, 32) << 32);
    uint64_t d1 = ELEM(zd0, 1, 32) | (ELEM(zm0, 1, 32) << 32);
    uint64_t m0 = ELEM(zd1, 0, 32) | (ELEM(zm1, 0, 32) << 32);
    uint64_t m1 = ELEM(zd1, 1, 32) | (ELEM(zm1, 1, 32) << 32);
    env->vfp.regs[rm] = make_float64(m0);
    env->vfp.regs[rm + 1] = make_float64(m1);
    env->vfp.regs[rd] = make_float64(d0);
    env->vfp.regs[rd + 1] = make_float64(d1);
void HELPER(neon_zip8)(CPUARMState *env, uint32_t rd, uint32_t rm)
    uint64_t zm = float64_val(env->vfp.regs[rm]);
    uint64_t zd = float64_val(env->vfp.regs[rd]);
    uint64_t d0 = ELEM(zd, 0, 8) | (ELEM(zm, 0, 8) << 8)
        | (ELEM(zd, 1, 8) << 16) | (ELEM(zm, 1, 8) << 24)
        | (ELEM(zd, 2, 8) << 32) | (ELEM(zm, 2, 8) << 40)
        | (ELEM(zd, 3, 8) << 48) | (ELEM(zm, 3, 8) << 56);
    uint64_t m0 = ELEM(zd, 4, 8) | (ELEM(zm, 4, 8) << 8)
        | (ELEM(zd, 5, 8) << 16) | (ELEM(zm, 5, 8) << 24)
        | (ELEM(zd, 6, 8) << 32) | (ELEM(zm, 6, 8) << 40)
        | (ELEM(zd, 7, 8) << 48) | (ELEM(zm, 7, 8) << 56);
    env->vfp.regs[rm] = make_float64(m0);
    env->vfp.regs[rd] = make_float64(d0);
void HELPER(neon_zip16)(CPUARMState *env, uint32_t rd, uint32_t rm)
    uint64_t zm = float64_val(env->vfp.regs[rm]);
    uint64_t zd = float64_val(env->vfp.regs[rd]);
    uint64_t d0 = ELEM(zd, 0, 16) | (ELEM(zm, 0, 16) << 16)
        | (ELEM(zd, 1, 16) << 32) | (ELEM(zm, 1, 16) << 48);
    uint64_t m0 = ELEM(zd, 2, 16) | (ELEM(zm, 2, 16) << 16)
        | (ELEM(zd, 3, 16) << 32) | (ELEM(zm, 3, 16) << 48);
    env->vfp.regs[rm] = make_float64(m0);
    env->vfp.regs[rd] = make_float64(d0);