/*
 * MIPS SIMD Architecture Module Instruction emulation helpers for QEMU.
 *
 * Copyright (c) 2014 Imagination Technologies
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "cpu.h"
#include "exec/helper-proto.h"
23 /* Data format min and max values */
24 #define DF_BITS(df) (1 << ((df) + 3))
26 #define DF_MAX_INT(df) (int64_t)((1LL << (DF_BITS(df) - 1)) - 1)
27 #define M_MAX_INT(m) (int64_t)((1LL << ((m) - 1)) - 1)
29 #define DF_MIN_INT(df) (int64_t)(-(1LL << (DF_BITS(df) - 1)))
30 #define M_MIN_INT(m) (int64_t)(-(1LL << ((m) - 1)))
32 #define DF_MAX_UINT(df) (uint64_t)(-1ULL >> (64 - DF_BITS(df)))
33 #define M_MAX_UINT(m) (uint64_t)(-1ULL >> (64 - (m)))
35 #define UNSIGNED(x, df) ((x) & DF_MAX_UINT(df))
36 #define SIGNED(x, df) \
37 ((((int64_t)x) << (64 - DF_BITS(df))) >> (64 - DF_BITS(df)))
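/*
 * For illustration, assuming DF_BYTE..DF_DOUBLE enumerate 0..3 (which is
 * what the DF_BITS() formula implies), the macros above evaluate to:
 *
 *   df          DF_BITS   DF_MAX_INT   DF_MIN_INT   DF_MAX_UINT
 *   DF_BYTE        8          127         -128          255
 *   DF_HALF       16        32767       -32768        65535
 *   DF_WORD       32   2147483647  -2147483648   4294967295
 *   DF_DOUBLE     64       2^63-1        -2^63       2^64-1
 *
 * UNSIGNED() masks a value down to DF_BITS(df) bits; SIGNED() sign-extends
 * bit DF_BITS(df)-1 into the upper bits of the 64-bit container.
 */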
39 /* Element-by-element access macros */
40 #define DF_ELEMENTS(df) (MSA_WRLEN / DF_BITS(df))
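/*
 * With 128-bit MSA vector registers (MSA_WRLEN assumed to be 128),
 * DF_ELEMENTS() yields 16 byte, 8 halfword, 4 word or 2 doubleword
 * elements per register.
 */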
42 static inline void msa_move_v(wr_t *pwd, wr_t *pws)
46 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
47 pwd->d[i] = pws->d[i];
51 #define MSA_FN_IMM8(FUNC, DEST, OPERATION) \
52 void helper_msa_ ## FUNC(CPUMIPSState *env, uint32_t wd, uint32_t ws, \
55 wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \
56 wr_t *pws = &(env->active_fpu.fpr[ws].wr); \
58 for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { \
63 MSA_FN_IMM8(andi_b, pwd->b[i], pws->b[i] & i8)
64 MSA_FN_IMM8(ori_b, pwd->b[i], pws->b[i] | i8)
65 MSA_FN_IMM8(nori_b, pwd->b[i], ~(pws->b[i] | i8))
66 MSA_FN_IMM8(xori_b, pwd->b[i], pws->b[i] ^ i8)
68 #define BIT_MOVE_IF_NOT_ZERO(dest, arg1, arg2, df) \
69 UNSIGNED(((dest & (~arg2)) | (arg1 & arg2)), df)
70 MSA_FN_IMM8(bmnzi_b, pwd->b[i],
71 BIT_MOVE_IF_NOT_ZERO(pwd->b[i], pws->b[i], i8, DF_BYTE))
73 #define BIT_MOVE_IF_ZERO(dest, arg1, arg2, df) \
74 UNSIGNED((dest & arg2) | (arg1 & (~arg2)), df)
75 MSA_FN_IMM8(bmzi_b, pwd->b[i],
76 BIT_MOVE_IF_ZERO(pwd->b[i], pws->b[i], i8, DF_BYTE))
78 #define BIT_SELECT(dest, arg1, arg2, df) \
79 UNSIGNED((arg1 & (~dest)) | (arg2 & dest), df)
80 MSA_FN_IMM8(bseli_b, pwd->b[i],
81 BIT_SELECT(pwd->b[i], pws->b[i], i8, DF_BYTE))
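/*
 * All three forms above work bit-wise within each 8-bit element: BMNZI
 * takes the bit from ws where the corresponding immediate bit is 1 and
 * keeps wd elsewhere, BMZI does the opposite, and BSELI uses the current
 * wd bit as the selector (0 picks ws, 1 picks the immediate).
 */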
85 #define SHF_POS(i, imm) (((i) & 0xfc) + (((imm) >> (2 * ((i) & 0x03))) & 0x03))
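/*
 * SHF_POS() keeps the group-of-four base (i & 0xfc) and replaces the low
 * two bits with the 2-bit field of imm selected by (i & 3).  For example,
 * imm == 0xE4 (0b11100100) is the identity permutation, while imm == 0x1B
 * (0b00011011) reverses every group of four elements.
 */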
87 void helper_msa_shf_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
88 uint32_t ws, uint32_t imm)
90 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
91 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
97 for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) {
98 pwx->b[i] = pws->b[SHF_POS(i, imm)];
102 for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) {
103 pwx->h[i] = pws->h[SHF_POS(i, imm)];
107 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
108 pwx->w[i] = pws->w[SHF_POS(i, imm)];
114 msa_move_v(pwd, pwx);
117 #define MSA_FN_VECTOR(FUNC, DEST, OPERATION) \
118 void helper_msa_ ## FUNC(CPUMIPSState *env, uint32_t wd, uint32_t ws, \
121 wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \
122 wr_t *pws = &(env->active_fpu.fpr[ws].wr); \
123 wr_t *pwt = &(env->active_fpu.fpr[wt].wr); \
125 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { \
130 MSA_FN_VECTOR(and_v, pwd->d[i], pws->d[i] & pwt->d[i])
131 MSA_FN_VECTOR(or_v, pwd->d[i], pws->d[i] | pwt->d[i])
132 MSA_FN_VECTOR(nor_v, pwd->d[i], ~(pws->d[i] | pwt->d[i]))
133 MSA_FN_VECTOR(xor_v, pwd->d[i], pws->d[i] ^ pwt->d[i])
134 MSA_FN_VECTOR(bmnz_v, pwd->d[i],
135 BIT_MOVE_IF_NOT_ZERO(pwd->d[i], pws->d[i], pwt->d[i], DF_DOUBLE))
136 MSA_FN_VECTOR(bmz_v, pwd->d[i],
137 BIT_MOVE_IF_ZERO(pwd->d[i], pws->d[i], pwt->d[i], DF_DOUBLE))
138 MSA_FN_VECTOR(bsel_v, pwd->d[i],
139 BIT_SELECT(pwd->d[i], pws->d[i], pwt->d[i], DF_DOUBLE))
140 #undef BIT_MOVE_IF_NOT_ZERO
141 #undef BIT_MOVE_IF_ZERO
145 static inline int64_t msa_addv_df(uint32_t df, int64_t arg1, int64_t arg2)
150 static inline int64_t msa_subv_df(uint32_t df, int64_t arg1, int64_t arg2)
155 static inline int64_t msa_ceq_df(uint32_t df, int64_t arg1, int64_t arg2)
157 return arg1 == arg2 ? -1 : 0;
160 static inline int64_t msa_cle_s_df(uint32_t df, int64_t arg1, int64_t arg2)
162 return arg1 <= arg2 ? -1 : 0;
165 static inline int64_t msa_cle_u_df(uint32_t df, int64_t arg1, int64_t arg2)
167 uint64_t u_arg1 = UNSIGNED(arg1, df);
168 uint64_t u_arg2 = UNSIGNED(arg2, df);
169 return u_arg1 <= u_arg2 ? -1 : 0;
172 static inline int64_t msa_clt_s_df(uint32_t df, int64_t arg1, int64_t arg2)
174 return arg1 < arg2 ? -1 : 0;
177 static inline int64_t msa_clt_u_df(uint32_t df, int64_t arg1, int64_t arg2)
179 uint64_t u_arg1 = UNSIGNED(arg1, df);
180 uint64_t u_arg2 = UNSIGNED(arg2, df);
181 return u_arg1 < u_arg2 ? -1 : 0;
184 static inline int64_t msa_max_s_df(uint32_t df, int64_t arg1, int64_t arg2)
186 return arg1 > arg2 ? arg1 : arg2;
189 static inline int64_t msa_max_u_df(uint32_t df, int64_t arg1, int64_t arg2)
191 uint64_t u_arg1 = UNSIGNED(arg1, df);
192 uint64_t u_arg2 = UNSIGNED(arg2, df);
193 return u_arg1 > u_arg2 ? arg1 : arg2;
196 static inline int64_t msa_min_s_df(uint32_t df, int64_t arg1, int64_t arg2)
198 return arg1 < arg2 ? arg1 : arg2;
201 static inline int64_t msa_min_u_df(uint32_t df, int64_t arg1, int64_t arg2)
203 uint64_t u_arg1 = UNSIGNED(arg1, df);
204 uint64_t u_arg2 = UNSIGNED(arg2, df);
205 return u_arg1 < u_arg2 ? arg1 : arg2;
208 #define MSA_BINOP_IMM_DF(helper, func) \
209 void helper_msa_ ## helper ## _df(CPUMIPSState *env, uint32_t df, \
210 uint32_t wd, uint32_t ws, int32_t u5) \
212 wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \
213 wr_t *pws = &(env->active_fpu.fpr[ws].wr); \
218 for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { \
219 pwd->b[i] = msa_ ## func ## _df(df, pws->b[i], u5); \
223 for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { \
224 pwd->h[i] = msa_ ## func ## _df(df, pws->h[i], u5); \
228 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { \
229 pwd->w[i] = msa_ ## func ## _df(df, pws->w[i], u5); \
233 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { \
234 pwd->d[i] = msa_ ## func ## _df(df, pws->d[i], u5); \
242 MSA_BINOP_IMM_DF(addvi, addv)
243 MSA_BINOP_IMM_DF(subvi, subv)
244 MSA_BINOP_IMM_DF(ceqi, ceq)
245 MSA_BINOP_IMM_DF(clei_s, cle_s)
246 MSA_BINOP_IMM_DF(clei_u, cle_u)
247 MSA_BINOP_IMM_DF(clti_s, clt_s)
248 MSA_BINOP_IMM_DF(clti_u, clt_u)
249 MSA_BINOP_IMM_DF(maxi_s, max_s)
250 MSA_BINOP_IMM_DF(maxi_u, max_u)
251 MSA_BINOP_IMM_DF(mini_s, min_s)
252 MSA_BINOP_IMM_DF(mini_u, min_u)
253 #undef MSA_BINOP_IMM_DF
255 void helper_msa_ldi_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
258 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
263 for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) {
264 pwd->b[i] = (int8_t)s10;
268 for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) {
269 pwd->h[i] = (int16_t)s10;
273 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
274 pwd->w[i] = (int32_t)s10;
278 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
279 pwd->d[i] = (int64_t)s10;
287 /* Data format bit position and unsigned values */
288 #define BIT_POSITION(x, df) ((uint64_t)(x) % DF_BITS(df))
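/*
 * BIT_POSITION() reduces a shift or bit index modulo the element width,
 * e.g. BIT_POSITION(9, DF_BYTE) == 1, so shift counts wrap instead of
 * producing undefined behaviour in the helpers below.
 */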
290 static inline int64_t msa_sll_df(uint32_t df, int64_t arg1, int64_t arg2)
292 int32_t b_arg2 = BIT_POSITION(arg2, df);
293 return arg1 << b_arg2;
296 static inline int64_t msa_sra_df(uint32_t df, int64_t arg1, int64_t arg2)
298 int32_t b_arg2 = BIT_POSITION(arg2, df);
299 return arg1 >> b_arg2;
302 static inline int64_t msa_srl_df(uint32_t df, int64_t arg1, int64_t arg2)
304 uint64_t u_arg1 = UNSIGNED(arg1, df);
305 int32_t b_arg2 = BIT_POSITION(arg2, df);
306 return u_arg1 >> b_arg2;
309 static inline int64_t msa_bclr_df(uint32_t df, int64_t arg1, int64_t arg2)
311 int32_t b_arg2 = BIT_POSITION(arg2, df);
312 return UNSIGNED(arg1 & (~(1LL << b_arg2)), df);
315 static inline int64_t msa_bset_df(uint32_t df, int64_t arg1,
318 int32_t b_arg2 = BIT_POSITION(arg2, df);
319 return UNSIGNED(arg1 | (1LL << b_arg2), df);
322 static inline int64_t msa_bneg_df(uint32_t df, int64_t arg1, int64_t arg2)
324 int32_t b_arg2 = BIT_POSITION(arg2, df);
325 return UNSIGNED(arg1 ^ (1LL << b_arg2), df);
328 static inline int64_t msa_binsl_df(uint32_t df, int64_t dest, int64_t arg1,
331 uint64_t u_arg1 = UNSIGNED(arg1, df);
332 uint64_t u_dest = UNSIGNED(dest, df);
333 int32_t sh_d = BIT_POSITION(arg2, df) + 1;
334 int32_t sh_a = DF_BITS(df) - sh_d;
335 if (sh_d == DF_BITS(df)) {
338 return UNSIGNED(UNSIGNED(u_dest << sh_d, df) >> sh_d, df) |
339 UNSIGNED(UNSIGNED(u_arg1 >> sh_a, df) << sh_a, df);
343 static inline int64_t msa_binsr_df(uint32_t df, int64_t dest, int64_t arg1,
346 uint64_t u_arg1 = UNSIGNED(arg1, df);
347 uint64_t u_dest = UNSIGNED(dest, df);
348 int32_t sh_d = BIT_POSITION(arg2, df) + 1;
349 int32_t sh_a = DF_BITS(df) - sh_d;
350 if (sh_d == DF_BITS(df)) {
353 return UNSIGNED(UNSIGNED(u_dest >> sh_d, df) << sh_d, df) |
354 UNSIGNED(UNSIGNED(u_arg1 << sh_a, df) >> sh_a, df);
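/*
 * Roughly: BINSL above copies the (arg2 % DF_BITS(df)) + 1 most
 * significant bits of arg1 into dest and keeps the remaining low bits of
 * dest, while BINSR does the same with the least significant bits.
 */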
358 static inline int64_t msa_sat_s_df(uint32_t df, int64_t arg, uint32_t m)
360 return arg < M_MIN_INT(m+1) ? M_MIN_INT(m+1) :
361 arg > M_MAX_INT(m+1) ? M_MAX_INT(m+1) :
365 static inline int64_t msa_sat_u_df(uint32_t df, int64_t arg, uint32_t m)
367 uint64_t u_arg = UNSIGNED(arg, df);
368 return u_arg < M_MAX_UINT(m+1) ? u_arg :
372 static inline int64_t msa_srar_df(uint32_t df, int64_t arg1, int64_t arg2)
374 int32_t b_arg2 = BIT_POSITION(arg2, df);
378 int64_t r_bit = (arg1 >> (b_arg2 - 1)) & 1;
379 return (arg1 >> b_arg2) + r_bit;
383 static inline int64_t msa_srlr_df(uint32_t df, int64_t arg1, int64_t arg2)
385 uint64_t u_arg1 = UNSIGNED(arg1, df);
386 int32_t b_arg2 = BIT_POSITION(arg2, df);
390 uint64_t r_bit = (u_arg1 >> (b_arg2 - 1)) & 1;
391 return (u_arg1 >> b_arg2) + r_bit;
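/*
 * SRAR/SRLR above are rounding shifts: r_bit is the last bit shifted out,
 * so the truncated result is bumped by one when that bit was set, e.g. an
 * arithmetic rounding shift of 5 by 1 gives 3 rather than 2.
 */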
395 #define MSA_BINOP_IMMU_DF(helper, func) \
396 void helper_msa_ ## helper ## _df(CPUMIPSState *env, uint32_t df, uint32_t wd, \
397 uint32_t ws, uint32_t u5) \
399 wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \
400 wr_t *pws = &(env->active_fpu.fpr[ws].wr); \
405 for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { \
406 pwd->b[i] = msa_ ## func ## _df(df, pws->b[i], u5); \
410 for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { \
411 pwd->h[i] = msa_ ## func ## _df(df, pws->h[i], u5); \
415 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { \
416 pwd->w[i] = msa_ ## func ## _df(df, pws->w[i], u5); \
420 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { \
421 pwd->d[i] = msa_ ## func ## _df(df, pws->d[i], u5); \
429 MSA_BINOP_IMMU_DF(slli, sll)
430 MSA_BINOP_IMMU_DF(srai, sra)
431 MSA_BINOP_IMMU_DF(srli, srl)
432 MSA_BINOP_IMMU_DF(bclri, bclr)
433 MSA_BINOP_IMMU_DF(bseti, bset)
434 MSA_BINOP_IMMU_DF(bnegi, bneg)
435 MSA_BINOP_IMMU_DF(sat_s, sat_s)
436 MSA_BINOP_IMMU_DF(sat_u, sat_u)
437 MSA_BINOP_IMMU_DF(srari, srar)
438 MSA_BINOP_IMMU_DF(srlri, srlr)
439 #undef MSA_BINOP_IMMU_DF
441 #define MSA_TEROP_IMMU_DF(helper, func) \
442 void helper_msa_ ## helper ## _df(CPUMIPSState *env, uint32_t df, \
443 uint32_t wd, uint32_t ws, uint32_t u5) \
445 wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \
446 wr_t *pws = &(env->active_fpu.fpr[ws].wr); \
451 for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { \
452 pwd->b[i] = msa_ ## func ## _df(df, pwd->b[i], pws->b[i], \
457 for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { \
458 pwd->h[i] = msa_ ## func ## _df(df, pwd->h[i], pws->h[i], \
463 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { \
464 pwd->w[i] = msa_ ## func ## _df(df, pwd->w[i], pws->w[i], \
469 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { \
470 pwd->d[i] = msa_ ## func ## _df(df, pwd->d[i], pws->d[i], \
479 MSA_TEROP_IMMU_DF(binsli, binsl)
480 MSA_TEROP_IMMU_DF(binsri, binsr)
481 #undef MSA_TEROP_IMMU_DF
483 static inline int64_t msa_max_a_df(uint32_t df, int64_t arg1, int64_t arg2)
485 uint64_t abs_arg1 = arg1 >= 0 ? arg1 : -arg1;
486 uint64_t abs_arg2 = arg2 >= 0 ? arg2 : -arg2;
487 return abs_arg1 > abs_arg2 ? arg1 : arg2;
490 static inline int64_t msa_min_a_df(uint32_t df, int64_t arg1, int64_t arg2)
492 uint64_t abs_arg1 = arg1 >= 0 ? arg1 : -arg1;
493 uint64_t abs_arg2 = arg2 >= 0 ? arg2 : -arg2;
494 return abs_arg1 < abs_arg2 ? arg1 : arg2;
497 static inline int64_t msa_add_a_df(uint32_t df, int64_t arg1, int64_t arg2)
499 uint64_t abs_arg1 = arg1 >= 0 ? arg1 : -arg1;
500 uint64_t abs_arg2 = arg2 >= 0 ? arg2 : -arg2;
501 return abs_arg1 + abs_arg2;
504 static inline int64_t msa_adds_a_df(uint32_t df, int64_t arg1, int64_t arg2)
506 uint64_t max_int = (uint64_t)DF_MAX_INT(df);
507 uint64_t abs_arg1 = arg1 >= 0 ? arg1 : -arg1;
508 uint64_t abs_arg2 = arg2 >= 0 ? arg2 : -arg2;
509 if (abs_arg1 > max_int || abs_arg2 > max_int) {
510 return (int64_t)max_int;
512 return (abs_arg1 < max_int - abs_arg2) ? abs_arg1 + abs_arg2 : max_int;
516 static inline int64_t msa_adds_s_df(uint32_t df, int64_t arg1, int64_t arg2)
518 int64_t max_int = DF_MAX_INT(df);
519 int64_t min_int = DF_MIN_INT(df);
521 return (min_int - arg1 < arg2) ? arg1 + arg2 : min_int;
523 return (arg2 < max_int - arg1) ? arg1 + arg2 : max_int;
527 static inline uint64_t msa_adds_u_df(uint32_t df, uint64_t arg1, uint64_t arg2)
529 uint64_t max_uint = DF_MAX_UINT(df);
530 uint64_t u_arg1 = UNSIGNED(arg1, df);
531 uint64_t u_arg2 = UNSIGNED(arg2, df);
532 return (u_arg1 < max_uint - u_arg2) ? u_arg1 + u_arg2 : max_uint;
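/*
 * The adds_* helpers above saturate instead of wrapping: results are
 * clamped to DF_MAX_INT/DF_MIN_INT (signed), DF_MAX_UINT (unsigned) or
 * DF_MAX_INT for the absolute-value form.  E.g. for DF_BYTE,
 * msa_adds_u_df(DF_BYTE, 200, 100) yields 255.
 */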
535 static inline int64_t msa_ave_s_df(uint32_t df, int64_t arg1, int64_t arg2)
538 return (arg1 >> 1) + (arg2 >> 1) + (arg1 & arg2 & 1);
541 static inline uint64_t msa_ave_u_df(uint32_t df, uint64_t arg1, uint64_t arg2)
543 uint64_t u_arg1 = UNSIGNED(arg1, df);
544 uint64_t u_arg2 = UNSIGNED(arg2, df);
546 return (u_arg1 >> 1) + (u_arg2 >> 1) + (u_arg1 & u_arg2 & 1);
549 static inline int64_t msa_aver_s_df(uint32_t df, int64_t arg1, int64_t arg2)
552 return (arg1 >> 1) + (arg2 >> 1) + ((arg1 | arg2) & 1);
555 static inline uint64_t msa_aver_u_df(uint32_t df, uint64_t arg1, uint64_t arg2)
557 uint64_t u_arg1 = UNSIGNED(arg1, df);
558 uint64_t u_arg2 = UNSIGNED(arg2, df);
560 return (u_arg1 >> 1) + (u_arg2 >> 1) + ((u_arg1 | u_arg2) & 1);
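/*
 * The averaging helpers above use (a >> 1) + (b >> 1) + carry so the sum
 * can never overflow the 64-bit intermediate.  AVE rounds towards minus
 * infinity (carry = a & b & 1) and AVER rounds towards plus infinity
 * (carry = (a | b) & 1), e.g. ave_s(5, 6) == 5 while aver_s(5, 6) == 6.
 */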
563 static inline int64_t msa_subs_s_df(uint32_t df, int64_t arg1, int64_t arg2)
565 int64_t max_int = DF_MAX_INT(df);
566 int64_t min_int = DF_MIN_INT(df);
568 return (min_int + arg2 < arg1) ? arg1 - arg2 : min_int;
570 return (arg1 < max_int + arg2) ? arg1 - arg2 : max_int;
574 static inline int64_t msa_subs_u_df(uint32_t df, int64_t arg1, int64_t arg2)
576 uint64_t u_arg1 = UNSIGNED(arg1, df);
577 uint64_t u_arg2 = UNSIGNED(arg2, df);
578 return (u_arg1 > u_arg2) ? u_arg1 - u_arg2 : 0;
581 static inline int64_t msa_subsus_u_df(uint32_t df, int64_t arg1, int64_t arg2)
583 uint64_t u_arg1 = UNSIGNED(arg1, df);
584 uint64_t max_uint = DF_MAX_UINT(df);
586 uint64_t u_arg2 = (uint64_t)arg2;
587 return (u_arg1 > u_arg2) ?
588 (int64_t)(u_arg1 - u_arg2) :
591 uint64_t u_arg2 = (uint64_t)(-arg2);
592 return (u_arg1 < max_uint - u_arg2) ?
593 (int64_t)(u_arg1 + u_arg2) :
598 static inline int64_t msa_subsuu_s_df(uint32_t df, int64_t arg1, int64_t arg2)
600 uint64_t u_arg1 = UNSIGNED(arg1, df);
601 uint64_t u_arg2 = UNSIGNED(arg2, df);
602 int64_t max_int = DF_MAX_INT(df);
603 int64_t min_int = DF_MIN_INT(df);
604 if (u_arg1 > u_arg2) {
605 return u_arg1 - u_arg2 < (uint64_t)max_int ?
606 (int64_t)(u_arg1 - u_arg2) :
609 return u_arg2 - u_arg1 < (uint64_t)(-min_int) ?
610 (int64_t)(u_arg1 - u_arg2) :
615 static inline int64_t msa_asub_s_df(uint32_t df, int64_t arg1, int64_t arg2)
618 return (arg1 < arg2) ?
619 (uint64_t)(arg2 - arg1) : (uint64_t)(arg1 - arg2);
622 static inline uint64_t msa_asub_u_df(uint32_t df, uint64_t arg1, uint64_t arg2)
624 uint64_t u_arg1 = UNSIGNED(arg1, df);
625 uint64_t u_arg2 = UNSIGNED(arg2, df);
626 /* unsigned compare */
627 return (u_arg1 < u_arg2) ?
628 (uint64_t)(u_arg2 - u_arg1) : (uint64_t)(u_arg1 - u_arg2);
631 static inline int64_t msa_mulv_df(uint32_t df, int64_t arg1, int64_t arg2)
636 static inline int64_t msa_div_s_df(uint32_t df, int64_t arg1, int64_t arg2)
638 if (arg1 == DF_MIN_INT(df) && arg2 == -1) {
639 return DF_MIN_INT(df);
641 return arg2 ? arg1 / arg2 : 0;
644 static inline int64_t msa_div_u_df(uint32_t df, int64_t arg1, int64_t arg2)
646 uint64_t u_arg1 = UNSIGNED(arg1, df);
647 uint64_t u_arg2 = UNSIGNED(arg2, df);
648 return u_arg2 ? u_arg1 / u_arg2 : 0;
651 static inline int64_t msa_mod_s_df(uint32_t df, int64_t arg1, int64_t arg2)
653 if (arg1 == DF_MIN_INT(df) && arg2 == -1) {
656 return arg2 ? arg1 % arg2 : 0;
659 static inline int64_t msa_mod_u_df(uint32_t df, int64_t arg1, int64_t arg2)
661 uint64_t u_arg1 = UNSIGNED(arg1, df);
662 uint64_t u_arg2 = UNSIGNED(arg2, df);
663 return u_arg2 ? u_arg1 % u_arg2 : 0;
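/*
 * The signed divide/modulo helpers above special-case DF_MIN_INT / -1,
 * which the host '/' and '%' operators would overflow, and every variant
 * returns 0 for a zero divisor instead of letting the host division fault.
 */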
666 #define SIGNED_EVEN(a, df) \
667 ((((int64_t)(a)) << (64 - DF_BITS(df)/2)) >> (64 - DF_BITS(df)/2))
669 #define UNSIGNED_EVEN(a, df) \
670 ((((uint64_t)(a)) << (64 - DF_BITS(df)/2)) >> (64 - DF_BITS(df)/2))
672 #define SIGNED_ODD(a, df) \
673 ((((int64_t)(a)) << (64 - DF_BITS(df))) >> (64 - DF_BITS(df)/2))
675 #define UNSIGNED_ODD(a, df) \
676 ((((uint64_t)(a)) << (64 - DF_BITS(df))) >> (64 - DF_BITS(df)/2))
678 #define SIGNED_EXTRACT(e, o, a, df) \
680 e = SIGNED_EVEN(a, df); \
681 o = SIGNED_ODD(a, df); \
684 #define UNSIGNED_EXTRACT(e, o, a, df) \
686 e = UNSIGNED_EVEN(a, df); \
687 o = UNSIGNED_ODD(a, df); \
690 static inline int64_t msa_dotp_s_df(uint32_t df, int64_t arg1, int64_t arg2)
696 SIGNED_EXTRACT(even_arg1, odd_arg1, arg1, df);
697 SIGNED_EXTRACT(even_arg2, odd_arg2, arg2, df);
698 return (even_arg1 * even_arg2) + (odd_arg1 * odd_arg2);
701 static inline int64_t msa_dotp_u_df(uint32_t df, int64_t arg1, int64_t arg2)
707 UNSIGNED_EXTRACT(even_arg1, odd_arg1, arg1, df);
708 UNSIGNED_EXTRACT(even_arg2, odd_arg2, arg2, df);
709 return (even_arg1 * even_arg2) + (odd_arg1 * odd_arg2);
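/*
 * SIGNED_EXTRACT/UNSIGNED_EXTRACT split one df-wide element into its even
 * (low) and odd (high) half-width sub-elements, sign- or zero-extended, so
 * DOTP above can form even*even + odd*odd as a dot product of adjacent
 * pairs.
 */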
712 #define CONCATENATE_AND_SLIDE(s, k) \
714 for (i = 0; i < s; i++) { \
715 v[i] = pws->b[s * k + i]; \
716 v[i + s] = pwd->b[s * k + i]; \
718 for (i = 0; i < s; i++) { \
719 pwd->b[s * k + i] = v[i + n]; \
723 static inline void msa_sld_df(uint32_t df, wr_t *pwd,
724 wr_t *pws, target_ulong rt)
726 uint32_t n = rt % DF_ELEMENTS(df);
732 CONCATENATE_AND_SLIDE(DF_ELEMENTS(DF_BYTE), 0);
735 for (k = 0; k < 2; k++) {
736 CONCATENATE_AND_SLIDE(DF_ELEMENTS(DF_HALF), k);
740 for (k = 0; k < 4; k++) {
741 CONCATENATE_AND_SLIDE(DF_ELEMENTS(DF_WORD), k);
745 for (k = 0; k < 8; k++) {
746 CONCATENATE_AND_SLIDE(DF_ELEMENTS(DF_DOUBLE), k);
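/*
 * CONCATENATE_AND_SLIDE copies s bytes of ws followed by s bytes of wd
 * into the temporary v[] and writes back v[i + n], i.e. each s-byte
 * segment of the destination becomes the ws:wd concatenation shifted down
 * by n bytes, where n is the slide amount (the rt argument) reduced modulo
 * the element count.
 */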
754 static inline int64_t msa_hadd_s_df(uint32_t df, int64_t arg1, int64_t arg2)
756 return SIGNED_ODD(arg1, df) + SIGNED_EVEN(arg2, df);
759 static inline int64_t msa_hadd_u_df(uint32_t df, int64_t arg1, int64_t arg2)
761 return UNSIGNED_ODD(arg1, df) + UNSIGNED_EVEN(arg2, df);
764 static inline int64_t msa_hsub_s_df(uint32_t df, int64_t arg1, int64_t arg2)
766 return SIGNED_ODD(arg1, df) - SIGNED_EVEN(arg2, df);
769 static inline int64_t msa_hsub_u_df(uint32_t df, int64_t arg1, int64_t arg2)
771 return UNSIGNED_ODD(arg1, df) - UNSIGNED_EVEN(arg2, df);
774 static inline int64_t msa_mul_q_df(uint32_t df, int64_t arg1, int64_t arg2)
776 int64_t q_min = DF_MIN_INT(df);
777 int64_t q_max = DF_MAX_INT(df);
779 if (arg1 == q_min && arg2 == q_min) {
782 return (arg1 * arg2) >> (DF_BITS(df) - 1);
785 static inline int64_t msa_mulr_q_df(uint32_t df, int64_t arg1, int64_t arg2)
787 int64_t q_min = DF_MIN_INT(df);
788 int64_t q_max = DF_MAX_INT(df);
789 int64_t r_bit = 1 << (DF_BITS(df) - 2);
791 if (arg1 == q_min && arg2 == q_min) {
794 return (arg1 * arg2 + r_bit) >> (DF_BITS(df) - 1);
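/*
 * MUL_Q/MULR_Q above treat the operands as signed Q(DF_BITS-1) fractions:
 * in Q15, 0x4000 * 0x4000 (0.5 * 0.5) shifted right by 15 gives 0x2000
 * (0.25).  The q_min * q_min case is checked up front because
 * (-1.0) * (-1.0) is not representable, and MULR_Q adds r_bit to round to
 * nearest instead of truncating.
 */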
797 #define MSA_BINOP_DF(func) \
798 void helper_msa_ ## func ## _df(CPUMIPSState *env, uint32_t df, \
799 uint32_t wd, uint32_t ws, uint32_t wt) \
801 wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \
802 wr_t *pws = &(env->active_fpu.fpr[ws].wr); \
803 wr_t *pwt = &(env->active_fpu.fpr[wt].wr); \
808 for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { \
809 pwd->b[i] = msa_ ## func ## _df(df, pws->b[i], pwt->b[i]); \
813 for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { \
814 pwd->h[i] = msa_ ## func ## _df(df, pws->h[i], pwt->h[i]); \
818 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { \
819 pwd->w[i] = msa_ ## func ## _df(df, pws->w[i], pwt->w[i]); \
823 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { \
824 pwd->d[i] = msa_ ## func ## _df(df, pws->d[i], pwt->d[i]); \
861 MSA_BINOP_DF(subsus_u)
862 MSA_BINOP_DF(subsuu_s)
883 void helper_msa_sld_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
884 uint32_t ws, uint32_t rt)
886 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
887 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
889 msa_sld_df(df, pwd, pws, env->active_tc.gpr[rt]);
892 static inline int64_t msa_maddv_df(uint32_t df, int64_t dest, int64_t arg1,
895 return dest + arg1 * arg2;
898 static inline int64_t msa_msubv_df(uint32_t df, int64_t dest, int64_t arg1,
901 return dest - arg1 * arg2;
904 static inline int64_t msa_dpadd_s_df(uint32_t df, int64_t dest, int64_t arg1,
911 SIGNED_EXTRACT(even_arg1, odd_arg1, arg1, df);
912 SIGNED_EXTRACT(even_arg2, odd_arg2, arg2, df);
913 return dest + (even_arg1 * even_arg2) + (odd_arg1 * odd_arg2);
916 static inline int64_t msa_dpadd_u_df(uint32_t df, int64_t dest, int64_t arg1,
923 UNSIGNED_EXTRACT(even_arg1, odd_arg1, arg1, df);
924 UNSIGNED_EXTRACT(even_arg2, odd_arg2, arg2, df);
925 return dest + (even_arg1 * even_arg2) + (odd_arg1 * odd_arg2);
928 static inline int64_t msa_dpsub_s_df(uint32_t df, int64_t dest, int64_t arg1,
935 SIGNED_EXTRACT(even_arg1, odd_arg1, arg1, df);
936 SIGNED_EXTRACT(even_arg2, odd_arg2, arg2, df);
937 return dest - ((even_arg1 * even_arg2) + (odd_arg1 * odd_arg2));
940 static inline int64_t msa_dpsub_u_df(uint32_t df, int64_t dest, int64_t arg1,
947 UNSIGNED_EXTRACT(even_arg1, odd_arg1, arg1, df);
948 UNSIGNED_EXTRACT(even_arg2, odd_arg2, arg2, df);
949 return dest - ((even_arg1 * even_arg2) + (odd_arg1 * odd_arg2));
952 static inline int64_t msa_madd_q_df(uint32_t df, int64_t dest, int64_t arg1,
955 int64_t q_prod, q_ret;
957 int64_t q_max = DF_MAX_INT(df);
958 int64_t q_min = DF_MIN_INT(df);
960 q_prod = arg1 * arg2;
961 q_ret = ((dest << (DF_BITS(df) - 1)) + q_prod) >> (DF_BITS(df) - 1);
963 return (q_ret < q_min) ? q_min : (q_max < q_ret) ? q_max : q_ret;
966 static inline int64_t msa_msub_q_df(uint32_t df, int64_t dest, int64_t arg1,
969 int64_t q_prod, q_ret;
971 int64_t q_max = DF_MAX_INT(df);
972 int64_t q_min = DF_MIN_INT(df);
974 q_prod = arg1 * arg2;
975 q_ret = ((dest << (DF_BITS(df) - 1)) - q_prod) >> (DF_BITS(df) - 1);
977 return (q_ret < q_min) ? q_min : (q_max < q_ret) ? q_max : q_ret;
980 static inline int64_t msa_maddr_q_df(uint32_t df, int64_t dest, int64_t arg1,
983 int64_t q_prod, q_ret;
985 int64_t q_max = DF_MAX_INT(df);
986 int64_t q_min = DF_MIN_INT(df);
987 int64_t r_bit = 1 << (DF_BITS(df) - 2);
989 q_prod = arg1 * arg2;
990 q_ret = ((dest << (DF_BITS(df) - 1)) + q_prod + r_bit) >> (DF_BITS(df) - 1);
992 return (q_ret < q_min) ? q_min : (q_max < q_ret) ? q_max : q_ret;
995 static inline int64_t msa_msubr_q_df(uint32_t df, int64_t dest, int64_t arg1,
998 int64_t q_prod, q_ret;
1000 int64_t q_max = DF_MAX_INT(df);
1001 int64_t q_min = DF_MIN_INT(df);
1002 int64_t r_bit = 1 << (DF_BITS(df) - 2);
1004 q_prod = arg1 * arg2;
1005 q_ret = ((dest << (DF_BITS(df) - 1)) - q_prod + r_bit) >> (DF_BITS(df) - 1);
1007 return (q_ret < q_min) ? q_min : (q_max < q_ret) ? q_max : q_ret;
1010 #define MSA_TEROP_DF(func) \
1011 void helper_msa_ ## func ## _df(CPUMIPSState *env, uint32_t df, uint32_t wd, \
1012 uint32_t ws, uint32_t wt) \
1014 wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \
1015 wr_t *pws = &(env->active_fpu.fpr[ws].wr); \
1016 wr_t *pwt = &(env->active_fpu.fpr[wt].wr); \
1021 for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { \
1022 pwd->b[i] = msa_ ## func ## _df(df, pwd->b[i], pws->b[i], \
1027 for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { \
1028 pwd->h[i] = msa_ ## func ## _df(df, pwd->h[i], pws->h[i], \
1033 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { \
1034 pwd->w[i] = msa_ ## func ## _df(df, pwd->w[i], pws->w[i], \
1039 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { \
1040 pwd->d[i] = msa_ ## func ## _df(df, pwd->d[i], pws->d[i], \
1051 MSA_TEROP_DF(dpadd_s)
1052 MSA_TEROP_DF(dpadd_u)
1053 MSA_TEROP_DF(dpsub_s)
1054 MSA_TEROP_DF(dpsub_u)
1057 MSA_TEROP_DF(madd_q)
1058 MSA_TEROP_DF(msub_q)
1059 MSA_TEROP_DF(maddr_q)
1060 MSA_TEROP_DF(msubr_q)
1063 static inline void msa_splat_df(uint32_t df, wr_t *pwd,
1064 wr_t *pws, target_ulong rt)
1066 uint32_t n = rt % DF_ELEMENTS(df);
1071 for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) {
1072 pwd->b[i] = pws->b[n];
1076 for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) {
1077 pwd->h[i] = pws->h[n];
1081 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
1082 pwd->w[i] = pws->w[n];
1086 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
1087 pwd->d[i] = pws->d[n];
1095 void helper_msa_splat_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
1096 uint32_t ws, uint32_t rt)
1098 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
1099 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
1101 msa_splat_df(df, pwd, pws, env->active_tc.gpr[rt]);
1104 #define MSA_DO_B MSA_DO(b)
1105 #define MSA_DO_H MSA_DO(h)
1106 #define MSA_DO_W MSA_DO(w)
1107 #define MSA_DO_D MSA_DO(d)
1109 #define MSA_LOOP_B MSA_LOOP(B)
1110 #define MSA_LOOP_H MSA_LOOP(H)
1111 #define MSA_LOOP_W MSA_LOOP(W)
1112 #define MSA_LOOP_D MSA_LOOP(D)
1114 #define MSA_LOOP_COND_B MSA_LOOP_COND(DF_BYTE)
1115 #define MSA_LOOP_COND_H MSA_LOOP_COND(DF_HALF)
1116 #define MSA_LOOP_COND_W MSA_LOOP_COND(DF_WORD)
1117 #define MSA_LOOP_COND_D MSA_LOOP_COND(DF_DOUBLE)
1119 #define MSA_LOOP(DF) \
1120 for (i = 0; i < (MSA_LOOP_COND_ ## DF) ; i++) { \
1124 #define MSA_FN_DF(FUNC) \
1125 void helper_msa_##FUNC(CPUMIPSState *env, uint32_t df, uint32_t wd, \
1126 uint32_t ws, uint32_t wt) \
1128 wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \
1129 wr_t *pws = &(env->active_fpu.fpr[ws].wr); \
1130 wr_t *pwt = &(env->active_fpu.fpr[wt].wr); \
1131 wr_t wx, *pwx = &wx; \
1149 msa_move_v(pwd, pwx); \
1152 #define MSA_LOOP_COND(DF) \
1153 (DF_ELEMENTS(DF) / 2)
1155 #define Rb(pwr, i) (pwr->b[i])
1156 #define Lb(pwr, i) (pwr->b[i + DF_ELEMENTS(DF_BYTE)/2])
1157 #define Rh(pwr, i) (pwr->h[i])
1158 #define Lh(pwr, i) (pwr->h[i + DF_ELEMENTS(DF_HALF)/2])
1159 #define Rw(pwr, i) (pwr->w[i])
1160 #define Lw(pwr, i) (pwr->w[i + DF_ELEMENTS(DF_WORD)/2])
1161 #define Rd(pwr, i) (pwr->d[i])
1162 #define Ld(pwr, i) (pwr->d[i + DF_ELEMENTS(DF_DOUBLE)/2])
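/*
 * The R and L accessors address the right (lower-numbered) and left
 * (upper-numbered) halves of a vector register.  The MSA_DO definitions
 * below combine them to build what appear to be the interleave, pack and
 * shuffle style permutations into the temporary pwx.
 */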
1164 #define MSA_DO(DF) \
1166 R##DF(pwx, i) = pwt->DF[2*i]; \
1167 L##DF(pwx, i) = pws->DF[2*i]; \
1172 #define MSA_DO(DF) \
1174 R##DF(pwx, i) = pwt->DF[2*i+1]; \
1175 L##DF(pwx, i) = pws->DF[2*i+1]; \
1180 #define MSA_DO(DF) \
1182 pwx->DF[2*i] = L##DF(pwt, i); \
1183 pwx->DF[2*i+1] = L##DF(pws, i); \
1188 #define MSA_DO(DF) \
1190 pwx->DF[2*i] = R##DF(pwt, i); \
1191 pwx->DF[2*i+1] = R##DF(pws, i); \
1196 #define MSA_DO(DF) \
1198 pwx->DF[2*i] = pwt->DF[2*i]; \
1199 pwx->DF[2*i+1] = pws->DF[2*i]; \
1204 #define MSA_DO(DF) \
1206 pwx->DF[2*i] = pwt->DF[2*i+1]; \
1207 pwx->DF[2*i+1] = pws->DF[2*i+1]; \
1211 #undef MSA_LOOP_COND
1213 #define MSA_LOOP_COND(DF) \
1216 #define MSA_DO(DF) \
1218 uint32_t n = DF_ELEMENTS(df); \
1219 uint32_t k = (pwd->DF[i] & 0x3f) % (2 * n); \
1221 (pwd->DF[i] & 0xc0) ? 0 : k < n ? pwt->DF[k] : pws->DF[k - n]; \
1225 #undef MSA_LOOP_COND
1228 void helper_msa_sldi_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
1229 uint32_t ws, uint32_t n)
1231 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
1232 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
1234 msa_sld_df(df, pwd, pws, n);
1237 void helper_msa_splati_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
1238 uint32_t ws, uint32_t n)
1240 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
1241 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
1243 msa_splat_df(df, pwd, pws, n);
1246 void helper_msa_copy_s_df(CPUMIPSState *env, uint32_t df, uint32_t rd,
1247 uint32_t ws, uint32_t n)
1249 n %= DF_ELEMENTS(df);
1253 env->active_tc.gpr[rd] = (int8_t)env->active_fpu.fpr[ws].wr.b[n];
1256 env->active_tc.gpr[rd] = (int16_t)env->active_fpu.fpr[ws].wr.h[n];
1259 env->active_tc.gpr[rd] = (int32_t)env->active_fpu.fpr[ws].wr.w[n];
1261 #ifdef TARGET_MIPS64
1263 env->active_tc.gpr[rd] = (int64_t)env->active_fpu.fpr[ws].wr.d[n];
1271 void helper_msa_copy_u_df(CPUMIPSState *env, uint32_t df, uint32_t rd,
1272 uint32_t ws, uint32_t n)
1274 n %= DF_ELEMENTS(df);
1278 env->active_tc.gpr[rd] = (uint8_t)env->active_fpu.fpr[ws].wr.b[n];
1281 env->active_tc.gpr[rd] = (uint16_t)env->active_fpu.fpr[ws].wr.h[n];
1284 env->active_tc.gpr[rd] = (uint32_t)env->active_fpu.fpr[ws].wr.w[n];
1286 #ifdef TARGET_MIPS64
1288 env->active_tc.gpr[rd] = (uint64_t)env->active_fpu.fpr[ws].wr.d[n];
1296 void helper_msa_insert_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
1297 uint32_t rs_num, uint32_t n)
1299 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
1300 target_ulong rs = env->active_tc.gpr[rs_num];
1304 pwd->b[n] = (int8_t)rs;
1307 pwd->h[n] = (int16_t)rs;
1310 pwd->w[n] = (int32_t)rs;
1313 pwd->d[n] = (int64_t)rs;
1320 void helper_msa_insve_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
1321 uint32_t ws, uint32_t n)
1323 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
1324 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
1328 pwd->b[n] = (int8_t)pws->b[0];
1331 pwd->h[n] = (int16_t)pws->h[0];
1334 pwd->w[n] = (int32_t)pws->w[0];
1337 pwd->d[n] = (int64_t)pws->d[0];
1344 void helper_msa_ctcmsa(CPUMIPSState *env, target_ulong elm, uint32_t cd)
1350 env->active_tc.msacsr = (int32_t)elm & MSACSR_MASK;
1351 /* set float_status rounding mode */
1352 set_float_rounding_mode(
1353 ieee_rm[(env->active_tc.msacsr & MSACSR_RM_MASK) >> MSACSR_RM],
1354 &env->active_tc.msa_fp_status);
1355 /* set float_status flush modes */
1357 (env->active_tc.msacsr & MSACSR_FS_MASK) != 0 ? 1 : 0,
1358 &env->active_tc.msa_fp_status);
1359 set_flush_inputs_to_zero(
1360 (env->active_tc.msacsr & MSACSR_FS_MASK) != 0 ? 1 : 0,
1361 &env->active_tc.msa_fp_status);
1362 /* check exception */
1363 if ((GET_FP_ENABLE(env->active_tc.msacsr) | FP_UNIMPLEMENTED)
1364 & GET_FP_CAUSE(env->active_tc.msacsr)) {
1365 helper_raise_exception(env, EXCP_MSAFPE);
1371 target_ulong helper_msa_cfcmsa(CPUMIPSState *env, uint32_t cs)
1377 return env->active_tc.msacsr & MSACSR_MASK;
1382 void helper_msa_move_v(CPUMIPSState *env, uint32_t wd, uint32_t ws)
1384 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
1385 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
1387 msa_move_v(pwd, pws);
1390 static inline int64_t msa_pcnt_df(uint32_t df, int64_t arg)
1394 x = UNSIGNED(arg, df);
1396 x = (x & 0x5555555555555555ULL) + ((x >> 1) & 0x5555555555555555ULL);
1397 x = (x & 0x3333333333333333ULL) + ((x >> 2) & 0x3333333333333333ULL);
1398 x = (x & 0x0F0F0F0F0F0F0F0FULL) + ((x >> 4) & 0x0F0F0F0F0F0F0F0FULL);
1399 x = (x & 0x00FF00FF00FF00FFULL) + ((x >> 8) & 0x00FF00FF00FF00FFULL);
1400 x = (x & 0x0000FFFF0000FFFFULL) + ((x >> 16) & 0x0000FFFF0000FFFFULL);
1401 x = (x & 0x00000000FFFFFFFFULL) + ((x >> 32));
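/*
 * The sequence above is the classic divide-and-conquer population count:
 * bits are summed in pairs, then nibbles, bytes, halfwords and words, so
 * an all-ones byte element yields 8.
 */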
1406 static inline int64_t msa_nlzc_df(uint32_t df, int64_t arg)
1411 x = UNSIGNED(arg, df);
1413 c = DF_BITS(df) / 2;
1427 static inline int64_t msa_nloc_df(uint32_t df, int64_t arg)
1429 return msa_nlzc_df(df, UNSIGNED((~arg), df));
1432 void helper_msa_fill_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
1435 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
1440 for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) {
1441 pwd->b[i] = (int8_t)env->active_tc.gpr[rs];
1445 for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) {
1446 pwd->h[i] = (int16_t)env->active_tc.gpr[rs];
1450 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
1451 pwd->w[i] = (int32_t)env->active_tc.gpr[rs];
1455 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
1456 pwd->d[i] = (int64_t)env->active_tc.gpr[rs];
1464 #define MSA_UNOP_DF(func) \
1465 void helper_msa_ ## func ## _df(CPUMIPSState *env, uint32_t df, \
1466 uint32_t wd, uint32_t ws) \
1468 wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \
1469 wr_t *pws = &(env->active_fpu.fpr[ws].wr); \
1474 for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { \
1475 pwd->b[i] = msa_ ## func ## _df(df, pws->b[i]); \
1479 for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { \
1480 pwd->h[i] = msa_ ## func ## _df(df, pws->h[i]); \
1484 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { \
1485 pwd->w[i] = msa_ ## func ## _df(df, pws->w[i]); \
1489 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { \
1490 pwd->d[i] = msa_ ## func ## _df(df, pws->d[i]); \
1503 #define FLOAT_ONE32 make_float32(0x3f8 << 20)
1504 #define FLOAT_ONE64 make_float64(0x3ffULL << 52)
1506 #define FLOAT_SNAN16 (float16_default_nan ^ 0x0220)
1508 #define FLOAT_SNAN32 (float32_default_nan ^ 0x00400020)
1510 #define FLOAT_SNAN64 (float64_default_nan ^ 0x0008000000000020ULL)
1511 /* 0x7ff0000000000020 */
1513 static inline void clear_msacsr_cause(CPUMIPSState *env)
1515 SET_FP_CAUSE(env->active_tc.msacsr, 0);
1518 static inline void check_msacsr_cause(CPUMIPSState *env)
1520 if ((GET_FP_CAUSE(env->active_tc.msacsr) &
1521 (GET_FP_ENABLE(env->active_tc.msacsr) | FP_UNIMPLEMENTED)) == 0) {
1522 UPDATE_FP_FLAGS(env->active_tc.msacsr,
1523 GET_FP_CAUSE(env->active_tc.msacsr));
1525 helper_raise_exception(env, EXCP_MSAFPE);
1529 /* Flush-to-zero use cases for update_msacsr() */
1530 #define CLEAR_FS_UNDERFLOW 1
1531 #define CLEAR_IS_INEXACT 2
1532 #define RECIPROCAL_INEXACT 4
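/*
 * These action flags are passed to update_msacsr() below to adjust how the
 * softfloat exception bits are reported: CLEAR_FS_UNDERFLOW suppresses
 * Underflow for flushed outputs, CLEAR_IS_INEXACT suppresses Inexact for
 * flushed inputs, and RECIPROCAL_INEXACT makes reciprocal-style helpers
 * report only Inexact when no Invalid or Divide-by-zero occurred.
 */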
1534 static inline int update_msacsr(CPUMIPSState *env, int action, int denormal)
1542 ieee_ex = get_float_exception_flags(&env->active_tc.msa_fp_status);
1544 /* QEMU softfloat does not signal all underflow cases */
1546 ieee_ex |= float_flag_underflow;
1549 c = ieee_ex_to_mips(ieee_ex);
1550 enable = GET_FP_ENABLE(env->active_tc.msacsr) | FP_UNIMPLEMENTED;
1552 /* Set Inexact (I) when flushing inputs to zero */
1553 if ((ieee_ex & float_flag_input_denormal) &&
1554 (env->active_tc.msacsr & MSACSR_FS_MASK) != 0) {
1555 if (action & CLEAR_IS_INEXACT) {
1562 /* Set Inexact (I) and Underflow (U) when flushing outputs to zero */
1563 if ((ieee_ex & float_flag_output_denormal) &&
1564 (env->active_tc.msacsr & MSACSR_FS_MASK) != 0) {
1566 if (action & CLEAR_FS_UNDERFLOW) {
1573 /* Set Inexact (I) when Overflow (O) is not enabled */
1574 if ((c & FP_OVERFLOW) != 0 && (enable & FP_OVERFLOW) == 0) {
1578 /* Clear Exact Underflow when Underflow (U) is not enabled */
1579 if ((c & FP_UNDERFLOW) != 0 && (enable & FP_UNDERFLOW) == 0 &&
1580 (c & FP_INEXACT) == 0) {
/* Reciprocal operations set only Inexact when neither Invalid nor
   Divide by Zero is raised */
1586 if ((action & RECIPROCAL_INEXACT) &&
1587 (c & (FP_INVALID | FP_DIV0)) == 0) {
1591 cause = c & enable; /* all current enabled exceptions */
1594 /* No enabled exception, update the MSACSR Cause
1595 with all current exceptions */
1596 SET_FP_CAUSE(env->active_tc.msacsr,
1597 (GET_FP_CAUSE(env->active_tc.msacsr) | c));
1599 /* Current exceptions are enabled */
1600 if ((env->active_tc.msacsr & MSACSR_NX_MASK) == 0) {
1601 /* Exception(s) will trap, update MSACSR Cause
1602 with all enabled exceptions */
1603 SET_FP_CAUSE(env->active_tc.msacsr,
1604 (GET_FP_CAUSE(env->active_tc.msacsr) | c));
1611 static inline int get_enabled_exceptions(const CPUMIPSState *env, int c)
1613 int enable = GET_FP_ENABLE(env->active_tc.msacsr) | FP_UNIMPLEMENTED;
1617 static inline float16 float16_from_float32(int32 a, flag ieee,
1618 float_status *status)
1622 f_val = float32_to_float16((float32)a, ieee, status);
1623 f_val = float16_maybe_silence_nan(f_val);
1625 return a < 0 ? (f_val | (1 << 15)) : f_val;
1628 static inline float32 float32_from_float64(int64 a, float_status *status)
1632 f_val = float64_to_float32((float64)a, status);
1633 f_val = float32_maybe_silence_nan(f_val);
1635 return a < 0 ? (f_val | (1 << 31)) : f_val;
1638 static inline float32 float32_from_float16(int16_t a, flag ieee,
1639 float_status *status)
1643 f_val = float16_to_float32((float16)a, ieee, status);
1644 f_val = float32_maybe_silence_nan(f_val);
1646 return a < 0 ? (f_val | (1 << 31)) : f_val;
1649 static inline float64 float64_from_float32(int32 a, float_status *status)
1653 f_val = float32_to_float64((float64)a, status);
1654 f_val = float64_maybe_silence_nan(f_val);
1656 return a < 0 ? (f_val | (1ULL << 63)) : f_val;
1659 static inline float32 float32_from_q16(int16_t a, float_status *status)
1663 /* conversion as integer and scaling */
1664 f_val = int32_to_float32(a, status);
1665 f_val = float32_scalbn(f_val, -15, status);
1670 static inline float64 float64_from_q32(int32 a, float_status *status)
1674 /* conversion as integer and scaling */
1675 f_val = int32_to_float64(a, status);
1676 f_val = float64_scalbn(f_val, -31, status);
1681 static inline int16_t float32_to_q16(float32 a, float_status *status)
1684 int32 q_min = 0xffff8000;
1685 int32 q_max = 0x00007fff;
1689 if (float32_is_any_nan(a)) {
1690 float_raise(float_flag_invalid, status);
1695 a = float32_scalbn(a, 15, status);
1697 ieee_ex = get_float_exception_flags(status);
1698 set_float_exception_flags(ieee_ex & (~float_flag_underflow)
1701 if (ieee_ex & float_flag_overflow) {
1702 float_raise(float_flag_inexact, status);
1703 return (int32)a < 0 ? q_min : q_max;
1706 /* conversion to int */
1707 q_val = float32_to_int32(a, status);
1709 ieee_ex = get_float_exception_flags(status);
1710 set_float_exception_flags(ieee_ex & (~float_flag_underflow)
1713 if (ieee_ex & float_flag_invalid) {
1714 set_float_exception_flags(ieee_ex & (~float_flag_invalid)
1716 float_raise(float_flag_overflow | float_flag_inexact, status);
1717 return (int32)a < 0 ? q_min : q_max;
1720 if (q_val < q_min) {
1721 float_raise(float_flag_overflow | float_flag_inexact, status);
1722 return (int16_t)q_min;
1725 if (q_max < q_val) {
1726 float_raise(float_flag_overflow | float_flag_inexact, status);
1727 return (int16_t)q_max;
1730 return (int16_t)q_val;
1733 static inline int32 float64_to_q32(float64 a, float_status *status)
1736 int64 q_min = 0xffffffff80000000LL;
1737 int64 q_max = 0x000000007fffffffLL;
1741 if (float64_is_any_nan(a)) {
1742 float_raise(float_flag_invalid, status);
1747 a = float64_scalbn(a, 31, status);
1749 ieee_ex = get_float_exception_flags(status);
1750 set_float_exception_flags(ieee_ex & (~float_flag_underflow)
1753 if (ieee_ex & float_flag_overflow) {
1754 float_raise(float_flag_inexact, status);
1755 return (int64)a < 0 ? q_min : q_max;
1758 /* conversion to integer */
1759 q_val = float64_to_int64(a, status);
1761 ieee_ex = get_float_exception_flags(status);
1762 set_float_exception_flags(ieee_ex & (~float_flag_underflow)
1765 if (ieee_ex & float_flag_invalid) {
1766 set_float_exception_flags(ieee_ex & (~float_flag_invalid)
1768 float_raise(float_flag_overflow | float_flag_inexact, status);
1769 return (int64)a < 0 ? q_min : q_max;
1772 if (q_val < q_min) {
1773 float_raise(float_flag_overflow | float_flag_inexact, status);
1774 return (int32)q_min;
1777 if (q_max < q_val) {
1778 float_raise(float_flag_overflow | float_flag_inexact, status);
1779 return (int32)q_max;
1782 return (int32)q_val;
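/*
 * The comparison helpers below all follow the same pattern: a true
 * predicate writes an all-ones element, a false one writes zero, and when
 * an enabled exception is detected the element is replaced by the
 * FLOAT_SNAN pattern with the MIPS cause bits in its low six bits.  The
 * compound predicates (UEQ, ULT, ULE, NE, UNE, OR) are composed from the
 * basic unordered/eq/lt/le compares.
 */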
1785 #define MSA_FLOAT_COND(DEST, OP, ARG1, ARG2, BITS, QUIET) \
1787 float_status *status = &env->active_tc.msa_fp_status; \
1790 set_float_exception_flags(0, status); \
1792 cond = float ## BITS ## _ ## OP(ARG1, ARG2, status); \
1794 cond = float ## BITS ## _ ## OP ## _quiet(ARG1, ARG2, status); \
1796 DEST = cond ? M_MAX_UINT(BITS) : 0; \
1797 c = update_msacsr(env, CLEAR_IS_INEXACT, 0); \
1799 if (get_enabled_exceptions(env, c)) { \
1800 DEST = ((FLOAT_SNAN ## BITS >> 6) << 6) | c; \
1804 #define MSA_FLOAT_AF(DEST, ARG1, ARG2, BITS, QUIET) \
1806 MSA_FLOAT_COND(DEST, eq, ARG1, ARG2, BITS, QUIET); \
1807 if ((DEST & M_MAX_UINT(BITS)) == M_MAX_UINT(BITS)) { \
1812 #define MSA_FLOAT_UEQ(DEST, ARG1, ARG2, BITS, QUIET) \
1814 MSA_FLOAT_COND(DEST, unordered, ARG1, ARG2, BITS, QUIET); \
1816 MSA_FLOAT_COND(DEST, eq, ARG1, ARG2, BITS, QUIET); \
1820 #define MSA_FLOAT_NE(DEST, ARG1, ARG2, BITS, QUIET) \
1822 MSA_FLOAT_COND(DEST, lt, ARG1, ARG2, BITS, QUIET); \
1824 MSA_FLOAT_COND(DEST, lt, ARG2, ARG1, BITS, QUIET); \
1828 #define MSA_FLOAT_UNE(DEST, ARG1, ARG2, BITS, QUIET) \
1830 MSA_FLOAT_COND(DEST, unordered, ARG1, ARG2, BITS, QUIET); \
1832 MSA_FLOAT_COND(DEST, lt, ARG1, ARG2, BITS, QUIET); \
1834 MSA_FLOAT_COND(DEST, lt, ARG2, ARG1, BITS, QUIET); \
1839 #define MSA_FLOAT_ULE(DEST, ARG1, ARG2, BITS, QUIET) \
1841 MSA_FLOAT_COND(DEST, unordered, ARG1, ARG2, BITS, QUIET); \
1843 MSA_FLOAT_COND(DEST, le, ARG1, ARG2, BITS, QUIET); \
1847 #define MSA_FLOAT_ULT(DEST, ARG1, ARG2, BITS, QUIET) \
1849 MSA_FLOAT_COND(DEST, unordered, ARG1, ARG2, BITS, QUIET); \
1851 MSA_FLOAT_COND(DEST, lt, ARG1, ARG2, BITS, QUIET); \
1855 #define MSA_FLOAT_OR(DEST, ARG1, ARG2, BITS, QUIET) \
1857 MSA_FLOAT_COND(DEST, le, ARG1, ARG2, BITS, QUIET); \
1859 MSA_FLOAT_COND(DEST, le, ARG2, ARG1, BITS, QUIET); \
1863 static inline void compare_af(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
1864 wr_t *pwt, uint32_t df, int quiet)
1866 wr_t wx, *pwx = &wx;
1869 clear_msacsr_cause(env);
1873 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
1874 MSA_FLOAT_AF(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet);
1878 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
1879 MSA_FLOAT_AF(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet);
1886 check_msacsr_cause(env);
1888 msa_move_v(pwd, pwx);
1891 static inline void compare_un(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
1892 wr_t *pwt, uint32_t df, int quiet)
1894 wr_t wx, *pwx = &wx;
1897 clear_msacsr_cause(env);
1901 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
1902 MSA_FLOAT_COND(pwx->w[i], unordered, pws->w[i], pwt->w[i], 32,
1907 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
1908 MSA_FLOAT_COND(pwx->d[i], unordered, pws->d[i], pwt->d[i], 64,
1916 check_msacsr_cause(env);
1918 msa_move_v(pwd, pwx);
1921 static inline void compare_eq(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
1922 wr_t *pwt, uint32_t df, int quiet)
1924 wr_t wx, *pwx = &wx;
1927 clear_msacsr_cause(env);
1931 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
1932 MSA_FLOAT_COND(pwx->w[i], eq, pws->w[i], pwt->w[i], 32, quiet);
1936 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
1937 MSA_FLOAT_COND(pwx->d[i], eq, pws->d[i], pwt->d[i], 64, quiet);
1944 check_msacsr_cause(env);
1946 msa_move_v(pwd, pwx);
1949 static inline void compare_ueq(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
1950 wr_t *pwt, uint32_t df, int quiet)
1952 wr_t wx, *pwx = &wx;
1955 clear_msacsr_cause(env);
1959 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
1960 MSA_FLOAT_UEQ(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet);
1964 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
1965 MSA_FLOAT_UEQ(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet);
1972 check_msacsr_cause(env);
1974 msa_move_v(pwd, pwx);
1977 static inline void compare_lt(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
1978 wr_t *pwt, uint32_t df, int quiet)
1980 wr_t wx, *pwx = &wx;
1983 clear_msacsr_cause(env);
1987 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
1988 MSA_FLOAT_COND(pwx->w[i], lt, pws->w[i], pwt->w[i], 32, quiet);
1992 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
1993 MSA_FLOAT_COND(pwx->d[i], lt, pws->d[i], pwt->d[i], 64, quiet);
2000 check_msacsr_cause(env);
2002 msa_move_v(pwd, pwx);
2005 static inline void compare_ult(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
2006 wr_t *pwt, uint32_t df, int quiet)
2008 wr_t wx, *pwx = &wx;
2011 clear_msacsr_cause(env);
2015 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
2016 MSA_FLOAT_ULT(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet);
2020 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
2021 MSA_FLOAT_ULT(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet);
2028 check_msacsr_cause(env);
2030 msa_move_v(pwd, pwx);
2033 static inline void compare_le(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
2034 wr_t *pwt, uint32_t df, int quiet)
2036 wr_t wx, *pwx = &wx;
2039 clear_msacsr_cause(env);
2043 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
2044 MSA_FLOAT_COND(pwx->w[i], le, pws->w[i], pwt->w[i], 32, quiet);
2048 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
2049 MSA_FLOAT_COND(pwx->d[i], le, pws->d[i], pwt->d[i], 64, quiet);
2056 check_msacsr_cause(env);
2058 msa_move_v(pwd, pwx);
2061 static inline void compare_ule(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
2062 wr_t *pwt, uint32_t df, int quiet)
2064 wr_t wx, *pwx = &wx;
2067 clear_msacsr_cause(env);
2071 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
2072 MSA_FLOAT_ULE(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet);
2076 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
2077 MSA_FLOAT_ULE(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet);
2084 check_msacsr_cause(env);
2086 msa_move_v(pwd, pwx);
2089 static inline void compare_or(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
2090 wr_t *pwt, uint32_t df, int quiet)
2092 wr_t wx, *pwx = &wx;
2095 clear_msacsr_cause(env);
2099 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
2100 MSA_FLOAT_OR(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet);
2104 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
2105 MSA_FLOAT_OR(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet);
2112 check_msacsr_cause(env);
2114 msa_move_v(pwd, pwx);
2117 static inline void compare_une(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
2118 wr_t *pwt, uint32_t df, int quiet)
2120 wr_t wx, *pwx = &wx;
2123 clear_msacsr_cause(env);
2127 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
2128 MSA_FLOAT_UNE(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet);
2132 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
2133 MSA_FLOAT_UNE(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet);
2140 check_msacsr_cause(env);
2142 msa_move_v(pwd, pwx);
2145 static inline void compare_ne(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
2146 wr_t *pwt, uint32_t df, int quiet) {
2147 wr_t wx, *pwx = &wx;
2150 clear_msacsr_cause(env);
2154 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
2155 MSA_FLOAT_NE(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet);
2159 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
2160 MSA_FLOAT_NE(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet);
2167 check_msacsr_cause(env);
2169 msa_move_v(pwd, pwx);
2172 void helper_msa_fcaf_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
2173 uint32_t ws, uint32_t wt)
2175 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
2176 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
2177 wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
2178 compare_af(env, pwd, pws, pwt, df, 1);
2181 void helper_msa_fcun_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
2182 uint32_t ws, uint32_t wt)
2184 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
2185 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
2186 wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
2187 compare_un(env, pwd, pws, pwt, df, 1);
2190 void helper_msa_fceq_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
2191 uint32_t ws, uint32_t wt)
2193 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
2194 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
2195 wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
2196 compare_eq(env, pwd, pws, pwt, df, 1);
2199 void helper_msa_fcueq_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
2200 uint32_t ws, uint32_t wt)
2202 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
2203 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
2204 wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
2205 compare_ueq(env, pwd, pws, pwt, df, 1);
2208 void helper_msa_fclt_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
2209 uint32_t ws, uint32_t wt)
2211 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
2212 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
2213 wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
2214 compare_lt(env, pwd, pws, pwt, df, 1);
2217 void helper_msa_fcult_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
2218 uint32_t ws, uint32_t wt)
2220 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
2221 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
2222 wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
2223 compare_ult(env, pwd, pws, pwt, df, 1);
2226 void helper_msa_fcle_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
2227 uint32_t ws, uint32_t wt)
2229 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
2230 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
2231 wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
2232 compare_le(env, pwd, pws, pwt, df, 1);
2235 void helper_msa_fcule_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
2236 uint32_t ws, uint32_t wt)
2238 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
2239 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
2240 wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
2241 compare_ule(env, pwd, pws, pwt, df, 1);
2244 void helper_msa_fsaf_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
2245 uint32_t ws, uint32_t wt)
2247 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
2248 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
2249 wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
2250 compare_af(env, pwd, pws, pwt, df, 0);
2253 void helper_msa_fsun_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
2254 uint32_t ws, uint32_t wt)
2256 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
2257 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
2258 wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
2259 compare_un(env, pwd, pws, pwt, df, 0);
2262 void helper_msa_fseq_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
2263 uint32_t ws, uint32_t wt)
2265 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
2266 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
2267 wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
2268 compare_eq(env, pwd, pws, pwt, df, 0);
2271 void helper_msa_fsueq_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
2272 uint32_t ws, uint32_t wt)
2274 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
2275 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
2276 wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
2277 compare_ueq(env, pwd, pws, pwt, df, 0);
2280 void helper_msa_fslt_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
2281 uint32_t ws, uint32_t wt)
2283 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
2284 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
2285 wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
2286 compare_lt(env, pwd, pws, pwt, df, 0);
2289 void helper_msa_fsult_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
2290 uint32_t ws, uint32_t wt)
2292 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
2293 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
2294 wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
2295 compare_ult(env, pwd, pws, pwt, df, 0);
2298 void helper_msa_fsle_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
2299 uint32_t ws, uint32_t wt)
2301 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
2302 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
2303 wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
2304 compare_le(env, pwd, pws, pwt, df, 0);
2307 void helper_msa_fsule_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
2308 uint32_t ws, uint32_t wt)
2310 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
2311 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
2312 wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
2313 compare_ule(env, pwd, pws, pwt, df, 0);
2316 void helper_msa_fcor_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
2317 uint32_t ws, uint32_t wt)
2319 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
2320 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
2321 wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
2322 compare_or(env, pwd, pws, pwt, df, 1);
2325 void helper_msa_fcune_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
2326 uint32_t ws, uint32_t wt)
2328 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
2329 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
2330 wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
2331 compare_une(env, pwd, pws, pwt, df, 1);
2334 void helper_msa_fcne_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
2335 uint32_t ws, uint32_t wt)
2337 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
2338 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
2339 wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
2340 compare_ne(env, pwd, pws, pwt, df, 1);
2343 void helper_msa_fsor_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
2344 uint32_t ws, uint32_t wt)
2346 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
2347 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
2348 wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
2349 compare_or(env, pwd, pws, pwt, df, 0);
2352 void helper_msa_fsune_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
2353 uint32_t ws, uint32_t wt)
2355 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
2356 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
2357 wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
2358 compare_une(env, pwd, pws, pwt, df, 0);
2361 void helper_msa_fsne_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
2362 uint32_t ws, uint32_t wt)
2364 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
2365 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
2366 wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
2367 compare_ne(env, pwd, pws, pwt, df, 0);
2370 #define float16_is_zero(ARG) 0
2371 #define float16_is_zero_or_denormal(ARG) 0
2373 #define IS_DENORMAL(ARG, BITS) \
2374 (!float ## BITS ## _is_zero(ARG) \
2375 && float ## BITS ## _is_zero_or_denormal(ARG))
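/*
 * The float16 stubs above make IS_DENORMAL(..., 16) always evaluate to
 * false, so only the 32- and 64-bit element paths feed denormal
 * information into update_msacsr().
 */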
2377 #define MSA_FLOAT_BINOP(DEST, OP, ARG1, ARG2, BITS) \
2379 float_status *status = &env->active_tc.msa_fp_status; \
2382 set_float_exception_flags(0, status); \
2383 DEST = float ## BITS ## _ ## OP(ARG1, ARG2, status); \
2384 c = update_msacsr(env, 0, IS_DENORMAL(DEST, BITS)); \
2386 if (get_enabled_exceptions(env, c)) { \
2387 DEST = ((FLOAT_SNAN ## BITS >> 6) << 6) | c; \
2391 void helper_msa_fadd_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
2392 uint32_t ws, uint32_t wt)
2394 wr_t wx, *pwx = &wx;
2395 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
2396 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
2397 wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
2400 clear_msacsr_cause(env);
2404 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
2405 MSA_FLOAT_BINOP(pwx->w[i], add, pws->w[i], pwt->w[i], 32);
2409 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
2410 MSA_FLOAT_BINOP(pwx->d[i], add, pws->d[i], pwt->d[i], 64);
2417 check_msacsr_cause(env);
2418 msa_move_v(pwd, pwx);
2421 void helper_msa_fsub_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
2422 uint32_t ws, uint32_t wt)
2424 wr_t wx, *pwx = &wx;
2425 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
2426 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
2427 wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
2430 clear_msacsr_cause(env);
2434 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
2435 MSA_FLOAT_BINOP(pwx->w[i], sub, pws->w[i], pwt->w[i], 32);
2439 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
2440 MSA_FLOAT_BINOP(pwx->d[i], sub, pws->d[i], pwt->d[i], 64);
2447 check_msacsr_cause(env);
2448 msa_move_v(pwd, pwx);
2451 void helper_msa_fmul_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
2452 uint32_t ws, uint32_t wt)
2454 wr_t wx, *pwx = &wx;
2455 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
2456 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
2457 wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
2460 clear_msacsr_cause(env);
2464 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
2465 MSA_FLOAT_BINOP(pwx->w[i], mul, pws->w[i], pwt->w[i], 32);
2469 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
2470 MSA_FLOAT_BINOP(pwx->d[i], mul, pws->d[i], pwt->d[i], 64);
2477 check_msacsr_cause(env);
2479 msa_move_v(pwd, pwx);
2482 void helper_msa_fdiv_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
2483 uint32_t ws, uint32_t wt)
2485 wr_t wx, *pwx = &wx;
2486 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
2487 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
2488 wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
2491 clear_msacsr_cause(env);
2495 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
2496 MSA_FLOAT_BINOP(pwx->w[i], div, pws->w[i], pwt->w[i], 32);
2500 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
2501 MSA_FLOAT_BINOP(pwx->d[i], div, pws->d[i], pwt->d[i], 64);
2508 check_msacsr_cause(env);
2510 msa_move_v(pwd, pwx);
2513 #define MSA_FLOAT_MULADD(DEST, ARG1, ARG2, ARG3, NEGATE, BITS) \
2515 float_status *status = &env->active_tc.msa_fp_status; \
2518 set_float_exception_flags(0, status); \
2519 DEST = float ## BITS ## _muladd(ARG2, ARG3, ARG1, NEGATE, status); \
2520 c = update_msacsr(env, 0, IS_DENORMAL(DEST, BITS)); \
2522 if (get_enabled_exceptions(env, c)) { \
2523 DEST = ((FLOAT_SNAN ## BITS >> 6) << 6) | c; \
2527 void helper_msa_fmadd_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
2528 uint32_t ws, uint32_t wt)
2530 wr_t wx, *pwx = &wx;
2531 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
2532 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
2533 wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
2536 clear_msacsr_cause(env);
2540 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
2541 MSA_FLOAT_MULADD(pwx->w[i], pwd->w[i],
2542 pws->w[i], pwt->w[i], 0, 32);
2546 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
2547 MSA_FLOAT_MULADD(pwx->d[i], pwd->d[i],
2548 pws->d[i], pwt->d[i], 0, 64);
2555 check_msacsr_cause(env);
2557 msa_move_v(pwd, pwx);
void helper_msa_fmsub_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
        uint32_t ws, uint32_t wt)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_MULADD(pwx->w[i], pwd->w[i],
                             pws->w[i], pwt->w[i],
                             float_muladd_negate_product, 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_MULADD(pwx->d[i], pwd->d[i],
                             pws->d[i], pwt->d[i],
                             float_muladd_negate_product, 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);

    msa_move_v(pwd, pwx);
}

void helper_msa_fexp2_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
        uint32_t ws, uint32_t wt)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_BINOP(pwx->w[i], scalbn, pws->w[i],
                            pwt->w[i] >  0x200 ?  0x200 :
                            pwt->w[i] < -0x200 ? -0x200 : pwt->w[i],
                            32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_BINOP(pwx->d[i], scalbn, pws->d[i],
                            pwt->d[i] >  0x1000 ?  0x1000 :
                            pwt->d[i] < -0x1000 ? -0x1000 : pwt->d[i],
                            64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);

    msa_move_v(pwd, pwx);
}

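/* Same exception handling as MSA_FLOAT_BINOP, but for unary softfloat
   operations (square root, rounding to integral, format conversions). */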
#define MSA_FLOAT_UNOP(DEST, OP, ARG, BITS)                                 \
    do {                                                                    \
        float_status *status = &env->active_tc.msa_fp_status;               \
        int c;                                                              \
                                                                            \
        set_float_exception_flags(0, status);                               \
        DEST = float ## BITS ## _ ## OP(ARG, status);                       \
        c = update_msacsr(env, 0, IS_DENORMAL(DEST, BITS));                 \
                                                                            \
        if (get_enabled_exceptions(env, c)) {                               \
            DEST = ((FLOAT_SNAN ## BITS >> 6) << 6) | c;                    \
        }                                                                   \
    } while (0)

void helper_msa_fexdo_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
        uint32_t ws, uint32_t wt)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    uint32_t i;

    switch (df) {
    case DF_HALF:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            /* Half precision floats come in two formats: standard
               IEEE and "ARM" format. The latter gains extra exponent
               range by omitting the NaN/Inf encodings. */
            flag ieee = 1;

            MSA_FLOAT_BINOP(Lh(pwx, i), from_float32, pws->w[i], ieee, 16);
            MSA_FLOAT_BINOP(Rh(pwx, i), from_float32, pwt->w[i], ieee, 16);
        }
        break;
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_UNOP(Lw(pwx, i), from_float64, pws->d[i], 32);
            MSA_FLOAT_UNOP(Rw(pwx, i), from_float64, pwt->d[i], 32);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);
    msa_move_v(pwd, pwx);
}

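/* Variant of MSA_FLOAT_UNOP for narrowing operations: the computation is
   done in the source width (BITS), while the default result written on an
   enabled exception is sized for the narrower destination (XBITS).
   update_msacsr() is invoked with the CLEAR_FS_UNDERFLOW action. */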
#define MSA_FLOAT_UNOP_XD(DEST, OP, ARG, BITS, XBITS)                       \
    do {                                                                    \
        float_status *status = &env->active_tc.msa_fp_status;               \
        int c;                                                              \
                                                                            \
        set_float_exception_flags(0, status);                               \
        DEST = float ## BITS ## _ ## OP(ARG, status);                       \
        c = update_msacsr(env, CLEAR_FS_UNDERFLOW, 0);                      \
                                                                            \
        if (get_enabled_exceptions(env, c)) {                               \
            DEST = ((FLOAT_SNAN ## XBITS >> 6) << 6) | c;                   \
        }                                                                   \
    } while (0)

void helper_msa_ftq_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
        uint32_t ws, uint32_t wt)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_HALF:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_UNOP_XD(Lh(pwx, i), to_q16, pws->w[i], 32, 16);
            MSA_FLOAT_UNOP_XD(Rh(pwx, i), to_q16, pwt->w[i], 32, 16);
        }
        break;
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_UNOP_XD(Lw(pwx, i), to_q32, pws->d[i], 64, 32);
            MSA_FLOAT_UNOP_XD(Rw(pwx, i), to_q32, pwt->d[i], 64, 32);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);

    msa_move_v(pwd, pwx);
}

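/* True when ARG1 is an ordinary number and ARG2 is a quiet NaN; e.g.
   NUMBER_QNAN_PAIR(x, y, 32) is, in effect,
   !float32_is_any_nan(x) && float32_is_quiet_nan(y).
   FMIN.df/FMAX.df use it to return the numeric operand instead of
   propagating a quiet NaN. */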
#define NUMBER_QNAN_PAIR(ARG1, ARG2, BITS)      \
    !float ## BITS ## _is_any_nan(ARG1)         \
    && float ## BITS ## _is_quiet_nan(ARG2)

#define MSA_FLOAT_MAXOP(DEST, OP, ARG1, ARG2, BITS)                         \
    do {                                                                    \
        float_status *status = &env->active_tc.msa_fp_status;               \
        int c;                                                              \
                                                                            \
        set_float_exception_flags(0, status);                               \
        DEST = float ## BITS ## _ ## OP(ARG1, ARG2, status);                \
        c = update_msacsr(env, 0, 0);                                       \
                                                                            \
        if (get_enabled_exceptions(env, c)) {                               \
            DEST = ((FLOAT_SNAN ## BITS >> 6) << 6) | c;                    \
        }                                                                   \
    } while (0)

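/* Magnitude min/max used by FMIN_A.df and FMAX_A.df. F is the requested
   operation (min or max) and G its opposite; quiet NaNs are first replaced
   by the numeric operand as in FMIN.df/FMAX.df, then the result is picked
   according to the operands' absolute values. */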
#define FMAXMIN_A(F, G, X, _S, _T, BITS)                            \
    do {                                                            \
        uint## BITS ##_t S = _S, T = _T;                            \
        uint## BITS ##_t as, at, xs, xt, xd;                        \
        if (NUMBER_QNAN_PAIR(S, T, BITS)) {                         \
            T = S;                                                  \
        }                                                           \
        else if (NUMBER_QNAN_PAIR(T, S, BITS)) {                    \
            S = T;                                                  \
        }                                                           \
        as = float## BITS ##_abs(S);                                \
        at = float## BITS ##_abs(T);                                \
        MSA_FLOAT_MAXOP(xs, F, S, T, BITS);                         \
        MSA_FLOAT_MAXOP(xt, G, S, T, BITS);                         \
        MSA_FLOAT_MAXOP(xd, F, as, at, BITS);                       \
        X = (as == at || xd == float## BITS ##_abs(xs)) ? xs : xt;  \
    } while (0)

void helper_msa_fmin_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
        uint32_t ws, uint32_t wt)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            if (NUMBER_QNAN_PAIR(pws->w[i], pwt->w[i], 32)) {
                MSA_FLOAT_MAXOP(pwx->w[i], min, pws->w[i], pws->w[i], 32);
            } else if (NUMBER_QNAN_PAIR(pwt->w[i], pws->w[i], 32)) {
                MSA_FLOAT_MAXOP(pwx->w[i], min, pwt->w[i], pwt->w[i], 32);
            } else {
                MSA_FLOAT_MAXOP(pwx->w[i], min, pws->w[i], pwt->w[i], 32);
            }
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            if (NUMBER_QNAN_PAIR(pws->d[i], pwt->d[i], 64)) {
                MSA_FLOAT_MAXOP(pwx->d[i], min, pws->d[i], pws->d[i], 64);
            } else if (NUMBER_QNAN_PAIR(pwt->d[i], pws->d[i], 64)) {
                MSA_FLOAT_MAXOP(pwx->d[i], min, pwt->d[i], pwt->d[i], 64);
            } else {
                MSA_FLOAT_MAXOP(pwx->d[i], min, pws->d[i], pwt->d[i], 64);
            }
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);

    msa_move_v(pwd, pwx);
}

void helper_msa_fmin_a_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
        uint32_t ws, uint32_t wt)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            FMAXMIN_A(min, max, pwx->w[i], pws->w[i], pwt->w[i], 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            FMAXMIN_A(min, max, pwx->d[i], pws->d[i], pwt->d[i], 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);

    msa_move_v(pwd, pwx);
}

void helper_msa_fmax_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
        uint32_t ws, uint32_t wt)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            if (NUMBER_QNAN_PAIR(pws->w[i], pwt->w[i], 32)) {
                MSA_FLOAT_MAXOP(pwx->w[i], max, pws->w[i], pws->w[i], 32);
            } else if (NUMBER_QNAN_PAIR(pwt->w[i], pws->w[i], 32)) {
                MSA_FLOAT_MAXOP(pwx->w[i], max, pwt->w[i], pwt->w[i], 32);
            } else {
                MSA_FLOAT_MAXOP(pwx->w[i], max, pws->w[i], pwt->w[i], 32);
            }
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            if (NUMBER_QNAN_PAIR(pws->d[i], pwt->d[i], 64)) {
                MSA_FLOAT_MAXOP(pwx->d[i], max, pws->d[i], pws->d[i], 64);
            } else if (NUMBER_QNAN_PAIR(pwt->d[i], pws->d[i], 64)) {
                MSA_FLOAT_MAXOP(pwx->d[i], max, pwt->d[i], pwt->d[i], 64);
            } else {
                MSA_FLOAT_MAXOP(pwx->d[i], max, pws->d[i], pwt->d[i], 64);
            }
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);

    msa_move_v(pwd, pwx);
}

void helper_msa_fmax_a_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
        uint32_t ws, uint32_t wt)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            FMAXMIN_A(max, min, pwx->w[i], pws->w[i], pwt->w[i], 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            FMAXMIN_A(max, min, pwx->d[i], pws->d[i], pwt->d[i], 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);

    msa_move_v(pwd, pwx);
}

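/* FCLASS.df stores, per element, a bit mask describing the operand's class
   (NaN, infinity, normal, subnormal or zero, separately for negative and
   positive values); the per-element work is done by the scalar
   helper_float_class_s/helper_float_class_d helpers. */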
void helper_msa_fclass_df(CPUMIPSState *env, uint32_t df,
        uint32_t wd, uint32_t ws)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    if (df == DF_WORD) {
        pwd->w[0] = helper_float_class_s(pws->w[0]);
        pwd->w[1] = helper_float_class_s(pws->w[1]);
        pwd->w[2] = helper_float_class_s(pws->w[2]);
        pwd->w[3] = helper_float_class_s(pws->w[3]);
    } else {
        pwd->d[0] = helper_float_class_d(pws->d[0]);
        pwd->d[1] = helper_float_class_d(pws->d[1]);
    }
}

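/* Like MSA_FLOAT_UNOP but for float-to-integer conversions: underflow is
   handled with CLEAR_FS_UNDERFLOW, and a NaN input produces 0 when the
   corresponding exception is not trapped. */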
#define MSA_FLOAT_UNOP0(DEST, OP, ARG, BITS)                                \
    do {                                                                    \
        float_status *status = &env->active_tc.msa_fp_status;               \
        int c;                                                              \
                                                                            \
        set_float_exception_flags(0, status);                               \
        DEST = float ## BITS ## _ ## OP(ARG, status);                       \
        c = update_msacsr(env, CLEAR_FS_UNDERFLOW, 0);                      \
                                                                            \
        if (get_enabled_exceptions(env, c)) {                               \
            DEST = ((FLOAT_SNAN ## BITS >> 6) << 6) | c;                    \
        } else if (float ## BITS ## _is_any_nan(ARG)) {                     \
            DEST = 0;                                                       \
        }                                                                   \
    } while (0)

void helper_msa_ftrunc_s_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
        uint32_t ws)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_UNOP0(pwx->w[i], to_int32_round_to_zero, pws->w[i], 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_UNOP0(pwx->d[i], to_int64_round_to_zero, pws->d[i], 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);

    msa_move_v(pwd, pwx);
}

void helper_msa_ftrunc_u_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
        uint32_t ws)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_UNOP0(pwx->w[i], to_uint32_round_to_zero, pws->w[i], 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_UNOP0(pwx->d[i], to_uint64_round_to_zero, pws->d[i], 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);

    msa_move_v(pwd, pwx);
}

void helper_msa_fsqrt_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
        uint32_t ws)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_UNOP(pwx->w[i], sqrt, pws->w[i], 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_UNOP(pwx->d[i], sqrt, pws->d[i], 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);

    msa_move_v(pwd, pwx);
}

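/* Computes 1 / ARG for FRCP.df; FRSQRT.df reuses it by passing the square
   root of the source element as ARG. RECIPROCAL_INEXACT is reported to
   update_msacsr() unless ARG is an infinity or the quotient is a quiet
   NaN. */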
#define MSA_FLOAT_RECIPROCAL(DEST, ARG, BITS)                               \
    do {                                                                    \
        float_status *status = &env->active_tc.msa_fp_status;               \
        int c;                                                              \
                                                                            \
        set_float_exception_flags(0, status);                               \
        DEST = float ## BITS ## _ ## div(FLOAT_ONE ## BITS, ARG, status);   \
        c = update_msacsr(env, float ## BITS ## _is_infinity(ARG) ||        \
                          float ## BITS ## _is_quiet_nan(DEST) ?            \
                          0 : RECIPROCAL_INEXACT,                           \
                          IS_DENORMAL(DEST, BITS));                         \
                                                                            \
        if (get_enabled_exceptions(env, c)) {                               \
            DEST = ((FLOAT_SNAN ## BITS >> 6) << 6) | c;                    \
        }                                                                   \
    } while (0)

void helper_msa_frsqrt_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
        uint32_t ws)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_RECIPROCAL(pwx->w[i], float32_sqrt(pws->w[i],
                    &env->active_tc.msa_fp_status), 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_RECIPROCAL(pwx->d[i], float64_sqrt(pws->d[i],
                    &env->active_tc.msa_fp_status), 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);

    msa_move_v(pwd, pwx);
}

void helper_msa_frcp_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
        uint32_t ws)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_RECIPROCAL(pwx->w[i], pws->w[i], 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_RECIPROCAL(pwx->d[i], pws->d[i], 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);

    msa_move_v(pwd, pwx);
}

void helper_msa_frint_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
        uint32_t ws)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_UNOP(pwx->w[i], round_to_int, pws->w[i], 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_UNOP(pwx->d[i], round_to_int, pws->d[i], 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);

    msa_move_v(pwd, pwx);
}

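/* FLOG2.df returns floor(log2(x)): log2 is evaluated with the rounding
   mode forced to round-down, the result is rounded to an integral value,
   the MSACSR rounding mode is restored, and the inexact flag raised by the
   intermediate steps is discarded. */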
#define MSA_FLOAT_LOGB(DEST, ARG, BITS)                                     \
    do {                                                                    \
        float_status *status = &env->active_tc.msa_fp_status;               \
        int c;                                                              \
                                                                            \
        set_float_exception_flags(0, status);                               \
        set_float_rounding_mode(float_round_down, status);                  \
        DEST = float ## BITS ## _ ## log2(ARG, status);                     \
        DEST = float ## BITS ## _ ## round_to_int(DEST, status);            \
        set_float_rounding_mode(ieee_rm[(env->active_tc.msacsr &            \
                                         MSACSR_RM_MASK) >> MSACSR_RM],     \
                                status);                                    \
                                                                            \
        set_float_exception_flags(get_float_exception_flags(status) &       \
                                  (~float_flag_inexact),                    \
                                  status);                                  \
                                                                            \
        c = update_msacsr(env, 0, IS_DENORMAL(DEST, BITS));                 \
                                                                            \
        if (get_enabled_exceptions(env, c)) {                               \
            DEST = ((FLOAT_SNAN ## BITS >> 6) << 6) | c;                    \
        }                                                                   \
    } while (0)

void helper_msa_flog2_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
        uint32_t ws)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_LOGB(pwx->w[i], pws->w[i], 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_LOGB(pwx->d[i], pws->d[i], 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);

    msa_move_v(pwd, pwx);
}

void helper_msa_fexupl_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
        uint32_t ws)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    uint32_t i;

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            /* Half precision floats come in two formats: standard
               IEEE and "ARM" format. The latter gains extra exponent
               range by omitting the NaN/Inf encodings. */
            flag ieee = 1;

            MSA_FLOAT_BINOP(pwx->w[i], from_float16, Lh(pws, i), ieee, 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_UNOP(pwx->d[i], from_float32, Lw(pws, i), 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);
    msa_move_v(pwd, pwx);
}

void helper_msa_fexupr_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
        uint32_t ws)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    uint32_t i;

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            /* Half precision floats come in two formats: standard
               IEEE and "ARM" format. The latter gains extra exponent
               range by omitting the NaN/Inf encodings. */
            flag ieee = 1;

            MSA_FLOAT_BINOP(pwx->w[i], from_float16, Rh(pws, i), ieee, 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_UNOP(pwx->d[i], from_float32, Rw(pws, i), 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);
    msa_move_v(pwd, pwx);
}

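/* FFQL.df and FFQR.df convert the left and right halves of a vector of
   fixed-point (Q15 or Q31) values to floating point. */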
void helper_msa_ffql_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
        uint32_t ws)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    uint32_t i;

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_UNOP(pwx->w[i], from_q16, Lh(pws, i), 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_UNOP(pwx->d[i], from_q32, Lw(pws, i), 64);
        }
        break;
    default:
        assert(0);
    }

    msa_move_v(pwd, pwx);
}

void helper_msa_ffqr_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
        uint32_t ws)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    uint32_t i;

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_UNOP(pwx->w[i], from_q16, Rh(pws, i), 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_UNOP(pwx->d[i], from_q32, Rw(pws, i), 64);
        }
        break;
    default:
        assert(0);
    }

    msa_move_v(pwd, pwx);
}

void helper_msa_ftint_s_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
        uint32_t ws)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_UNOP0(pwx->w[i], to_int32, pws->w[i], 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_UNOP0(pwx->d[i], to_int64, pws->d[i], 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);

    msa_move_v(pwd, pwx);
}

void helper_msa_ftint_u_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
        uint32_t ws)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_UNOP0(pwx->w[i], to_uint32, pws->w[i], 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_UNOP0(pwx->d[i], to_uint64, pws->d[i], 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);

    msa_move_v(pwd, pwx);
}

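/* softfloat spells integer-to-float conversions intN_to_floatN; alias them
   so the float ## BITS ## _ ## OP token pasting in MSA_FLOAT_UNOP resolves
   the from_int and from_uint names used by the FFINT helpers below. */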
#define float32_from_int32 int32_to_float32
#define float32_from_uint32 uint32_to_float32

#define float64_from_int64 int64_to_float64
#define float64_from_uint64 uint64_to_float64

void helper_msa_ffint_s_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
        uint32_t ws)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_UNOP(pwx->w[i], from_int32, pws->w[i], 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_UNOP(pwx->d[i], from_int64, pws->d[i], 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);

    msa_move_v(pwd, pwx);
}

void helper_msa_ffint_u_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
        uint32_t ws)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_UNOP(pwx->w[i], from_uint32, pws->w[i], 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_UNOP(pwx->d[i], from_uint64, pws->d[i], 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);

    msa_move_v(pwd, pwx);
}