/*
 * ARM VFP floating-point operations
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "internals.h"
#ifdef CONFIG_TCG
#include "qemu/log.h"
#include "fpu/softfloat.h"
#endif

/* VFP support. We follow the convention used for VFP instructions:
   Single precision routines have a "s" suffix, double precision a
   "d" suffix. */

#ifdef CONFIG_TCG

/* Convert host exception flags to vfp form. */
static inline int vfp_exceptbits_from_host(int host_bits)
{
    int target_bits = 0;

    if (host_bits & float_flag_invalid) {
        target_bits |= 1;
    }
    if (host_bits & float_flag_divbyzero) {
        target_bits |= 2;
    }
    if (host_bits & float_flag_overflow) {
        target_bits |= 4;
    }
    if (host_bits & (float_flag_underflow | float_flag_output_denormal)) {
        target_bits |= 8;
    }
    if (host_bits & float_flag_inexact) {
        target_bits |= 0x10;
    }
    if (host_bits & float_flag_input_denormal) {
        target_bits |= 0x80;
    }
    return target_bits;
}

/* Convert vfp exception flags to host form. */
static inline int vfp_exceptbits_to_host(int target_bits)
{
    int host_bits = 0;

    if (target_bits & 1) {
        host_bits |= float_flag_invalid;
    }
    if (target_bits & 2) {
        host_bits |= float_flag_divbyzero;
    }
    if (target_bits & 4) {
        host_bits |= float_flag_overflow;
    }
    if (target_bits & 8) {
        host_bits |= float_flag_underflow;
    }
    if (target_bits & 0x10) {
        host_bits |= float_flag_inexact;
    }
    if (target_bits & 0x80) {
        host_bits |= float_flag_input_denormal;
    }
    return host_bits;
}
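
/*
 * The bit positions used in the two conversion functions above are the
 * FPSCR cumulative exception flags: IOC (bit 0), DZC (bit 1), OFC (bit 2),
 * UFC (bit 3), IXC (bit 4) and IDC (bit 7).
 */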

static uint32_t vfp_get_fpscr_from_host(CPUARMState *env)
{
    uint32_t i;

    i = get_float_exception_flags(&env->vfp.fp_status);
    i |= get_float_exception_flags(&env->vfp.standard_fp_status);
    /* FZ16 does not generate an input denormal exception. */
    i |= (get_float_exception_flags(&env->vfp.fp_status_f16)
          & ~float_flag_input_denormal);
    i |= (get_float_exception_flags(&env->vfp.standard_fp_status_f16)
          & ~float_flag_input_denormal);
    return vfp_exceptbits_from_host(i);
}

static void vfp_set_fpscr_to_host(CPUARMState *env, uint32_t val)
{
    int i;
    uint32_t changed = env->vfp.xregs[ARM_VFP_FPSCR];

    changed ^= val;
    if (changed & (3 << 22)) {
        i = (val >> 22) & 3;
        switch (i) {
        case FPROUNDING_TIEEVEN:
            i = float_round_nearest_even;
            break;
        case FPROUNDING_POSINF:
            i = float_round_up;
            break;
        case FPROUNDING_NEGINF:
            i = float_round_down;
            break;
        case FPROUNDING_ZERO:
            i = float_round_to_zero;
            break;
        }
        set_float_rounding_mode(i, &env->vfp.fp_status);
        set_float_rounding_mode(i, &env->vfp.fp_status_f16);
    }
    if (changed & FPCR_FZ16) {
        bool ftz_enabled = val & FPCR_FZ16;
        set_flush_to_zero(ftz_enabled, &env->vfp.fp_status_f16);
        set_flush_to_zero(ftz_enabled, &env->vfp.standard_fp_status_f16);
        set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status_f16);
        set_flush_inputs_to_zero(ftz_enabled, &env->vfp.standard_fp_status_f16);
    }
    if (changed & FPCR_FZ) {
        bool ftz_enabled = val & FPCR_FZ;
        set_flush_to_zero(ftz_enabled, &env->vfp.fp_status);
        set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status);
    }
    if (changed & FPCR_DN) {
        bool dnan_enabled = val & FPCR_DN;
        set_default_nan_mode(dnan_enabled, &env->vfp.fp_status);
        set_default_nan_mode(dnan_enabled, &env->vfp.fp_status_f16);
    }

    /*
     * The exception flags are ORed together when we read fpscr so we
     * only need to preserve the current state in one of our
     * float_status values.
     */
    i = vfp_exceptbits_to_host(val);
    set_float_exception_flags(i, &env->vfp.fp_status);
    set_float_exception_flags(0, &env->vfp.fp_status_f16);
    set_float_exception_flags(0, &env->vfp.standard_fp_status);
    set_float_exception_flags(0, &env->vfp.standard_fp_status_f16);
}
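
/*
 * The FPSCR fields decoded above are RMode (bits [23:22]), FZ16 (bit 19),
 * FZ (bit 24) and DN (bit 25); each softfloat float_status is only
 * reconfigured when the corresponding field actually changed.
 */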

#else

static uint32_t vfp_get_fpscr_from_host(CPUARMState *env)
{
    return 0;
}

static void vfp_set_fpscr_to_host(CPUARMState *env, uint32_t val)
{
}

#endif

uint32_t HELPER(vfp_get_fpscr)(CPUARMState *env)
{
    uint32_t i, fpscr;

    fpscr = env->vfp.xregs[ARM_VFP_FPSCR]
            | (env->vfp.vec_len << 16)
            | (env->vfp.vec_stride << 20);

    fpscr |= vfp_get_fpscr_from_host(env);

    i = env->vfp.qc[0] | env->vfp.qc[1] | env->vfp.qc[2] | env->vfp.qc[3];
    fpscr |= i ? FPCR_QC : 0;

    return fpscr;
}

uint32_t vfp_get_fpscr(CPUARMState *env)
{
    return HELPER(vfp_get_fpscr)(env);
}

void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val)
{
    /* When ARMv8.2-FP16 is not supported, FZ16 is RES0. */
    if (!cpu_isar_feature(any_fp16, env_archcpu(env))) {
        val &= ~FPCR_FZ16;
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        /*
         * M profile FPSCR is RES0 for the QC, STRIDE, FZ16, LEN bits
         * and also for the trapped-exception-handling bits IxE.
         */
        val &= 0xf7c0009f;
    }

    vfp_set_fpscr_to_host(env, val);

    /*
     * We don't implement trapped exception handling, so the
     * trap enable bits, IDE|IXE|UFE|OFE|DZE|IOE are all RAZ/WI (not RES0!)
     *
     * If we exclude the exception flags, IOC|DZC|OFC|UFC|IXC|IDC
     * (which are stored in fp_status), and the other RES0 bits
     * in between, then we clear all of the low 16 bits.
     */
    env->vfp.xregs[ARM_VFP_FPSCR] = val & 0xf7c80000;
    env->vfp.vec_len = (val >> 16) & 7;
    env->vfp.vec_stride = (val >> 20) & 3;

    /*
     * The bit we set within fpscr_q is arbitrary; the register as a
     * whole being zero/non-zero is what counts.
     */
    env->vfp.qc[0] = val & FPCR_QC;
    env->vfp.qc[1] = 0;
    env->vfp.qc[2] = 0;
    env->vfp.qc[3] = 0;
}

void vfp_set_fpscr(CPUARMState *env, uint32_t val)
{
    HELPER(vfp_set_fpscr)(env, val);
}

#ifdef CONFIG_TCG

#define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p))

#define VFP_BINOP(name) \
dh_ctype_f16 VFP_HELPER(name, h)(dh_ctype_f16 a, dh_ctype_f16 b, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return float16_ ## name(a, b, fpst); \
} \
float32 VFP_HELPER(name, s)(float32 a, float32 b, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return float32_ ## name(a, b, fpst); \
} \
float64 VFP_HELPER(name, d)(float64 a, float64 b, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return float64_ ## name(a, b, fpst); \
}
VFP_BINOP(add)
VFP_BINOP(sub)
VFP_BINOP(mul)
VFP_BINOP(div)
VFP_BINOP(min)
VFP_BINOP(max)
VFP_BINOP(minnum)
VFP_BINOP(maxnum)
#undef VFP_BINOP

dh_ctype_f16 VFP_HELPER(neg, h)(dh_ctype_f16 a)
{
    return float16_chs(a);
}

float32 VFP_HELPER(neg, s)(float32 a)
{
    return float32_chs(a);
}

float64 VFP_HELPER(neg, d)(float64 a)
{
    return float64_chs(a);
}

dh_ctype_f16 VFP_HELPER(abs, h)(dh_ctype_f16 a)
{
    return float16_abs(a);
}

float32 VFP_HELPER(abs, s)(float32 a)
{
    return float32_abs(a);
}

float64 VFP_HELPER(abs, d)(float64 a)
{
    return float64_abs(a);
}

dh_ctype_f16 VFP_HELPER(sqrt, h)(dh_ctype_f16 a, CPUARMState *env)
{
    return float16_sqrt(a, &env->vfp.fp_status_f16);
}

float32 VFP_HELPER(sqrt, s)(float32 a, CPUARMState *env)
{
    return float32_sqrt(a, &env->vfp.fp_status);
}

float64 VFP_HELPER(sqrt, d)(float64 a, CPUARMState *env)
{
    return float64_sqrt(a, &env->vfp.fp_status);
}

static void softfloat_to_vfp_compare(CPUARMState *env, FloatRelation cmp)
{
    uint32_t flags;
    switch (cmp) {
    case float_relation_equal:
        flags = 0x6;
        break;
    case float_relation_less:
        flags = 0x8;
        break;
    case float_relation_greater:
        flags = 0x2;
        break;
    case float_relation_unordered:
        flags = 0x3;
        break;
    default:
        g_assert_not_reached();
    }
    env->vfp.xregs[ARM_VFP_FPSCR] =
        deposit32(env->vfp.xregs[ARM_VFP_FPSCR], 28, 4, flags);
}
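
/*
 * The flags value written above is the NZCV result of the compare,
 * deposited into FPSCR[31:28]: equal -> 0110 (Z,C set), less -> 1000 (N),
 * greater -> 0010 (C), unordered -> 0011 (C,V).
 */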

/* XXX: check quiet/signaling case */
#define DO_VFP_cmp(P, FLOATTYPE, ARGTYPE, FPST) \
void VFP_HELPER(cmp, P)(ARGTYPE a, ARGTYPE b, CPUARMState *env) \
{ \
    softfloat_to_vfp_compare(env, \
        FLOATTYPE ## _compare_quiet(a, b, &env->vfp.FPST)); \
} \
void VFP_HELPER(cmpe, P)(ARGTYPE a, ARGTYPE b, CPUARMState *env) \
{ \
    softfloat_to_vfp_compare(env, \
        FLOATTYPE ## _compare(a, b, &env->vfp.FPST)); \
}
DO_VFP_cmp(h, float16, dh_ctype_f16, fp_status_f16)
DO_VFP_cmp(s, float32, float32, fp_status)
DO_VFP_cmp(d, float64, float64, fp_status)
#undef DO_VFP_cmp

/* Integer to float and float to integer conversions */

#define CONV_ITOF(name, ftype, fsz, sign) \
ftype HELPER(name)(uint32_t x, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return sign##int32_to_##float##fsz((sign##int32_t)x, fpst); \
}

#define CONV_FTOI(name, ftype, fsz, sign, round) \
sign##int32_t HELPER(name)(ftype x, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    if (float##fsz##_is_any_nan(x)) { \
        float_raise(float_flag_invalid, fpst); \
        return 0; \
    } \
    return float##fsz##_to_##sign##int32##round(x, fpst); \
}

#define FLOAT_CONVS(name, p, ftype, fsz, sign) \
    CONV_ITOF(vfp_##name##to##p, ftype, fsz, sign) \
    CONV_FTOI(vfp_to##name##p, ftype, fsz, sign, ) \
    CONV_FTOI(vfp_to##name##z##p, ftype, fsz, sign, _round_to_zero)

FLOAT_CONVS(si, h, uint32_t, 16, )
FLOAT_CONVS(si, s, float32, 32, )
FLOAT_CONVS(si, d, float64, 64, )
FLOAT_CONVS(ui, h, uint32_t, 16, u)
FLOAT_CONVS(ui, s, float32, 32, u)
FLOAT_CONVS(ui, d, float64, 64, u)

#undef CONV_ITOF
#undef CONV_FTOI
#undef FLOAT_CONVS
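
/*
 * Each FLOAT_CONVS() line above expands to three helpers; for example
 * FLOAT_CONVS(si, s, float32, 32, ) defines helper_vfp_sitos (int32 to
 * float32), helper_vfp_tosis (float32 to int32 using the current rounding
 * mode) and helper_vfp_tosizs (float32 to int32, rounding towards zero).
 */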

/* floating point conversion */
float64 VFP_HELPER(fcvtd, s)(float32 x, CPUARMState *env)
{
    return float32_to_float64(x, &env->vfp.fp_status);
}

float32 VFP_HELPER(fcvts, d)(float64 x, CPUARMState *env)
{
    return float64_to_float32(x, &env->vfp.fp_status);
}

/* VFP3 fixed point conversion. */
#define VFP_CONV_FIX_FLOAT(name, p, fsz, ftype, isz, itype) \
ftype HELPER(vfp_##name##to##p)(uint##isz##_t x, uint32_t shift, \
                                void *fpstp) \
{ return itype##_to_##float##fsz##_scalbn(x, -shift, fpstp); }

#define VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, ftype, isz, itype, ROUND, suff) \
uint##isz##_t HELPER(vfp_to##name##p##suff)(ftype x, uint32_t shift, \
                                            void *fpst) \
{ \
    if (unlikely(float##fsz##_is_any_nan(x))) { \
        float_raise(float_flag_invalid, fpst); \
        return 0; \
    } \
    return float##fsz##_to_##itype##_scalbn(x, ROUND, shift, fpst); \
}

#define VFP_CONV_FIX(name, p, fsz, ftype, isz, itype) \
VFP_CONV_FIX_FLOAT(name, p, fsz, ftype, isz, itype) \
VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, ftype, isz, itype, \
                         float_round_to_zero, _round_to_zero) \
VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, ftype, isz, itype, \
                         get_float_rounding_mode(fpst), )

#define VFP_CONV_FIX_A64(name, p, fsz, ftype, isz, itype) \
VFP_CONV_FIX_FLOAT(name, p, fsz, ftype, isz, itype) \
VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, ftype, isz, itype, \
                         get_float_rounding_mode(fpst), )

VFP_CONV_FIX(sh, d, 64, float64, 64, int16)
VFP_CONV_FIX(sl, d, 64, float64, 64, int32)
VFP_CONV_FIX_A64(sq, d, 64, float64, 64, int64)
VFP_CONV_FIX(uh, d, 64, float64, 64, uint16)
VFP_CONV_FIX(ul, d, 64, float64, 64, uint32)
VFP_CONV_FIX_A64(uq, d, 64, float64, 64, uint64)
VFP_CONV_FIX(sh, s, 32, float32, 32, int16)
VFP_CONV_FIX(sl, s, 32, float32, 32, int32)
VFP_CONV_FIX_A64(sq, s, 32, float32, 64, int64)
VFP_CONV_FIX(uh, s, 32, float32, 32, uint16)
VFP_CONV_FIX(ul, s, 32, float32, 32, uint32)
VFP_CONV_FIX_A64(uq, s, 32, float32, 64, uint64)
VFP_CONV_FIX(sh, h, 16, dh_ctype_f16, 32, int16)
VFP_CONV_FIX(sl, h, 16, dh_ctype_f16, 32, int32)
VFP_CONV_FIX_A64(sq, h, 16, dh_ctype_f16, 64, int64)
VFP_CONV_FIX(uh, h, 16, dh_ctype_f16, 32, uint16)
VFP_CONV_FIX(ul, h, 16, dh_ctype_f16, 32, uint32)
VFP_CONV_FIX_A64(uq, h, 16, dh_ctype_f16, 64, uint64)

#undef VFP_CONV_FIX
#undef VFP_CONV_FIX_FLOAT
#undef VFP_CONV_FLOAT_FIX_ROUND
#undef VFP_CONV_FIX_A64

/* Set the current fp rounding mode and return the old one.
 * The argument is a softfloat float_round_ value.
 */
uint32_t HELPER(set_rmode)(uint32_t rmode, void *fpstp)
{
    float_status *fp_status = fpstp;

    uint32_t prev_rmode = get_float_rounding_mode(fp_status);
    set_float_rounding_mode(rmode, fp_status);

    return prev_rmode;
}

/* Set the current fp rounding mode in the standard fp status and return
 * the old one. This is for NEON instructions that need to change the
 * rounding mode but wish to use the standard FPSCR values for everything
 * else. Always set the rounding mode back to the correct value after
 * modifying it.
 * The argument is a softfloat float_round_ value.
 */
uint32_t HELPER(set_neon_rmode)(uint32_t rmode, CPUARMState *env)
{
    float_status *fp_status = &env->vfp.standard_fp_status;

    uint32_t prev_rmode = get_float_rounding_mode(fp_status);
    set_float_rounding_mode(rmode, fp_status);

    return prev_rmode;
}

/* Half precision conversions. */
float32 HELPER(vfp_fcvt_f16_to_f32)(uint32_t a, void *fpstp, uint32_t ahp_mode)
{
    /* Squash FZ16 to 0 for the duration of conversion. In this case,
     * it would affect flushing input denormals.
     */
    float_status *fpst = fpstp;
    bool save = get_flush_inputs_to_zero(fpst);
    set_flush_inputs_to_zero(false, fpst);
    float32 r = float16_to_float32(a, !ahp_mode, fpst);
    set_flush_inputs_to_zero(save, fpst);
    return r;
}

uint32_t HELPER(vfp_fcvt_f32_to_f16)(float32 a, void *fpstp, uint32_t ahp_mode)
{
    /* Squash FZ16 to 0 for the duration of conversion. In this case,
     * it would affect flushing output denormals.
     */
    float_status *fpst = fpstp;
    bool save = get_flush_to_zero(fpst);
    set_flush_to_zero(false, fpst);
    float16 r = float32_to_float16(a, !ahp_mode, fpst);
    set_flush_to_zero(save, fpst);
    return r;
}

float64 HELPER(vfp_fcvt_f16_to_f64)(uint32_t a, void *fpstp, uint32_t ahp_mode)
{
    /* Squash FZ16 to 0 for the duration of conversion. In this case,
     * it would affect flushing input denormals.
     */
    float_status *fpst = fpstp;
    bool save = get_flush_inputs_to_zero(fpst);
    set_flush_inputs_to_zero(false, fpst);
    float64 r = float16_to_float64(a, !ahp_mode, fpst);
    set_flush_inputs_to_zero(save, fpst);
    return r;
}

uint32_t HELPER(vfp_fcvt_f64_to_f16)(float64 a, void *fpstp, uint32_t ahp_mode)
{
    /* Squash FZ16 to 0 for the duration of conversion. In this case,
     * it would affect flushing output denormals.
     */
    float_status *fpst = fpstp;
    bool save = get_flush_to_zero(fpst);
    set_flush_to_zero(false, fpst);
    float16 r = float64_to_float16(a, !ahp_mode, fpst);
    set_flush_to_zero(save, fpst);
    return r;
}

float32 HELPER(recps_f32)(CPUARMState *env, float32 a, float32 b)
{
    float_status *s = &env->vfp.standard_fp_status;
    if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
        (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
        if (!(float32_is_zero(a) || float32_is_zero(b))) {
            float_raise(float_flag_input_denormal, s);
        }
        return float32_two;
    }
    return float32_sub(float32_two, float32_mul(a, b, s), s);
}

float32 HELPER(rsqrts_f32)(CPUARMState *env, float32 a, float32 b)
{
    float_status *s = &env->vfp.standard_fp_status;
    float32 product;
    if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
        (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
        if (!(float32_is_zero(a) || float32_is_zero(b))) {
            float_raise(float_flag_input_denormal, s);
        }
        return float32_one_point_five;
    }
    product = float32_mul(a, b, s);
    return float32_div(float32_sub(float32_three, product, s), float32_two, s);
}
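
/*
 * recps_f32 and rsqrts_f32 above are the NEON VRECPS and VRSQRTS step
 * operations: they compute 2 - a*b and (3 - a*b) / 2 respectively, i.e.
 * one Newton-Raphson refinement step for a reciprocal and a reciprocal
 * square root.
 */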

/* NEON helpers. */

/* Constants 256 and 512 are used in some helpers; we avoid relying on
 * int->float conversions at run-time. */
#define float64_256 make_float64(0x4070000000000000LL)
#define float64_512 make_float64(0x4080000000000000LL)
#define float16_maxnorm make_float16(0x7bff)
#define float32_maxnorm make_float32(0x7f7fffff)
#define float64_maxnorm make_float64(0x7fefffffffffffffLL)

/* Reciprocal functions
 *
 * The algorithm that must be used to calculate the estimate
 * is specified by the ARM ARM, see FPRecipEstimate()/RecipEstimate
 */

/* See RecipEstimate()
 *
 * input is a 9 bit fixed point number
 * input range 256 .. 511 for a number from 0.5 <= x < 1.0.
 * result range 256 .. 511 for a number from 1.0 to 511/256.
 */

static int recip_estimate(int input)
{
    int a, b, r;
    assert(256 <= input && input < 512);
    a = (input * 2) + 1;
    b = (1 << 19) / a;
    r = (b + 1) >> 1;
    assert(256 <= r && r < 512);
    return r;
}
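
/*
 * For example, recip_estimate(256) (i.e. x = 0.5) computes a = 513 and
 * b = 1022, returning 511, i.e. 511/256, which is just under 2.0 = 1/0.5.
 */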

/*
 * Common wrapper to call recip_estimate
 *
 * The parameters are exponent and 64 bit fraction (without implicit
 * bit) where the binary point is nominally at bit 52. Returns a
 * float64 which can then be rounded to the appropriate size by the
 * callee.
 */

static uint64_t call_recip_estimate(int *exp, int exp_off, uint64_t frac)
{
    uint32_t scaled, estimate;
    uint64_t result_frac;
    int result_exp;

    /* Handle sub-normals */
    if (*exp == 0) {
        if (extract64(frac, 51, 1) == 0) {
            *exp = -1;
            frac <<= 2;
        } else {
            frac <<= 1;
        }
    }

    /* scaled = UInt('1':fraction<51:44>) */
    scaled = deposit32(1 << 8, 0, 8, extract64(frac, 44, 8));
    estimate = recip_estimate(scaled);

    result_exp = exp_off - *exp;
    result_frac = deposit64(0, 44, 8, estimate);
    if (result_exp == 0) {
        result_frac = deposit64(result_frac >> 1, 51, 1, 1);
    } else if (result_exp == -1) {
        result_frac = deposit64(result_frac >> 2, 50, 2, 1);
        result_exp = 0;
    }

    *exp = result_exp;

    return result_frac;
}

static bool round_to_inf(float_status *fpst, bool sign_bit)
{
    switch (fpst->float_rounding_mode) {
    case float_round_nearest_even: /* Round to Nearest */
        return true;
    case float_round_up: /* Round to +Inf */
        return !sign_bit;
    case float_round_down: /* Round to -Inf */
        return sign_bit;
    case float_round_to_zero: /* Round to Zero */
        return false;
    default:
        g_assert_not_reached();
    }
}

uint32_t HELPER(recpe_f16)(uint32_t input, void *fpstp)
{
    float_status *fpst = fpstp;
    float16 f16 = float16_squash_input_denormal(input, fpst);
    uint32_t f16_val = float16_val(f16);
    uint32_t f16_sign = float16_is_neg(f16);
    int f16_exp = extract32(f16_val, 10, 5);
    uint32_t f16_frac = extract32(f16_val, 0, 10);
    uint64_t f64_frac;

    if (float16_is_any_nan(f16)) {
        float16 nan = f16;
        if (float16_is_signaling_nan(f16, fpst)) {
            float_raise(float_flag_invalid, fpst);
            nan = float16_silence_nan(f16, fpst);
        }
        if (fpst->default_nan_mode) {
            nan = float16_default_nan(fpst);
        }
        return nan;
    } else if (float16_is_infinity(f16)) {
        return float16_set_sign(float16_zero, float16_is_neg(f16));
    } else if (float16_is_zero(f16)) {
        float_raise(float_flag_divbyzero, fpst);
        return float16_set_sign(float16_infinity, float16_is_neg(f16));
    } else if (float16_abs(f16) < (1 << 8)) {
        /* Abs(value) < 2.0^-16 */
        float_raise(float_flag_overflow | float_flag_inexact, fpst);
        if (round_to_inf(fpst, f16_sign)) {
            return float16_set_sign(float16_infinity, f16_sign);
        } else {
            return float16_set_sign(float16_maxnorm, f16_sign);
        }
    } else if (f16_exp >= 29 && fpst->flush_to_zero) {
        float_raise(float_flag_underflow, fpst);
        return float16_set_sign(float16_zero, float16_is_neg(f16));
    }

    f64_frac = call_recip_estimate(&f16_exp, 29,
                                   ((uint64_t) f16_frac) << (52 - 10));

    /* result = sign : result_exp<4:0> : fraction<51:42> */
    f16_val = deposit32(0, 15, 1, f16_sign);
    f16_val = deposit32(f16_val, 10, 5, f16_exp);
    f16_val = deposit32(f16_val, 0, 10, extract64(f64_frac, 52 - 10, 10));
    return make_float16(f16_val);
}

float32 HELPER(recpe_f32)(float32 input, void *fpstp)
{
    float_status *fpst = fpstp;
    float32 f32 = float32_squash_input_denormal(input, fpst);
    uint32_t f32_val = float32_val(f32);
    bool f32_sign = float32_is_neg(f32);
    int f32_exp = extract32(f32_val, 23, 8);
    uint32_t f32_frac = extract32(f32_val, 0, 23);
    uint64_t f64_frac;

    if (float32_is_any_nan(f32)) {
        float32 nan = f32;
        if (float32_is_signaling_nan(f32, fpst)) {
            float_raise(float_flag_invalid, fpst);
            nan = float32_silence_nan(f32, fpst);
        }
        if (fpst->default_nan_mode) {
            nan = float32_default_nan(fpst);
        }
        return nan;
    } else if (float32_is_infinity(f32)) {
        return float32_set_sign(float32_zero, float32_is_neg(f32));
    } else if (float32_is_zero(f32)) {
        float_raise(float_flag_divbyzero, fpst);
        return float32_set_sign(float32_infinity, float32_is_neg(f32));
    } else if (float32_abs(f32) < (1ULL << 21)) {
        /* Abs(value) < 2.0^-128 */
        float_raise(float_flag_overflow | float_flag_inexact, fpst);
        if (round_to_inf(fpst, f32_sign)) {
            return float32_set_sign(float32_infinity, f32_sign);
        } else {
            return float32_set_sign(float32_maxnorm, f32_sign);
        }
    } else if (f32_exp >= 253 && fpst->flush_to_zero) {
        float_raise(float_flag_underflow, fpst);
        return float32_set_sign(float32_zero, float32_is_neg(f32));
    }

    f64_frac = call_recip_estimate(&f32_exp, 253,
                                   ((uint64_t) f32_frac) << (52 - 23));

    /* result = sign : result_exp<7:0> : fraction<51:29> */
    f32_val = deposit32(0, 31, 1, f32_sign);
    f32_val = deposit32(f32_val, 23, 8, f32_exp);
    f32_val = deposit32(f32_val, 0, 23, extract64(f64_frac, 52 - 23, 23));
    return make_float32(f32_val);
}

float64 HELPER(recpe_f64)(float64 input, void *fpstp)
{
    float_status *fpst = fpstp;
    float64 f64 = float64_squash_input_denormal(input, fpst);
    uint64_t f64_val = float64_val(f64);
    bool f64_sign = float64_is_neg(f64);
    int f64_exp = extract64(f64_val, 52, 11);
    uint64_t f64_frac = extract64(f64_val, 0, 52);

    /* Deal with any special cases */
    if (float64_is_any_nan(f64)) {
        float64 nan = f64;
        if (float64_is_signaling_nan(f64, fpst)) {
            float_raise(float_flag_invalid, fpst);
            nan = float64_silence_nan(f64, fpst);
        }
        if (fpst->default_nan_mode) {
            nan = float64_default_nan(fpst);
        }
        return nan;
    } else if (float64_is_infinity(f64)) {
        return float64_set_sign(float64_zero, float64_is_neg(f64));
    } else if (float64_is_zero(f64)) {
        float_raise(float_flag_divbyzero, fpst);
        return float64_set_sign(float64_infinity, float64_is_neg(f64));
    } else if ((f64_val & ~(1ULL << 63)) < (1ULL << 50)) {
        /* Abs(value) < 2.0^-1024 */
        float_raise(float_flag_overflow | float_flag_inexact, fpst);
        if (round_to_inf(fpst, f64_sign)) {
            return float64_set_sign(float64_infinity, f64_sign);
        } else {
            return float64_set_sign(float64_maxnorm, f64_sign);
        }
    } else if (f64_exp >= 2045 && fpst->flush_to_zero) {
        float_raise(float_flag_underflow, fpst);
        return float64_set_sign(float64_zero, float64_is_neg(f64));
    }

    f64_frac = call_recip_estimate(&f64_exp, 2045, f64_frac);

    /* result = sign : result_exp<10:0> : fraction<51:0>; */
    f64_val = deposit64(0, 63, 1, f64_sign);
    f64_val = deposit64(f64_val, 52, 11, f64_exp);
    f64_val = deposit64(f64_val, 0, 52, f64_frac);
    return make_float64(f64_val);
}

/* The algorithm that must be used to calculate the estimate
 * is specified by the ARM ARM.
 */

static int do_recip_sqrt_estimate(int a)
{
    int b, estimate;

    assert(128 <= a && a < 512);
    if (a < 256) {
        a = a * 2 + 1;
    } else {
        a = (a >> 1) << 1;
        a = (a + 1) * 2;
    }
    b = 512;
    while (a * (b + 1) * (b + 1) < (1 << 28)) {
        b += 1;
    }
    estimate = (b + 1) / 2;
    assert(256 <= estimate && estimate < 512);

    return estimate;
}
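
/*
 * For example, do_recip_sqrt_estimate(256) computes a = 514, finds b = 722
 * and returns 361; 361/256 is approximately 1.41, i.e. 1/sqrt(0.5).
 */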

static uint64_t recip_sqrt_estimate(int *exp , int exp_off, uint64_t frac)
{
    int estimate;
    uint32_t scaled;

    if (*exp == 0) {
        while (extract64(frac, 51, 1) == 0) {
            frac = frac << 1;
            *exp -= 1;
        }
        frac = extract64(frac, 0, 51) << 1;
    }

    if (*exp & 1) {
        /* scaled = UInt('01':fraction<51:45>) */
        scaled = deposit32(1 << 7, 0, 7, extract64(frac, 45, 7));
    } else {
        /* scaled = UInt('1':fraction<51:44>) */
        scaled = deposit32(1 << 8, 0, 8, extract64(frac, 44, 8));
    }
    estimate = do_recip_sqrt_estimate(scaled);

    *exp = (exp_off - *exp) / 2;
    return extract64(estimate, 0, 8) << 44;
}

uint32_t HELPER(rsqrte_f16)(uint32_t input, void *fpstp)
{
    float_status *s = fpstp;
    float16 f16 = float16_squash_input_denormal(input, s);
    uint16_t val = float16_val(f16);
    bool f16_sign = float16_is_neg(f16);
    int f16_exp = extract32(val, 10, 5);
    uint16_t f16_frac = extract32(val, 0, 10);
    uint64_t f64_frac;

    if (float16_is_any_nan(f16)) {
        float16 nan = f16;
        if (float16_is_signaling_nan(f16, s)) {
            float_raise(float_flag_invalid, s);
            nan = float16_silence_nan(f16, s);
        }
        if (s->default_nan_mode) {
            nan = float16_default_nan(s);
        }
        return nan;
    } else if (float16_is_zero(f16)) {
        float_raise(float_flag_divbyzero, s);
        return float16_set_sign(float16_infinity, f16_sign);
    } else if (f16_sign) {
        float_raise(float_flag_invalid, s);
        return float16_default_nan(s);
    } else if (float16_is_infinity(f16)) {
        return float16_zero;
    }

    /* Scale and normalize to a double-precision value between 0.25 and 1.0,
     * preserving the parity of the exponent. */

    f64_frac = ((uint64_t) f16_frac) << (52 - 10);

    f64_frac = recip_sqrt_estimate(&f16_exp, 44, f64_frac);

    /* result = sign : result_exp<4:0> : estimate<7:0> : Zeros(2) */
    val = deposit32(0, 15, 1, f16_sign);
    val = deposit32(val, 10, 5, f16_exp);
    val = deposit32(val, 2, 8, extract64(f64_frac, 52 - 8, 8));
    return make_float16(val);
}

float32 HELPER(rsqrte_f32)(float32 input, void *fpstp)
{
    float_status *s = fpstp;
    float32 f32 = float32_squash_input_denormal(input, s);
    uint32_t val = float32_val(f32);
    uint32_t f32_sign = float32_is_neg(f32);
    int f32_exp = extract32(val, 23, 8);
    uint32_t f32_frac = extract32(val, 0, 23);
    uint64_t f64_frac;

    if (float32_is_any_nan(f32)) {
        float32 nan = f32;
        if (float32_is_signaling_nan(f32, s)) {
            float_raise(float_flag_invalid, s);
            nan = float32_silence_nan(f32, s);
        }
        if (s->default_nan_mode) {
            nan = float32_default_nan(s);
        }
        return nan;
    } else if (float32_is_zero(f32)) {
        float_raise(float_flag_divbyzero, s);
        return float32_set_sign(float32_infinity, float32_is_neg(f32));
    } else if (float32_is_neg(f32)) {
        float_raise(float_flag_invalid, s);
        return float32_default_nan(s);
    } else if (float32_is_infinity(f32)) {
        return float32_zero;
    }

    /* Scale and normalize to a double-precision value between 0.25 and 1.0,
     * preserving the parity of the exponent. */

    f64_frac = ((uint64_t) f32_frac) << 29;

    f64_frac = recip_sqrt_estimate(&f32_exp, 380, f64_frac);

    /* result = sign : result_exp<7:0> : estimate<7:0> : Zeros(15) */
    val = deposit32(0, 31, 1, f32_sign);
    val = deposit32(val, 23, 8, f32_exp);
    val = deposit32(val, 15, 8, extract64(f64_frac, 52 - 8, 8));
    return make_float32(val);
}

float64 HELPER(rsqrte_f64)(float64 input, void *fpstp)
{
    float_status *s = fpstp;
    float64 f64 = float64_squash_input_denormal(input, s);
    uint64_t val = float64_val(f64);
    bool f64_sign = float64_is_neg(f64);
    int f64_exp = extract64(val, 52, 11);
    uint64_t f64_frac = extract64(val, 0, 52);

    if (float64_is_any_nan(f64)) {
        float64 nan = f64;
        if (float64_is_signaling_nan(f64, s)) {
            float_raise(float_flag_invalid, s);
            nan = float64_silence_nan(f64, s);
        }
        if (s->default_nan_mode) {
            nan = float64_default_nan(s);
        }
        return nan;
    } else if (float64_is_zero(f64)) {
        float_raise(float_flag_divbyzero, s);
        return float64_set_sign(float64_infinity, float64_is_neg(f64));
    } else if (float64_is_neg(f64)) {
        float_raise(float_flag_invalid, s);
        return float64_default_nan(s);
    } else if (float64_is_infinity(f64)) {
        return float64_zero;
    }

    f64_frac = recip_sqrt_estimate(&f64_exp, 3068, f64_frac);

    /* result = sign : result_exp<10:0> : estimate<7:0> : Zeros(44) */
    val = deposit64(0, 61, 1, f64_sign);
    val = deposit64(val, 52, 11, f64_exp);
    val = deposit64(val, 44, 8, extract64(f64_frac, 52 - 8, 8));
    return make_float64(val);
}

uint32_t HELPER(recpe_u32)(uint32_t a)
{
    int input, estimate;

    if ((a & 0x80000000) == 0) {
        return 0xffffffff;
    }

    input = extract32(a, 23, 9);
    estimate = recip_estimate(input);

    return deposit32(0, (32 - 9), 9, estimate);
}

uint32_t HELPER(rsqrte_u32)(uint32_t a)
{
    int estimate;

    if ((a & 0xc0000000) == 0) {
        return 0xffffffff;
    }

    estimate = do_recip_sqrt_estimate(extract32(a, 23, 9));

    return deposit32(0, 23, 9, estimate);
}

/* VFPv4 fused multiply-accumulate */
dh_ctype_f16 VFP_HELPER(muladd, h)(dh_ctype_f16 a, dh_ctype_f16 b,
                                   dh_ctype_f16 c, void *fpstp)
{
    float_status *fpst = fpstp;
    return float16_muladd(a, b, c, 0, fpst);
}

float32 VFP_HELPER(muladd, s)(float32 a, float32 b, float32 c, void *fpstp)
{
    float_status *fpst = fpstp;
    return float32_muladd(a, b, c, 0, fpst);
}

float64 VFP_HELPER(muladd, d)(float64 a, float64 b, float64 c, void *fpstp)
{
    float_status *fpst = fpstp;
    return float64_muladd(a, b, c, 0, fpst);
}

/* ARMv8 round to integral */
float32 HELPER(rints_exact)(float32 x, void *fp_status)
{
    return float32_round_to_int(x, fp_status);
}

float64 HELPER(rintd_exact)(float64 x, void *fp_status)
{
    return float64_round_to_int(x, fp_status);
}

float32 HELPER(rints)(float32 x, void *fp_status)
{
    int old_flags = get_float_exception_flags(fp_status), new_flags;
    float32 ret;

    ret = float32_round_to_int(x, fp_status);

    /* Suppress any inexact exceptions the conversion produced */
    if (!(old_flags & float_flag_inexact)) {
        new_flags = get_float_exception_flags(fp_status);
        set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
    }

    return ret;
}

float64 HELPER(rintd)(float64 x, void *fp_status)
{
    int old_flags = get_float_exception_flags(fp_status), new_flags;
    float64 ret;

    ret = float64_round_to_int(x, fp_status);

    new_flags = get_float_exception_flags(fp_status);

    /* Suppress any inexact exceptions the conversion produced */
    if (!(old_flags & float_flag_inexact)) {
        new_flags = get_float_exception_flags(fp_status);
        set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
    }

    return ret;
}

/* Convert ARM rounding mode to softfloat */
int arm_rmode_to_sf(int rmode)
{
    switch (rmode) {
    case FPROUNDING_TIEAWAY:
        rmode = float_round_ties_away;
        break;
    case FPROUNDING_ODD:
        /* FIXME: add support for TIEAWAY and ODD */
        qemu_log_mask(LOG_UNIMP, "arm: unimplemented rounding mode: %d\n",
                      rmode);
        /* fall through for now */
    case FPROUNDING_TIEEVEN:
    default:
        rmode = float_round_nearest_even;
        break;
    case FPROUNDING_POSINF:
        rmode = float_round_up;
        break;
    case FPROUNDING_NEGINF:
        rmode = float_round_down;
        break;
    case FPROUNDING_ZERO:
        rmode = float_round_to_zero;
        break;
    }
    return rmode;
}

/*
 * Implement float64 to int32_t conversion without saturation;
 * the result is supplied modulo 2^32.
 */
uint64_t HELPER(fjcvtzs)(float64 value, void *vstatus)
{
    float_status *status = vstatus;
    uint32_t exp, sign;
    uint64_t frac;
    uint32_t inexact = 1; /* !Z */

    sign = extract64(value, 63, 1);
    exp = extract64(value, 52, 11);
    frac = extract64(value, 0, 52);

    if (exp == 0) {
        /* While not inexact for IEEE FP, -0.0 is inexact for JavaScript. */
        inexact = sign;
        if (frac != 0) {
            if (status->flush_inputs_to_zero) {
                float_raise(float_flag_input_denormal, status);
            } else {
                float_raise(float_flag_inexact, status);
                inexact = 1;
            }
        }
        frac = 0;
    } else if (exp == 0x7ff) {
        /* This operation raises Invalid for both NaN and overflow (Inf). */
        float_raise(float_flag_invalid, status);
        frac = 0;
    } else {
        int true_exp = exp - 1023;
        int shift = true_exp - 52;

        /* Restore implicit bit. */
        frac |= 1ull << 52;

        /* Shift the fraction into place. */
        if (shift >= 0) {
            /* The number is so large we must shift the fraction left. */
            if (shift >= 64) {
                /* The fraction is shifted out entirely. */
                frac = 0;
            } else {
                frac <<= shift;
            }
        } else if (shift > -64) {
            /* Normal case -- shift right and notice if bits shift out. */
            inexact = (frac << (64 + shift)) != 0;
            frac >>= -shift;
        } else {
            /* The fraction is shifted out entirely. */
            frac = 0;
        }

        /* Notice overflow or inexact exceptions. */
        if (true_exp > 31 || frac > (sign ? 0x80000000ull : 0x7fffffff)) {
            /* Overflow, for which this operation raises invalid. */
            float_raise(float_flag_invalid, status);
            inexact = 1;
        } else if (inexact) {
            float_raise(float_flag_inexact, status);
        }

        /* Honor the sign. */
        if (sign) {
            frac = -frac;
        }
    }

    /* Pack the result and the env->ZF representation of Z together. */
    return deposit64(frac, 32, 32, inexact);
}

uint32_t HELPER(vjcvt)(float64 value, CPUARMState *env)
{
    uint64_t pair = HELPER(fjcvtzs)(value, &env->vfp.fp_status);
    uint32_t result = pair;
    uint32_t z = (pair >> 32) == 0;

    /* Store Z, clear NCV, in FPSCR.NZCV. */
    env->vfp.xregs[ARM_VFP_FPSCR]
        = (env->vfp.xregs[ARM_VFP_FPSCR] & ~CPSR_NZCV) | (z * CPSR_Z);

    return result;
}

/* Round a float32 to an integer that fits in int32_t or int64_t. */
static float32 frint_s(float32 f, float_status *fpst, int intsize)
{
    int old_flags = get_float_exception_flags(fpst);
    uint32_t exp = extract32(f, 23, 8);

    if (unlikely(exp == 0xff)) {
        /* NaN or Inf. */
        goto overflow;
    }

    /* Round and re-extract the exponent. */
    f = float32_round_to_int(f, fpst);
    exp = extract32(f, 23, 8);

    /* Validate the range of the result. */
    if (exp < 126 + intsize) {
        /* abs(F) <= INT{N}_MAX */
        return f;
    }
    if (exp == 126 + intsize) {
        uint32_t sign = extract32(f, 31, 1);
        uint32_t frac = extract32(f, 0, 23);
        if (sign && frac == 0) {
            /* F == INT{N}_MIN */
            return f;
        }
    }

 overflow:
    /*
     * Raise Invalid and return INT{N}_MIN as a float. Revert any
     * inexact exception float32_round_to_int may have raised.
     */
    set_float_exception_flags(old_flags | float_flag_invalid, fpst);
    return (0x100u + 126u + intsize) << 23;
}

float32 HELPER(frint32_s)(float32 f, void *fpst)
{
    return frint_s(f, fpst, 32);
}

float32 HELPER(frint64_s)(float32 f, void *fpst)
{
    return frint_s(f, fpst, 64);
}

/* Round a float64 to an integer that fits in int32_t or int64_t. */
static float64 frint_d(float64 f, float_status *fpst, int intsize)
{
    int old_flags = get_float_exception_flags(fpst);
    uint32_t exp = extract64(f, 52, 11);

    if (unlikely(exp == 0x7ff)) {
        /* NaN or Inf. */
        goto overflow;
    }

    /* Round and re-extract the exponent. */
    f = float64_round_to_int(f, fpst);
    exp = extract64(f, 52, 11);

    /* Validate the range of the result. */
    if (exp < 1022 + intsize) {
        /* abs(F) <= INT{N}_MAX */
        return f;
    }
    if (exp == 1022 + intsize) {
        uint64_t sign = extract64(f, 63, 1);
        uint64_t frac = extract64(f, 0, 52);
        if (sign && frac == 0) {
            /* F == INT{N}_MIN */
            return f;
        }
    }

 overflow:
    /*
     * Raise Invalid and return INT{N}_MIN as a float. Revert any
     * inexact exception float64_round_to_int may have raised.
     */
    set_float_exception_flags(old_flags | float_flag_invalid, fpst);
    return (uint64_t)(0x800 + 1022 + intsize) << 52;
}

float64 HELPER(frint32_d)(float64 f, void *fpst)
{
    return frint_d(f, fpst, 32);
}

float64 HELPER(frint64_d)(float64 f, void *fpst)
{
    return frint_d(f, fpst, 64);
}

void HELPER(check_hcr_el2_trap)(CPUARMState *env, uint32_t rt, uint32_t reg)
{
    uint32_t syndrome;

    switch (reg) {
    case ARM_VFP_MVFR0:
    case ARM_VFP_MVFR1:
    case ARM_VFP_MVFR2:
        if (!(arm_hcr_el2_eff(env) & HCR_TID3)) {
            return;
        }
        break;
    case ARM_VFP_FPSID:
        if (!(arm_hcr_el2_eff(env) & HCR_TID0)) {
            return;
        }
        break;
    default:
        g_assert_not_reached();
    }

    syndrome = ((EC_FPIDTRAP << ARM_EL_EC_SHIFT)
                | ARM_EL_IL
                | (1 << 24) | (0xe << 20) | (7 << 14)
                | (reg << 10) | (rt << 5) | 1);

    raise_exception(env, EXCP_HYP_TRAP, syndrome, 2);
}

#endif