]>
Commit | Line | Data |
---|---|---|
37356079 RH |
1 | /* |
2 | * ARM VFP floating-point operations | |
3 | * | |
4 | * Copyright (c) 2003 Fabrice Bellard | |
5 | * | |
6 | * This library is free software; you can redistribute it and/or | |
7 | * modify it under the terms of the GNU Lesser General Public | |
8 | * License as published by the Free Software Foundation; either | |
9 | * version 2.1 of the License, or (at your option) any later version. | |
10 | * | |
11 | * This library is distributed in the hope that it will be useful, | |
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
14 | * Lesser General Public License for more details. | |
15 | * | |
16 | * You should have received a copy of the GNU Lesser General Public | |
17 | * License along with this library; if not, see <http://www.gnu.org/licenses/>. | |
18 | */ | |
19 | ||
20 | #include "qemu/osdep.h" | |
37356079 RH |
21 | #include "cpu.h" |
22 | #include "exec/helper-proto.h" | |
37356079 | 23 | #include "internals.h" |
4a15527c PMD |
24 | #ifdef CONFIG_TCG |
25 | #include "qemu/log.h" | |
26 | #include "fpu/softfloat.h" | |
27 | #endif | |
37356079 RH |
28 | |
29 | /* VFP support. We follow the convention used for VFP instructions: | |
30 | Single precision routines have a "s" suffix, double precision a | |
31 | "d" suffix. */ | |
32 | ||
4a15527c PMD |
33 | #ifdef CONFIG_TCG |
34 | ||
37356079 RH |
35 | /* Convert host exception flags to vfp form. */ |
36 | static inline int vfp_exceptbits_from_host(int host_bits) | |
37 | { | |
38 | int target_bits = 0; | |
39 | ||
9798ac71 | 40 | if (host_bits & float_flag_invalid) { |
37356079 | 41 | target_bits |= 1; |
9798ac71 PMD |
42 | } |
43 | if (host_bits & float_flag_divbyzero) { | |
37356079 | 44 | target_bits |= 2; |
9798ac71 PMD |
45 | } |
46 | if (host_bits & float_flag_overflow) { | |
37356079 | 47 | target_bits |= 4; |
9798ac71 PMD |
48 | } |
49 | if (host_bits & (float_flag_underflow | float_flag_output_denormal)) { | |
37356079 | 50 | target_bits |= 8; |
9798ac71 PMD |
51 | } |
52 | if (host_bits & float_flag_inexact) { | |
37356079 | 53 | target_bits |= 0x10; |
9798ac71 PMD |
54 | } |
55 | if (host_bits & float_flag_input_denormal) { | |
37356079 | 56 | target_bits |= 0x80; |
9798ac71 | 57 | } |
37356079 RH |
58 | return target_bits; |
59 | } | |
60 | ||
37356079 RH |
61 | /* Convert vfp exception flags to target form. */ |
62 | static inline int vfp_exceptbits_to_host(int target_bits) | |
63 | { | |
64 | int host_bits = 0; | |
65 | ||
9798ac71 | 66 | if (target_bits & 1) { |
37356079 | 67 | host_bits |= float_flag_invalid; |
9798ac71 PMD |
68 | } |
69 | if (target_bits & 2) { | |
37356079 | 70 | host_bits |= float_flag_divbyzero; |
9798ac71 PMD |
71 | } |
72 | if (target_bits & 4) { | |
37356079 | 73 | host_bits |= float_flag_overflow; |
9798ac71 PMD |
74 | } |
75 | if (target_bits & 8) { | |
37356079 | 76 | host_bits |= float_flag_underflow; |
9798ac71 PMD |
77 | } |
78 | if (target_bits & 0x10) { | |
37356079 | 79 | host_bits |= float_flag_inexact; |
9798ac71 PMD |
80 | } |
81 | if (target_bits & 0x80) { | |
37356079 | 82 | host_bits |= float_flag_input_denormal; |
9798ac71 | 83 | } |
37356079 RH |
84 | return host_bits; |
85 | } | |
86 | ||
0c6ad948 PMD |
87 | static uint32_t vfp_get_fpscr_from_host(CPUARMState *env) |
88 | { | |
89 | uint32_t i; | |
90 | ||
91 | i = get_float_exception_flags(&env->vfp.fp_status); | |
92 | i |= get_float_exception_flags(&env->vfp.standard_fp_status); | |
93 | /* FZ16 does not generate an input denormal exception. */ | |
94 | i |= (get_float_exception_flags(&env->vfp.fp_status_f16) | |
95 | & ~float_flag_input_denormal); | |
aaae563b PM |
96 | i |= (get_float_exception_flags(&env->vfp.standard_fp_status_f16) |
97 | & ~float_flag_input_denormal); | |
0c6ad948 PMD |
98 | return vfp_exceptbits_from_host(i); |
99 | } | |
100 | ||
e9d65282 PMD |
/*
 * Propagate the FPSCR control bits in @val into the softfloat
 * float_status words, and reload the cumulative exception flags.
 */
static void vfp_set_fpscr_to_host(CPUARMState *env, uint32_t val)
{
    int i;
    uint32_t changed = env->vfp.xregs[ARM_VFP_FPSCR];

    /* Only update float_status fields whose control bits actually changed. */
    changed ^= val;
    if (changed & (3 << 22)) {
        /* FPSCR.RMode, bits [23:22]: translate to softfloat rounding mode. */
        i = (val >> 22) & 3;
        switch (i) {
        case FPROUNDING_TIEEVEN:
            i = float_round_nearest_even;
            break;
        case FPROUNDING_POSINF:
            i = float_round_up;
            break;
        case FPROUNDING_NEGINF:
            i = float_round_down;
            break;
        case FPROUNDING_ZERO:
            i = float_round_to_zero;
            break;
        }
        set_float_rounding_mode(i, &env->vfp.fp_status);
        set_float_rounding_mode(i, &env->vfp.fp_status_f16);
    }
    if (changed & FPCR_FZ16) {
        /* FZ16 controls flushing for the half-precision status words only. */
        bool ftz_enabled = val & FPCR_FZ16;
        set_flush_to_zero(ftz_enabled, &env->vfp.fp_status_f16);
        set_flush_to_zero(ftz_enabled, &env->vfp.standard_fp_status_f16);
        set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status_f16);
        set_flush_inputs_to_zero(ftz_enabled, &env->vfp.standard_fp_status_f16);
    }
    if (changed & FPCR_FZ) {
        bool ftz_enabled = val & FPCR_FZ;
        set_flush_to_zero(ftz_enabled, &env->vfp.fp_status);
        set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status);
    }
    if (changed & FPCR_DN) {
        /* Default-NaN mode; note the standard status words are not changed. */
        bool dnan_enabled = val & FPCR_DN;
        set_default_nan_mode(dnan_enabled, &env->vfp.fp_status);
        set_default_nan_mode(dnan_enabled, &env->vfp.fp_status_f16);
    }

    /*
     * The exception flags are ORed together when we read fpscr so we
     * only need to preserve the current state in one of our
     * float_status values.
     */
    i = vfp_exceptbits_to_host(val);
    set_float_exception_flags(i, &env->vfp.fp_status);
    set_float_exception_flags(0, &env->vfp.fp_status_f16);
    set_float_exception_flags(0, &env->vfp.standard_fp_status);
    set_float_exception_flags(0, &env->vfp.standard_fp_status_f16);
}
155 | ||
4a15527c PMD |
#else

/*
 * Non-TCG builds have no softfloat state to consult or update;
 * provide no-op stubs so the FPSCR accessors below still link.
 */
static uint32_t vfp_get_fpscr_from_host(CPUARMState *env)
{
    return 0;
}

static void vfp_set_fpscr_to_host(CPUARMState *env, uint32_t val)
{
}

#endif
168 | ||
20e62dd8 PMD |
169 | uint32_t HELPER(vfp_get_fpscr)(CPUARMState *env) |
170 | { | |
171 | uint32_t i, fpscr; | |
172 | ||
173 | fpscr = env->vfp.xregs[ARM_VFP_FPSCR] | |
174 | | (env->vfp.vec_len << 16) | |
175 | | (env->vfp.vec_stride << 20); | |
176 | ||
0c6ad948 | 177 | fpscr |= vfp_get_fpscr_from_host(env); |
20e62dd8 PMD |
178 | |
179 | i = env->vfp.qc[0] | env->vfp.qc[1] | env->vfp.qc[2] | env->vfp.qc[3]; | |
180 | fpscr |= i ? FPCR_QC : 0; | |
181 | ||
182 | return fpscr; | |
183 | } | |
184 | ||
/* Non-helper entry point for use from other QEMU code. */
uint32_t vfp_get_fpscr(CPUARMState *env)
{
    return HELPER(vfp_get_fpscr)(env);
}
189 | ||
37356079 RH |
190 | void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val) |
191 | { | |
37356079 | 192 | /* When ARMv8.2-FP16 is not supported, FZ16 is RES0. */ |
6e61f839 | 193 | if (!cpu_isar_feature(any_fp16, env_archcpu(env))) { |
37356079 RH |
194 | val &= ~FPCR_FZ16; |
195 | } | |
196 | ||
5bcf8ed9 PM |
197 | if (arm_feature(env, ARM_FEATURE_M)) { |
198 | /* | |
199 | * M profile FPSCR is RES0 for the QC, STRIDE, FZ16, LEN bits | |
200 | * and also for the trapped-exception-handling bits IxE. | |
201 | */ | |
202 | val &= 0xf7c0009f; | |
203 | } | |
204 | ||
85795187 PMD |
205 | vfp_set_fpscr_to_host(env, val); |
206 | ||
37356079 RH |
207 | /* |
208 | * We don't implement trapped exception handling, so the | |
209 | * trap enable bits, IDE|IXE|UFE|OFE|DZE|IOE are all RAZ/WI (not RES0!) | |
210 | * | |
211 | * If we exclude the exception flags, IOC|DZC|OFC|UFC|IXC|IDC | |
212 | * (which are stored in fp_status), and the other RES0 bits | |
213 | * in between, then we clear all of the low 16 bits. | |
214 | */ | |
215 | env->vfp.xregs[ARM_VFP_FPSCR] = val & 0xf7c80000; | |
216 | env->vfp.vec_len = (val >> 16) & 7; | |
217 | env->vfp.vec_stride = (val >> 20) & 3; | |
218 | ||
219 | /* | |
220 | * The bit we set within fpscr_q is arbitrary; the register as a | |
221 | * whole being zero/non-zero is what counts. | |
222 | */ | |
223 | env->vfp.qc[0] = val & FPCR_QC; | |
224 | env->vfp.qc[1] = 0; | |
225 | env->vfp.qc[2] = 0; | |
226 | env->vfp.qc[3] = 0; | |
37356079 RH |
227 | } |
228 | ||
/* Non-helper entry point for use from other QEMU code. */
void vfp_set_fpscr(CPUARMState *env, uint32_t val)
{
    HELPER(vfp_set_fpscr)(env, val);
}
233 | ||
4a15527c PMD |
234 | #ifdef CONFIG_TCG |
235 | ||
37356079 RH |
/* Paste together the helper symbol for a VFP operation at precision p. */
#define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p))

/*
 * Two-operand arithmetic helpers.  Each use expands to half (h),
 * single (s) and double (d) precision variants which defer to the
 * corresponding softfloat routine, using the float_status passed in
 * by the translated code.
 */
#define VFP_BINOP(name) \
dh_ctype_f16 VFP_HELPER(name, h)(dh_ctype_f16 a, dh_ctype_f16 b, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return float16_ ## name(a, b, fpst); \
} \
float32 VFP_HELPER(name, s)(float32 a, float32 b, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return float32_ ## name(a, b, fpst); \
} \
float64 VFP_HELPER(name, d)(float64 a, float64 b, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return float64_ ## name(a, b, fpst); \
}
VFP_BINOP(add)
VFP_BINOP(sub)
VFP_BINOP(mul)
VFP_BINOP(div)
VFP_BINOP(min)
VFP_BINOP(max)
VFP_BINOP(minnum)
VFP_BINOP(maxnum)
#undef VFP_BINOP
263 | ||
e7cb0ded PM |
/* Negation: flip the sign bit only, no float_status needed. */
dh_ctype_f16 VFP_HELPER(neg, h)(dh_ctype_f16 a)
{
    return float16_chs(a);
}

float32 VFP_HELPER(neg, s)(float32 a)
{
    return float32_chs(a);
}

float64 VFP_HELPER(neg, d)(float64 a)
{
    return float64_chs(a);
}
278 | ||
ce2d65a5 PM |
/* Absolute value: clear the sign bit only, no float_status needed. */
dh_ctype_f16 VFP_HELPER(abs, h)(dh_ctype_f16 a)
{
    return float16_abs(a);
}

float32 VFP_HELPER(abs, s)(float32 a)
{
    return float32_abs(a);
}

float64 VFP_HELPER(abs, d)(float64 a)
{
    return float64_abs(a);
}
293 | ||
ce2d65a5 PM |
/*
 * Square root.  The half-precision variant uses the dedicated f16
 * status word; single and double use the main fp_status.
 */
dh_ctype_f16 VFP_HELPER(sqrt, h)(dh_ctype_f16 a, CPUARMState *env)
{
    return float16_sqrt(a, &env->vfp.fp_status_f16);
}

float32 VFP_HELPER(sqrt, s)(float32 a, CPUARMState *env)
{
    return float32_sqrt(a, &env->vfp.fp_status);
}

float64 VFP_HELPER(sqrt, d)(float64 a, CPUARMState *env)
{
    return float64_sqrt(a, &env->vfp.fp_status);
}
308 | ||
71bfd65c | 309 | static void softfloat_to_vfp_compare(CPUARMState *env, FloatRelation cmp) |
37356079 RH |
310 | { |
311 | uint32_t flags; | |
312 | switch (cmp) { | |
313 | case float_relation_equal: | |
314 | flags = 0x6; | |
315 | break; | |
316 | case float_relation_less: | |
317 | flags = 0x8; | |
318 | break; | |
319 | case float_relation_greater: | |
320 | flags = 0x2; | |
321 | break; | |
322 | case float_relation_unordered: | |
323 | flags = 0x3; | |
324 | break; | |
325 | default: | |
326 | g_assert_not_reached(); | |
327 | } | |
328 | env->vfp.xregs[ARM_VFP_FPSCR] = | |
329 | deposit32(env->vfp.xregs[ARM_VFP_FPSCR], 28, 4, flags); | |
330 | } | |
331 | ||
/* XXX: check quiet/signaling case */
/*
 * Compare helpers: "cmp" uses the quiet compare (NaNs do not signal
 * invalid unless signaling NaNs are involved), "cmpe" uses the
 * signaling compare.  Both set FPSCR.NZCV from the result.
 */
#define DO_VFP_cmp(p, type) \
void VFP_HELPER(cmp, p)(type a, type b, CPUARMState *env)  \
{ \
    softfloat_to_vfp_compare(env, \
        type ## _compare_quiet(a, b, &env->vfp.fp_status)); \
} \
void VFP_HELPER(cmpe, p)(type a, type b, CPUARMState *env) \
{ \
    softfloat_to_vfp_compare(env, \
        type ## _compare(a, b, &env->vfp.fp_status)); \
}
DO_VFP_cmp(s, float32)
DO_VFP_cmp(d, float64)
#undef DO_VFP_cmp
347 | ||
/* Integer to float and float to integer conversions */

/* 32-bit integer to float; sign selects the signedness of the source. */
#define CONV_ITOF(name, ftype, fsz, sign)                           \
ftype HELPER(name)(uint32_t x, void *fpstp)                         \
{                                                                   \
    float_status *fpst = fpstp;                                     \
    return sign##int32_to_##float##fsz((sign##int32_t)x, fpst);     \
}

/*
 * Float to 32-bit integer.  NaN inputs convert to 0 and raise
 * invalid, rather than whatever the softfloat default would be.
 */
#define CONV_FTOI(name, ftype, fsz, sign, round)                    \
sign##int32_t HELPER(name)(ftype x, void *fpstp)                    \
{                                                                   \
    float_status *fpst = fpstp;                                     \
    if (float##fsz##_is_any_nan(x)) {                               \
        float_raise(float_flag_invalid, fpst);                      \
        return 0;                                                   \
    }                                                               \
    return float##fsz##_to_##sign##int32##round(x, fpst);           \
}

/* int->float, float->int (current rounding) and float->int (toward zero). */
#define FLOAT_CONVS(name, p, ftype, fsz, sign)            \
    CONV_ITOF(vfp_##name##to##p, ftype, fsz, sign)        \
    CONV_FTOI(vfp_to##name##p, ftype, fsz, sign, )        \
    CONV_FTOI(vfp_to##name##z##p, ftype, fsz, sign, _round_to_zero)

/* Half-precision values travel in a uint32_t container. */
FLOAT_CONVS(si, h, uint32_t, 16, )
FLOAT_CONVS(si, s, float32, 32, )
FLOAT_CONVS(si, d, float64, 64, )
FLOAT_CONVS(ui, h, uint32_t, 16, u)
FLOAT_CONVS(ui, s, float32, 32, u)
FLOAT_CONVS(ui, d, float64, 64, u)

#undef CONV_ITOF
#undef CONV_FTOI
#undef FLOAT_CONVS
383 | ||
/* floating point conversion */
float64 VFP_HELPER(fcvtd, s)(float32 x, CPUARMState *env)
{
    return float32_to_float64(x, &env->vfp.fp_status);
}

float32 VFP_HELPER(fcvts, d)(float64 x, CPUARMState *env)
{
    return float64_to_float32(x, &env->vfp.fp_status);
}
394 | ||
/* VFP3 fixed point conversion. */
/* Fixed-point to float: scale by 2^-shift during the conversion. */
#define VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype)                   \
float##fsz HELPER(vfp_##name##to##p)(uint##isz##_t x, uint32_t shift,  \
                                     void *fpstp)                      \
{ return itype##_to_##float##fsz##_scalbn(x, -shift, fpstp); }

/*
 * Float to fixed-point with the given rounding; NaN inputs convert
 * to 0 and raise invalid.
 */
#define VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, ROUND, suff) \
uint##isz##_t HELPER(vfp_to##name##p##suff)(float##fsz x, uint32_t shift, \
                                            void *fpst)                \
{                                                                      \
    if (unlikely(float##fsz##_is_any_nan(x))) {                        \
        float_raise(float_flag_invalid, fpst);                         \
        return 0;                                                      \
    }                                                                  \
    return float##fsz##_to_##itype##_scalbn(x, ROUND, shift, fpst);    \
}

/* A32 flavour: both round-to-zero and current-rounding-mode variants. */
#define VFP_CONV_FIX(name, p, fsz, isz, itype)                   \
VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype)                     \
VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype,               \
                         float_round_to_zero, _round_to_zero)    \
VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype,               \
                         get_float_rounding_mode(fpst), )

/* A64 flavour: only the current-rounding-mode variant is needed. */
#define VFP_CONV_FIX_A64(name, p, fsz, isz, itype)               \
VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype)                     \
VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype,               \
                         get_float_rounding_mode(fpst), )

VFP_CONV_FIX(sh, d, 64, 64, int16)
VFP_CONV_FIX(sl, d, 64, 64, int32)
VFP_CONV_FIX_A64(sq, d, 64, 64, int64)
VFP_CONV_FIX(uh, d, 64, 64, uint16)
VFP_CONV_FIX(ul, d, 64, 64, uint32)
VFP_CONV_FIX_A64(uq, d, 64, 64, uint64)
VFP_CONV_FIX(sh, s, 32, 32, int16)
VFP_CONV_FIX(sl, s, 32, 32, int32)
VFP_CONV_FIX_A64(sq, s, 32, 64, int64)
VFP_CONV_FIX(uh, s, 32, 32, uint16)
VFP_CONV_FIX(ul, s, 32, 32, uint32)
VFP_CONV_FIX_A64(uq, s, 32, 64, uint64)

#undef VFP_CONV_FIX
#undef VFP_CONV_FIX_FLOAT
#undef VFP_CONV_FLOAT_FIX_ROUND
#undef VFP_CONV_FIX_A64
441 | ||
/*
 * Fixed-point to half-precision conversions: scale the integer by
 * 2^-shift while converting.  Results travel in a uint32_t container.
 */
uint32_t HELPER(vfp_sltoh)(uint32_t x, uint32_t shift, void *fpst)
{
    return int32_to_float16_scalbn(x, -shift, fpst);
}

uint32_t HELPER(vfp_ultoh)(uint32_t x, uint32_t shift, void *fpst)
{
    return uint32_to_float16_scalbn(x, -shift, fpst);
}

uint32_t HELPER(vfp_sqtoh)(uint64_t x, uint32_t shift, void *fpst)
{
    return int64_to_float16_scalbn(x, -shift, fpst);
}

uint32_t HELPER(vfp_uqtoh)(uint64_t x, uint32_t shift, void *fpst)
{
    return uint64_to_float16_scalbn(x, -shift, fpst);
}
461 | ||
462 | uint32_t HELPER(vfp_toshh)(uint32_t x, uint32_t shift, void *fpst) | |
463 | { | |
464 | if (unlikely(float16_is_any_nan(x))) { | |
465 | float_raise(float_flag_invalid, fpst); | |
466 | return 0; | |
467 | } | |
468 | return float16_to_int16_scalbn(x, get_float_rounding_mode(fpst), | |
469 | shift, fpst); | |
470 | } | |
471 | ||
472 | uint32_t HELPER(vfp_touhh)(uint32_t x, uint32_t shift, void *fpst) | |
473 | { | |
474 | if (unlikely(float16_is_any_nan(x))) { | |
475 | float_raise(float_flag_invalid, fpst); | |
476 | return 0; | |
477 | } | |
478 | return float16_to_uint16_scalbn(x, get_float_rounding_mode(fpst), | |
479 | shift, fpst); | |
480 | } | |
481 | ||
482 | uint32_t HELPER(vfp_toslh)(uint32_t x, uint32_t shift, void *fpst) | |
483 | { | |
484 | if (unlikely(float16_is_any_nan(x))) { | |
485 | float_raise(float_flag_invalid, fpst); | |
486 | return 0; | |
487 | } | |
488 | return float16_to_int32_scalbn(x, get_float_rounding_mode(fpst), | |
489 | shift, fpst); | |
490 | } | |
491 | ||
492 | uint32_t HELPER(vfp_toulh)(uint32_t x, uint32_t shift, void *fpst) | |
493 | { | |
494 | if (unlikely(float16_is_any_nan(x))) { | |
495 | float_raise(float_flag_invalid, fpst); | |
496 | return 0; | |
497 | } | |
498 | return float16_to_uint32_scalbn(x, get_float_rounding_mode(fpst), | |
499 | shift, fpst); | |
500 | } | |
501 | ||
502 | uint64_t HELPER(vfp_tosqh)(uint32_t x, uint32_t shift, void *fpst) | |
503 | { | |
504 | if (unlikely(float16_is_any_nan(x))) { | |
505 | float_raise(float_flag_invalid, fpst); | |
506 | return 0; | |
507 | } | |
508 | return float16_to_int64_scalbn(x, get_float_rounding_mode(fpst), | |
509 | shift, fpst); | |
510 | } | |
511 | ||
512 | uint64_t HELPER(vfp_touqh)(uint32_t x, uint32_t shift, void *fpst) | |
513 | { | |
514 | if (unlikely(float16_is_any_nan(x))) { | |
515 | float_raise(float_flag_invalid, fpst); | |
516 | return 0; | |
517 | } | |
518 | return float16_to_uint64_scalbn(x, get_float_rounding_mode(fpst), | |
519 | shift, fpst); | |
520 | } | |
521 | ||
522 | /* Set the current fp rounding mode and return the old one. | |
523 | * The argument is a softfloat float_round_ value. | |
524 | */ | |
525 | uint32_t HELPER(set_rmode)(uint32_t rmode, void *fpstp) | |
526 | { | |
527 | float_status *fp_status = fpstp; | |
528 | ||
529 | uint32_t prev_rmode = get_float_rounding_mode(fp_status); | |
530 | set_float_rounding_mode(rmode, fp_status); | |
531 | ||
532 | return prev_rmode; | |
533 | } | |
534 | ||
535 | /* Set the current fp rounding mode in the standard fp status and return | |
536 | * the old one. This is for NEON instructions that need to change the | |
537 | * rounding mode but wish to use the standard FPSCR values for everything | |
538 | * else. Always set the rounding mode back to the correct value after | |
539 | * modifying it. | |
540 | * The argument is a softfloat float_round_ value. | |
541 | */ | |
542 | uint32_t HELPER(set_neon_rmode)(uint32_t rmode, CPUARMState *env) | |
543 | { | |
544 | float_status *fp_status = &env->vfp.standard_fp_status; | |
545 | ||
546 | uint32_t prev_rmode = get_float_rounding_mode(fp_status); | |
547 | set_float_rounding_mode(rmode, fp_status); | |
548 | ||
549 | return prev_rmode; | |
550 | } | |
551 | ||
552 | /* Half precision conversions. */ | |
553 | float32 HELPER(vfp_fcvt_f16_to_f32)(uint32_t a, void *fpstp, uint32_t ahp_mode) | |
554 | { | |
555 | /* Squash FZ16 to 0 for the duration of conversion. In this case, | |
556 | * it would affect flushing input denormals. | |
557 | */ | |
558 | float_status *fpst = fpstp; | |
c120391c | 559 | bool save = get_flush_inputs_to_zero(fpst); |
37356079 RH |
560 | set_flush_inputs_to_zero(false, fpst); |
561 | float32 r = float16_to_float32(a, !ahp_mode, fpst); | |
562 | set_flush_inputs_to_zero(save, fpst); | |
563 | return r; | |
564 | } | |
565 | ||
566 | uint32_t HELPER(vfp_fcvt_f32_to_f16)(float32 a, void *fpstp, uint32_t ahp_mode) | |
567 | { | |
568 | /* Squash FZ16 to 0 for the duration of conversion. In this case, | |
569 | * it would affect flushing output denormals. | |
570 | */ | |
571 | float_status *fpst = fpstp; | |
c120391c | 572 | bool save = get_flush_to_zero(fpst); |
37356079 RH |
573 | set_flush_to_zero(false, fpst); |
574 | float16 r = float32_to_float16(a, !ahp_mode, fpst); | |
575 | set_flush_to_zero(save, fpst); | |
576 | return r; | |
577 | } | |
578 | ||
579 | float64 HELPER(vfp_fcvt_f16_to_f64)(uint32_t a, void *fpstp, uint32_t ahp_mode) | |
580 | { | |
581 | /* Squash FZ16 to 0 for the duration of conversion. In this case, | |
582 | * it would affect flushing input denormals. | |
583 | */ | |
584 | float_status *fpst = fpstp; | |
c120391c | 585 | bool save = get_flush_inputs_to_zero(fpst); |
37356079 RH |
586 | set_flush_inputs_to_zero(false, fpst); |
587 | float64 r = float16_to_float64(a, !ahp_mode, fpst); | |
588 | set_flush_inputs_to_zero(save, fpst); | |
589 | return r; | |
590 | } | |
591 | ||
592 | uint32_t HELPER(vfp_fcvt_f64_to_f16)(float64 a, void *fpstp, uint32_t ahp_mode) | |
593 | { | |
594 | /* Squash FZ16 to 0 for the duration of conversion. In this case, | |
595 | * it would affect flushing output denormals. | |
596 | */ | |
597 | float_status *fpst = fpstp; | |
c120391c | 598 | bool save = get_flush_to_zero(fpst); |
37356079 RH |
599 | set_flush_to_zero(false, fpst); |
600 | float16 r = float64_to_float16(a, !ahp_mode, fpst); | |
601 | set_flush_to_zero(save, fpst); | |
602 | return r; | |
603 | } | |
604 | ||
/*
 * VRECPS Newton-Raphson step: computes 2 - a*b, with the special case
 * that (inf, 0) pairs return exactly 2.0 rather than a NaN.
 */
float32 HELPER(recps_f32)(CPUARMState *env, float32 a, float32 b)
{
    float_status *s = &env->vfp.standard_fp_status;
    if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
        (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
        /* inf * denormal (not true zero): still report the denormal. */
        if (!(float32_is_zero(a) || float32_is_zero(b))) {
            float_raise(float_flag_input_denormal, s);
        }
        return float32_two;
    }
    return float32_sub(float32_two, float32_mul(a, b, s), s);
}
617 | ||
/*
 * VRSQRTS Newton-Raphson step: computes (3 - a*b) / 2, with the
 * special case that (inf, 0) pairs return exactly 1.5 rather than NaN.
 */
float32 HELPER(rsqrts_f32)(CPUARMState *env, float32 a, float32 b)
{
    float_status *s = &env->vfp.standard_fp_status;
    float32 product;
    if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
        (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
        /* inf * denormal (not true zero): still report the denormal. */
        if (!(float32_is_zero(a) || float32_is_zero(b))) {
            float_raise(float_flag_input_denormal, s);
        }
        return float32_one_point_five;
    }
    product = float32_mul(a, b, s);
    return float32_div(float32_sub(float32_three, product, s), float32_two, s);
}
632 | ||
633 | /* NEON helpers. */ | |
634 | ||
635 | /* Constants 256 and 512 are used in some helpers; we avoid relying on | |
636 | * int->float conversions at run-time. */ | |
637 | #define float64_256 make_float64(0x4070000000000000LL) | |
638 | #define float64_512 make_float64(0x4080000000000000LL) | |
639 | #define float16_maxnorm make_float16(0x7bff) | |
640 | #define float32_maxnorm make_float32(0x7f7fffff) | |
641 | #define float64_maxnorm make_float64(0x7fefffffffffffffLL) | |
642 | ||
643 | /* Reciprocal functions | |
644 | * | |
645 | * The algorithm that must be used to calculate the estimate | |
646 | * is specified by the ARM ARM, see FPRecipEstimate()/RecipEstimate | |
647 | */ | |
648 | ||
/* See RecipEstimate()
 *
 * input is a 9 bit fixed point number
 * input range 256 .. 511 for a number from 0.5 <= x < 1.0.
 * result range 256 .. 511 for a number from 1.0 to 511/256.
 */

static int recip_estimate(int input)
{
    int denom, quotient, estimate;

    assert(256 <= input && input < 512);
    /* Divide 2^19 by (2*input + 1), then round the result to nearest. */
    denom = 2 * input + 1;
    quotient = (1 << 19) / denom;
    estimate = (quotient + 1) >> 1;
    assert(256 <= estimate && estimate < 512);
    return estimate;
}
666 | ||
/*
 * Common wrapper to call recip_estimate
 *
 * The parameters are exponent and 64 bit fraction (without implicit
 * bit) where the binary point is nominally at bit 52. Returns a
 * float64 which can then be rounded to the appropriate size by the
 * callee.
 */

static uint64_t call_recip_estimate(int *exp, int exp_off, uint64_t frac)
{
    uint32_t scaled, estimate;
    uint64_t result_frac;
    int result_exp;

    /* Handle sub-normals: normalize by one or two bits as needed. */
    if (*exp == 0) {
        if (extract64(frac, 51, 1) == 0) {
            *exp = -1;
            frac <<= 2;
        } else {
            frac <<= 1;
        }
    }

    /* scaled = UInt('1':fraction<51:44>) */
    scaled = deposit32(1 << 8, 0, 8, extract64(frac, 44, 8));
    estimate = recip_estimate(scaled);

    /* Place the 9-bit estimate at the top of the 52-bit fraction. */
    result_exp = exp_off - *exp;
    result_frac = deposit64(0, 44, 8, estimate);
    if (result_exp == 0) {
        /* Result is subnormal: shift right and fold in the implicit bit. */
        result_frac = deposit64(result_frac >> 1, 51, 1, 1);
    } else if (result_exp == -1) {
        result_frac = deposit64(result_frac >> 2, 50, 2, 1);
        result_exp = 0;
    }

    *exp = result_exp;

    return result_frac;
}
709 | ||
710 | static bool round_to_inf(float_status *fpst, bool sign_bit) | |
711 | { | |
712 | switch (fpst->float_rounding_mode) { | |
713 | case float_round_nearest_even: /* Round to Nearest */ | |
714 | return true; | |
715 | case float_round_up: /* Round to +Inf */ | |
716 | return !sign_bit; | |
717 | case float_round_down: /* Round to -Inf */ | |
718 | return sign_bit; | |
719 | case float_round_to_zero: /* Round to Zero */ | |
720 | return false; | |
3dede407 RH |
721 | default: |
722 | g_assert_not_reached(); | |
37356079 | 723 | } |
37356079 RH |
724 | } |
725 | ||
/*
 * VRECPE for half precision: special-case handling per the Arm ARM
 * FPRecipEstimate() pseudocode, then table-based estimate on the
 * fraction via call_recip_estimate.
 */
uint32_t HELPER(recpe_f16)(uint32_t input, void *fpstp)
{
    float_status *fpst = fpstp;
    float16 f16 = float16_squash_input_denormal(input, fpst);
    uint32_t f16_val = float16_val(f16);
    uint32_t f16_sign = float16_is_neg(f16);
    int f16_exp = extract32(f16_val, 10, 5);
    uint32_t f16_frac = extract32(f16_val, 0, 10);
    uint64_t f64_frac;

    if (float16_is_any_nan(f16)) {
        /* Quiet the NaN (raising invalid if it signals), or return
         * the default NaN when default-NaN mode is enabled. */
        float16 nan = f16;
        if (float16_is_signaling_nan(f16, fpst)) {
            float_raise(float_flag_invalid, fpst);
            nan = float16_silence_nan(f16, fpst);
        }
        if (fpst->default_nan_mode) {
            nan = float16_default_nan(fpst);
        }
        return nan;
    } else if (float16_is_infinity(f16)) {
        /* 1/inf == signed zero */
        return float16_set_sign(float16_zero, float16_is_neg(f16));
    } else if (float16_is_zero(f16)) {
        /* 1/0 == signed infinity, with division-by-zero raised */
        float_raise(float_flag_divbyzero, fpst);
        return float16_set_sign(float16_infinity, float16_is_neg(f16));
    } else if (float16_abs(f16) < (1 << 8)) {
        /* Abs(value) < 2.0^-16: result overflows, round per mode */
        float_raise(float_flag_overflow | float_flag_inexact, fpst);
        if (round_to_inf(fpst, f16_sign)) {
            return float16_set_sign(float16_infinity, f16_sign);
        } else {
            return float16_set_sign(float16_maxnorm, f16_sign);
        }
    } else if (f16_exp >= 29 && fpst->flush_to_zero) {
        /* Large input with FZ: result flushes to signed zero */
        float_raise(float_flag_underflow, fpst);
        return float16_set_sign(float16_zero, float16_is_neg(f16));
    }

    f64_frac = call_recip_estimate(&f16_exp, 29,
                                   ((uint64_t) f16_frac) << (52 - 10));

    /* result = sign : result_exp<4:0> : fraction<51:42> */
    f16_val = deposit32(0, 15, 1, f16_sign);
    f16_val = deposit32(f16_val, 10, 5, f16_exp);
    f16_val = deposit32(f16_val, 0, 10, extract64(f64_frac, 52 - 10, 10));
    return make_float16(f16_val);
}
773 | ||
/*
 * VRECPE for single precision; same structure as the half-precision
 * variant above, with the thresholds adjusted for the f32 format.
 */
float32 HELPER(recpe_f32)(float32 input, void *fpstp)
{
    float_status *fpst = fpstp;
    float32 f32 = float32_squash_input_denormal(input, fpst);
    uint32_t f32_val = float32_val(f32);
    bool f32_sign = float32_is_neg(f32);
    int f32_exp = extract32(f32_val, 23, 8);
    uint32_t f32_frac = extract32(f32_val, 0, 23);
    uint64_t f64_frac;

    if (float32_is_any_nan(f32)) {
        /* Quiet the NaN (raising invalid if it signals), or return
         * the default NaN when default-NaN mode is enabled. */
        float32 nan = f32;
        if (float32_is_signaling_nan(f32, fpst)) {
            float_raise(float_flag_invalid, fpst);
            nan = float32_silence_nan(f32, fpst);
        }
        if (fpst->default_nan_mode) {
            nan = float32_default_nan(fpst);
        }
        return nan;
    } else if (float32_is_infinity(f32)) {
        /* 1/inf == signed zero */
        return float32_set_sign(float32_zero, float32_is_neg(f32));
    } else if (float32_is_zero(f32)) {
        /* 1/0 == signed infinity, with division-by-zero raised */
        float_raise(float_flag_divbyzero, fpst);
        return float32_set_sign(float32_infinity, float32_is_neg(f32));
    } else if (float32_abs(f32) < (1ULL << 21)) {
        /* Abs(value) < 2.0^-128: result overflows, round per mode */
        float_raise(float_flag_overflow | float_flag_inexact, fpst);
        if (round_to_inf(fpst, f32_sign)) {
            return float32_set_sign(float32_infinity, f32_sign);
        } else {
            return float32_set_sign(float32_maxnorm, f32_sign);
        }
    } else if (f32_exp >= 253 && fpst->flush_to_zero) {
        /* Large input with FZ: result flushes to signed zero */
        float_raise(float_flag_underflow, fpst);
        return float32_set_sign(float32_zero, float32_is_neg(f32));
    }

    f64_frac = call_recip_estimate(&f32_exp, 253,
                                   ((uint64_t) f32_frac) << (52 - 23));

    /* result = sign : result_exp<7:0> : fraction<51:29> */
    f32_val = deposit32(0, 31, 1, f32_sign);
    f32_val = deposit32(f32_val, 23, 8, f32_exp);
    f32_val = deposit32(f32_val, 0, 23, extract64(f64_frac, 52 - 23, 23));
    return make_float32(f32_val);
}
821 | ||
/*
 * VRECPE for double precision; the fraction is already 52 bits so no
 * pre-shift is needed before calling call_recip_estimate.
 */
float64 HELPER(recpe_f64)(float64 input, void *fpstp)
{
    float_status *fpst = fpstp;
    float64 f64 = float64_squash_input_denormal(input, fpst);
    uint64_t f64_val = float64_val(f64);
    bool f64_sign = float64_is_neg(f64);
    int f64_exp = extract64(f64_val, 52, 11);
    uint64_t f64_frac = extract64(f64_val, 0, 52);

    /* Deal with any special cases */
    if (float64_is_any_nan(f64)) {
        /* Quiet the NaN (raising invalid if it signals), or return
         * the default NaN when default-NaN mode is enabled. */
        float64 nan = f64;
        if (float64_is_signaling_nan(f64, fpst)) {
            float_raise(float_flag_invalid, fpst);
            nan = float64_silence_nan(f64, fpst);
        }
        if (fpst->default_nan_mode) {
            nan = float64_default_nan(fpst);
        }
        return nan;
    } else if (float64_is_infinity(f64)) {
        /* 1/inf == signed zero */
        return float64_set_sign(float64_zero, float64_is_neg(f64));
    } else if (float64_is_zero(f64)) {
        /* 1/0 == signed infinity, with division-by-zero raised */
        float_raise(float_flag_divbyzero, fpst);
        return float64_set_sign(float64_infinity, float64_is_neg(f64));
    } else if ((f64_val & ~(1ULL << 63)) < (1ULL << 50)) {
        /* Abs(value) < 2.0^-1024: result overflows, round per mode */
        float_raise(float_flag_overflow | float_flag_inexact, fpst);
        if (round_to_inf(fpst, f64_sign)) {
            return float64_set_sign(float64_infinity, f64_sign);
        } else {
            return float64_set_sign(float64_maxnorm, f64_sign);
        }
    } else if (f64_exp >= 2045 && fpst->flush_to_zero) {
        /* Large input with FZ: result flushes to signed zero */
        float_raise(float_flag_underflow, fpst);
        return float64_set_sign(float64_zero, float64_is_neg(f64));
    }

    f64_frac = call_recip_estimate(&f64_exp, 2045, f64_frac);

    /* result = sign : result_exp<10:0> : fraction<51:0>; */
    f64_val = deposit64(0, 63, 1, f64_sign);
    f64_val = deposit64(f64_val, 52, 11, f64_exp);
    f64_val = deposit64(f64_val, 0, 52, f64_frac);
    return make_float64(f64_val);
}
868 | ||
869 | /* The algorithm that must be used to calculate the estimate | |
870 | * is specified by the ARM ARM. | |
871 | */ | |
872 | ||
/*
 * Table-free form of the ARM ARM reciprocal square root estimate:
 * for a 9-bit key a (128..511) return the 9-bit estimate (256..511).
 */
static int do_recip_sqrt_estimate(int a)
{
    int b, estimate;

    assert(128 <= a && a < 512);

    /* Rescale the key with a half-ulp offset, per the pseudocode. */
    if (a < 256) {
        a = 2 * a + 1;
    } else {
        a = ((a & ~1) + 1) * 2;
    }

    /* Find the largest b >= 512 with a * (b + 1)^2 still below 2^28. */
    for (b = 512; a * (b + 1) * (b + 1) < (1 << 28); b++) {
        /* nothing */
    }
    estimate = (b + 1) / 2;

    assert(256 <= estimate && estimate < 512);
    return estimate;
}
893 | ||
894 | ||
/*
 * Common reciprocal-sqrt estimate core: normalize a (possibly denormal)
 * 52-bit-positioned fraction, look up the 8-bit estimate keyed on the
 * exponent parity, and rewrite *exp to the result exponent.
 * exp_off is the format-dependent exponent offset (44 / 380 / 3068 at
 * the f16 / f32 / f64 call sites). Returns the estimate placed at
 * bits <51:44> of a 52-bit fraction.
 */
static uint64_t recip_sqrt_estimate(int *exp , int exp_off, uint64_t frac)
{
    int estimate;
    uint32_t scaled;

    if (*exp == 0) {
        /* Denormal input: shift up until bit 51 is set, tracking exp */
        while (extract64(frac, 51, 1) == 0) {
            frac = frac << 1;
            *exp -= 1;
        }
        /* Drop the leading bit just found; it becomes implicit */
        frac = extract64(frac, 0, 51) << 1;
    }

    /*
     * The table key depends on exponent parity so the scaled operand
     * lands in a single octave for the estimate.
     */
    if (*exp & 1) {
        /* scaled = UInt('01':fraction<51:45>) */
        scaled = deposit32(1 << 7, 0, 7, extract64(frac, 45, 7));
    } else {
        /* scaled = UInt('1':fraction<51:44>) */
        scaled = deposit32(1 << 8, 0, 8, extract64(frac, 44, 8));
    }
    estimate = do_recip_sqrt_estimate(scaled);

    /* Square root halves (and here negates/offsets) the exponent */
    *exp = (exp_off - *exp) / 2;
    return extract64(estimate, 0, 8) << 44;
}
920 | ||
/*
 * Half-precision reciprocal square root estimate, following the
 * ARM ARM pseudocode for the estimate algorithm.
 */
uint32_t HELPER(rsqrte_f16)(uint32_t input, void *fpstp)
{
    float_status *s = fpstp;
    float16 f16 = float16_squash_input_denormal(input, s);
    uint16_t val = float16_val(f16);
    bool f16_sign = float16_is_neg(f16);
    int f16_exp = extract32(val, 10, 5);
    uint16_t f16_frac = extract32(val, 0, 10);
    uint64_t f64_frac;

    /* Special cases: NaN, ±0, negative, +Inf */
    if (float16_is_any_nan(f16)) {
        float16 nan = f16;
        if (float16_is_signaling_nan(f16, s)) {
            float_raise(float_flag_invalid, s);
            nan = float16_silence_nan(f16, s);
        }
        if (s->default_nan_mode) {
            nan = float16_default_nan(s);
        }
        return nan;
    } else if (float16_is_zero(f16)) {
        /* 1/sqrt(±0) == ±Inf, raising Divide-by-zero */
        float_raise(float_flag_divbyzero, s);
        return float16_set_sign(float16_infinity, f16_sign);
    } else if (f16_sign) {
        /* sqrt of a negative number: Invalid, return the default NaN */
        float_raise(float_flag_invalid, s);
        return float16_default_nan(s);
    } else if (float16_is_infinity(f16)) {
        /* 1/sqrt(+Inf) == +0 */
        return float16_zero;
    }

    /* Scale and normalize to a double-precision value between 0.25 and 1.0,
     * preserving the parity of the exponent. */

    f64_frac = ((uint64_t) f16_frac) << (52 - 10);

    f64_frac = recip_sqrt_estimate(&f16_exp, 44, f64_frac);

    /* result = sign : result_exp<4:0> : estimate<7:0> : Zeros(2) */
    val = deposit32(0, 15, 1, f16_sign);
    val = deposit32(val, 10, 5, f16_exp);
    val = deposit32(val, 2, 8, extract64(f64_frac, 52 - 8, 8));
    return make_float16(val);
}
964 | ||
/*
 * Single-precision reciprocal square root estimate, following the
 * ARM ARM pseudocode for the estimate algorithm.
 */
float32 HELPER(rsqrte_f32)(float32 input, void *fpstp)
{
    float_status *s = fpstp;
    float32 f32 = float32_squash_input_denormal(input, s);
    uint32_t val = float32_val(f32);
    uint32_t f32_sign = float32_is_neg(f32);
    int f32_exp = extract32(val, 23, 8);
    uint32_t f32_frac = extract32(val, 0, 23);
    uint64_t f64_frac;

    /* Special cases: NaN, ±0, negative, +Inf */
    if (float32_is_any_nan(f32)) {
        float32 nan = f32;
        if (float32_is_signaling_nan(f32, s)) {
            float_raise(float_flag_invalid, s);
            nan = float32_silence_nan(f32, s);
        }
        if (s->default_nan_mode) {
            nan = float32_default_nan(s);
        }
        return nan;
    } else if (float32_is_zero(f32)) {
        /* 1/sqrt(±0) == ±Inf, raising Divide-by-zero */
        float_raise(float_flag_divbyzero, s);
        return float32_set_sign(float32_infinity, float32_is_neg(f32));
    } else if (float32_is_neg(f32)) {
        /* sqrt of a negative number: Invalid, return the default NaN */
        float_raise(float_flag_invalid, s);
        return float32_default_nan(s);
    } else if (float32_is_infinity(f32)) {
        /* 1/sqrt(+Inf) == +0 */
        return float32_zero;
    }

    /* Scale and normalize to a double-precision value between 0.25 and 1.0,
     * preserving the parity of the exponent. */

    f64_frac = ((uint64_t) f32_frac) << 29;

    f64_frac = recip_sqrt_estimate(&f32_exp, 380, f64_frac);

    /* result = sign : result_exp<7:0> : estimate<7:0> : Zeros(15) */
    val = deposit32(0, 31, 1, f32_sign);
    val = deposit32(val, 23, 8, f32_exp);
    val = deposit32(val, 15, 8, extract64(f64_frac, 52 - 8, 8));
    return make_float32(val);
}
1008 | ||
1009 | float64 HELPER(rsqrte_f64)(float64 input, void *fpstp) | |
1010 | { | |
1011 | float_status *s = fpstp; | |
1012 | float64 f64 = float64_squash_input_denormal(input, s); | |
1013 | uint64_t val = float64_val(f64); | |
1014 | bool f64_sign = float64_is_neg(f64); | |
1015 | int f64_exp = extract64(val, 52, 11); | |
1016 | uint64_t f64_frac = extract64(val, 0, 52); | |
1017 | ||
1018 | if (float64_is_any_nan(f64)) { | |
1019 | float64 nan = f64; | |
1020 | if (float64_is_signaling_nan(f64, s)) { | |
1021 | float_raise(float_flag_invalid, s); | |
1022 | nan = float64_silence_nan(f64, s); | |
1023 | } | |
1024 | if (s->default_nan_mode) { | |
1025 | nan = float64_default_nan(s); | |
1026 | } | |
1027 | return nan; | |
1028 | } else if (float64_is_zero(f64)) { | |
1029 | float_raise(float_flag_divbyzero, s); | |
1030 | return float64_set_sign(float64_infinity, float64_is_neg(f64)); | |
1031 | } else if (float64_is_neg(f64)) { | |
1032 | float_raise(float_flag_invalid, s); | |
1033 | return float64_default_nan(s); | |
1034 | } else if (float64_is_infinity(f64)) { | |
1035 | return float64_zero; | |
1036 | } | |
1037 | ||
1038 | f64_frac = recip_sqrt_estimate(&f64_exp, 3068, f64_frac); | |
1039 | ||
1040 | /* result = sign : result_exp<4:0> : estimate<7:0> : Zeros(44) */ | |
1041 | val = deposit64(0, 61, 1, f64_sign); | |
1042 | val = deposit64(val, 52, 11, f64_exp); | |
1043 | val = deposit64(val, 44, 8, extract64(f64_frac, 52 - 8, 8)); | |
1044 | return make_float64(val); | |
1045 | } | |
1046 | ||
fe6fb4be | 1047 | uint32_t HELPER(recpe_u32)(uint32_t a) |
37356079 | 1048 | { |
37356079 RH |
1049 | int input, estimate; |
1050 | ||
1051 | if ((a & 0x80000000) == 0) { | |
1052 | return 0xffffffff; | |
1053 | } | |
1054 | ||
1055 | input = extract32(a, 23, 9); | |
1056 | estimate = recip_estimate(input); | |
1057 | ||
1058 | return deposit32(0, (32 - 9), 9, estimate); | |
1059 | } | |
1060 | ||
fe6fb4be | 1061 | uint32_t HELPER(rsqrte_u32)(uint32_t a) |
37356079 RH |
1062 | { |
1063 | int estimate; | |
1064 | ||
1065 | if ((a & 0xc0000000) == 0) { | |
1066 | return 0xffffffff; | |
1067 | } | |
1068 | ||
1069 | estimate = do_recip_sqrt_estimate(extract32(a, 23, 9)); | |
1070 | ||
1071 | return deposit32(0, 23, 9, estimate); | |
1072 | } | |
1073 | ||
1074 | /* VFPv4 fused multiply-accumulate */ | |
9886fe28 PM |
1075 | dh_ctype_f16 VFP_HELPER(muladd, h)(dh_ctype_f16 a, dh_ctype_f16 b, |
1076 | dh_ctype_f16 c, void *fpstp) | |
1077 | { | |
1078 | float_status *fpst = fpstp; | |
1079 | return float16_muladd(a, b, c, 0, fpst); | |
1080 | } | |
1081 | ||
37356079 RH |
1082 | float32 VFP_HELPER(muladd, s)(float32 a, float32 b, float32 c, void *fpstp) |
1083 | { | |
1084 | float_status *fpst = fpstp; | |
1085 | return float32_muladd(a, b, c, 0, fpst); | |
1086 | } | |
1087 | ||
1088 | float64 VFP_HELPER(muladd, d)(float64 a, float64 b, float64 c, void *fpstp) | |
1089 | { | |
1090 | float_status *fpst = fpstp; | |
1091 | return float64_muladd(a, b, c, 0, fpst); | |
1092 | } | |
1093 | ||
/* ARMv8 round to integral */

/*
 * "Exact" variant: any Inexact status raised by the rounding is left
 * visible to the caller (HELPER(rints) suppresses it).
 */
float32 HELPER(rints_exact)(float32 x, void *fp_status)
{
    return float32_round_to_int(x, fp_status);
}
1099 | ||
/*
 * "Exact" double-precision round to integral: Inexact raised by the
 * rounding is left visible to the caller (HELPER(rintd) suppresses it).
 */
float64 HELPER(rintd_exact)(float64 x, void *fp_status)
{
    return float64_round_to_int(x, fp_status);
}
1104 | ||
1105 | float32 HELPER(rints)(float32 x, void *fp_status) | |
1106 | { | |
1107 | int old_flags = get_float_exception_flags(fp_status), new_flags; | |
1108 | float32 ret; | |
1109 | ||
1110 | ret = float32_round_to_int(x, fp_status); | |
1111 | ||
1112 | /* Suppress any inexact exceptions the conversion produced */ | |
1113 | if (!(old_flags & float_flag_inexact)) { | |
1114 | new_flags = get_float_exception_flags(fp_status); | |
1115 | set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status); | |
1116 | } | |
1117 | ||
1118 | return ret; | |
1119 | } | |
1120 | ||
1121 | float64 HELPER(rintd)(float64 x, void *fp_status) | |
1122 | { | |
1123 | int old_flags = get_float_exception_flags(fp_status), new_flags; | |
1124 | float64 ret; | |
1125 | ||
1126 | ret = float64_round_to_int(x, fp_status); | |
1127 | ||
1128 | new_flags = get_float_exception_flags(fp_status); | |
1129 | ||
1130 | /* Suppress any inexact exceptions the conversion produced */ | |
1131 | if (!(old_flags & float_flag_inexact)) { | |
1132 | new_flags = get_float_exception_flags(fp_status); | |
1133 | set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status); | |
1134 | } | |
1135 | ||
1136 | return ret; | |
1137 | } | |
1138 | ||
1139 | /* Convert ARM rounding mode to softfloat */ | |
1140 | int arm_rmode_to_sf(int rmode) | |
1141 | { | |
1142 | switch (rmode) { | |
1143 | case FPROUNDING_TIEAWAY: | |
1144 | rmode = float_round_ties_away; | |
1145 | break; | |
1146 | case FPROUNDING_ODD: | |
1147 | /* FIXME: add support for TIEAWAY and ODD */ | |
1148 | qemu_log_mask(LOG_UNIMP, "arm: unimplemented rounding mode: %d\n", | |
1149 | rmode); | |
1150 | /* fall through for now */ | |
1151 | case FPROUNDING_TIEEVEN: | |
1152 | default: | |
1153 | rmode = float_round_nearest_even; | |
1154 | break; | |
1155 | case FPROUNDING_POSINF: | |
1156 | rmode = float_round_up; | |
1157 | break; | |
1158 | case FPROUNDING_NEGINF: | |
1159 | rmode = float_round_down; | |
1160 | break; | |
1161 | case FPROUNDING_ZERO: | |
1162 | rmode = float_round_to_zero; | |
1163 | break; | |
1164 | } | |
1165 | return rmode; | |
1166 | } | |
6c1f6f27 RH |
1167 | |
/*
 * Implement float64 to int32_t conversion without saturation;
 * the result is supplied modulo 2^32.
 * The low 32 bits of the return value hold the truncated result;
 * the high 32 bits hold the "inexact" (!Z) indication, which
 * HELPER(vjcvt) folds into FPSCR.Z.
 */
uint64_t HELPER(fjcvtzs)(float64 value, void *vstatus)
{
    float_status *status = vstatus;
    uint32_t exp, sign;
    uint64_t frac;
    uint32_t inexact = 1; /* !Z */

    /* Decompose the raw IEEE double by hand */
    sign = extract64(value, 63, 1);
    exp = extract64(value, 52, 11);
    frac = extract64(value, 0, 52);

    if (exp == 0) {
        /* While not inexact for IEEE FP, -0.0 is inexact for JavaScript. */
        inexact = sign;
        if (frac != 0) {
            if (status->flush_inputs_to_zero) {
                float_raise(float_flag_input_denormal, status);
            } else {
                float_raise(float_flag_inexact, status);
                inexact = 1;
            }
        }
        frac = 0;
    } else if (exp == 0x7ff) {
        /* This operation raises Invalid for both NaN and overflow (Inf). */
        float_raise(float_flag_invalid, status);
        frac = 0;
    } else {
        int true_exp = exp - 1023;
        int shift = true_exp - 52;

        /* Restore implicit bit. */
        frac |= 1ull << 52;

        /* Shift the fraction into place. */
        if (shift >= 0) {
            /* The number is so large we must shift the fraction left. */
            if (shift >= 64) {
                /* The fraction is shifted out entirely. */
                frac = 0;
            } else {
                frac <<= shift;
            }
        } else if (shift > -64) {
            /* Normal case -- shift right and notice if bits shift out. */
            inexact = (frac << (64 + shift)) != 0;
            frac >>= -shift;
        } else {
            /* The fraction is shifted out entirely. */
            frac = 0;
        }

        /*
         * Notice overflow or inexact exceptions. The bound is
         * asymmetric because |INT32_MIN| exceeds INT32_MAX.
         */
        if (true_exp > 31 || frac > (sign ? 0x80000000ull : 0x7fffffff)) {
            /* Overflow, for which this operation raises invalid. */
            float_raise(float_flag_invalid, status);
            inexact = 1;
        } else if (inexact) {
            float_raise(float_flag_inexact, status);
        }

        /* Honor the sign. */
        if (sign) {
            frac = -frac;
        }
    }

    /* Pack the result and the env->ZF representation of Z together. */
    return deposit64(frac, 32, 32, inexact);
}
1242 | ||
1243 | uint32_t HELPER(vjcvt)(float64 value, CPUARMState *env) | |
1244 | { | |
1245 | uint64_t pair = HELPER(fjcvtzs)(value, &env->vfp.fp_status); | |
1246 | uint32_t result = pair; | |
1247 | uint32_t z = (pair >> 32) == 0; | |
1248 | ||
1249 | /* Store Z, clear NCV, in FPSCR.NZCV. */ | |
1250 | env->vfp.xregs[ARM_VFP_FPSCR] | |
1251 | = (env->vfp.xregs[ARM_VFP_FPSCR] & ~CPSR_NZCV) | (z * CPSR_Z); | |
1252 | ||
1253 | return result; | |
1254 | } | |
6bea2563 RH |
1255 | |
/* Round a float32 to an integer that fits in int32_t or int64_t. */
static float32 frint_s(float32 f, float_status *fpst, int intsize)
{
    int old_flags = get_float_exception_flags(fpst);
    uint32_t exp = extract32(f, 23, 8);

    if (unlikely(exp == 0xff)) {
        /* NaN or Inf. */
        goto overflow;
    }

    /* Round and re-extract the exponent. */
    f = float32_round_to_int(f, fpst);
    exp = extract32(f, 23, 8);

    /* Validate the range of the result. */
    if (exp < 126 + intsize) {
        /* abs(F) <= INT{N}_MAX */
        return f;
    }
    if (exp == 126 + intsize) {
        uint32_t sign = extract32(f, 31, 1);
        uint32_t frac = extract32(f, 0, 23);
        if (sign && frac == 0) {
            /* F == INT{N}_MIN */
            return f;
        }
    }

overflow:
    /*
     * Raise Invalid and return INT{N}_MIN as a float. Revert any
     * inexact exception float32_round_to_int may have raised.
     * The returned constant is the float32 encoding of -2^(intsize-1):
     * 0x100 in the exponent-field position shifted to bit 31 sets the
     * sign, with biased exponent 126 + intsize and zero fraction.
     */
    set_float_exception_flags(old_flags | float_flag_invalid, fpst);
    return (0x100u + 126u + intsize) << 23;
}
1293 | ||
/* Round a float32 to an integral value that fits in int32_t. */
float32 HELPER(frint32_s)(float32 f, void *fpst)
{
    return frint_s(f, fpst, 32);
}
1298 | ||
/* Round a float32 to an integral value that fits in int64_t. */
float32 HELPER(frint64_s)(float32 f, void *fpst)
{
    return frint_s(f, fpst, 64);
}
1303 | ||
/* Round a float64 to an integer that fits in int32_t or int64_t. */
static float64 frint_d(float64 f, float_status *fpst, int intsize)
{
    int old_flags = get_float_exception_flags(fpst);
    uint32_t exp = extract64(f, 52, 11);

    if (unlikely(exp == 0x7ff)) {
        /* NaN or Inf. */
        goto overflow;
    }

    /* Round and re-extract the exponent. */
    f = float64_round_to_int(f, fpst);
    exp = extract64(f, 52, 11);

    /* Validate the range of the result. */
    if (exp < 1022 + intsize) {
        /* abs(F) <= INT{N}_MAX */
        return f;
    }
    if (exp == 1022 + intsize) {
        uint64_t sign = extract64(f, 63, 1);
        uint64_t frac = extract64(f, 0, 52);
        if (sign && frac == 0) {
            /* F == INT{N}_MIN */
            return f;
        }
    }

overflow:
    /*
     * Raise Invalid and return INT{N}_MIN as a float. Revert any
     * inexact exception float64_round_to_int may have raised.
     * The returned constant is the float64 encoding of -2^(intsize-1):
     * 0x800 in the exponent-field position shifted to bit 63 sets the
     * sign, with biased exponent 1022 + intsize and zero fraction.
     */
    set_float_exception_flags(old_flags | float_flag_invalid, fpst);
    return (uint64_t)(0x800 + 1022 + intsize) << 52;
}
1341 | ||
/* Round a float64 to an integral value that fits in int32_t. */
float64 HELPER(frint32_d)(float64 f, void *fpst)
{
    return frint_d(f, fpst, 32);
}
1346 | ||
/* Round a float64 to an integral value that fits in int64_t. */
float64 HELPER(frint64_d)(float64 f, void *fpst)
{
    return frint_d(f, fpst, 64);
}
4a15527c | 1351 | |
9ca1d776 MZ |
/*
 * Check whether an EL2 trap applies to an access to the given VFP ID
 * register (reg, with rt identifying the core register involved):
 * MVFR0/1/2 are trapped by HCR_EL2.TID3, FPSID by HCR_EL2.TID0.
 * If the relevant trap bit is clear this returns with no side effects;
 * otherwise it raises an EXCP_HYP_TRAP to EL2 and does not return
 * normally.
 */
void HELPER(check_hcr_el2_trap)(CPUARMState *env, uint32_t rt, uint32_t reg)
{
    uint32_t syndrome;

    switch (reg) {
    case ARM_VFP_MVFR0:
    case ARM_VFP_MVFR1:
    case ARM_VFP_MVFR2:
        if (!(arm_hcr_el2_eff(env) & HCR_TID3)) {
            return;
        }
        break;
    case ARM_VFP_FPSID:
        if (!(arm_hcr_el2_eff(env) & HCR_TID0)) {
            return;
        }
        break;
    default:
        /* Only the ID registers above should be routed through here */
        g_assert_not_reached();
    }

    /*
     * Build an EC_FPIDTRAP syndrome, packing reg and rt into the ISS.
     * NOTE(review): the fixed numeric fields below are taken as-is;
     * confirm them against the ARM ARM ESR_ELx ISS encoding for
     * trapped FP ID-register accesses.
     */
    syndrome = ((EC_FPIDTRAP << ARM_EL_EC_SHIFT)
                | ARM_EL_IL
                | (1 << 24) | (0xe << 20) | (7 << 14)
                | (reg << 10) | (rt << 5) | 1);

    raise_exception(env, EXCP_HYP_TRAP, syndrome, 2);
}
1380 | ||
4a15527c | 1381 | #endif |