/*
 * In this file we wrap QEMU FP functions to look like softfloat/testfloat's,
 * so that we can use the testfloat infrastructure as-is.
 *
 * This file must be included directly from fp-test.c. We could compile it
 * separately, but it would be tedious to add declarations for all the wrappers.
 */
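
/*
 * The wrappers follow Berkeley SoftFloat 3 / testfloat naming and signatures;
 * for example, qemu_f32_add() defined below has the same shape as SoftFloat's
 *
 *     float32_t f32_add(float32_t a, float32_t b);
 *
 * so fp-test.c can pass these wrappers wherever testfloat expects a SoftFloat
 * function pointer. (Illustrative sketch; the actual wiring lives in
 * fp-test.c.)
 */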

static signed char sf_tininess_to_qemu(uint_fast8_t mode)
{
    switch (mode) {
    case softfloat_tininess_beforeRounding:
        return float_tininess_before_rounding;
    case softfloat_tininess_afterRounding:
        return float_tininess_after_rounding;
    default:
        g_assert_not_reached();
    }
}

static signed char sf_rounding_to_qemu(uint_fast8_t mode)
{
    switch (mode) {
    case softfloat_round_near_even:
        return float_round_nearest_even;
    case softfloat_round_minMag:
        return float_round_to_zero;
    case softfloat_round_min:
        return float_round_down;
    case softfloat_round_max:
        return float_round_up;
    case softfloat_round_near_maxMag:
        return float_round_ties_away;
    case softfloat_round_odd:
        return float_round_to_odd;
    default:
        g_assert_not_reached();
    }
}
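
/*
 * All wrappers below operate on the float_status "qsf" provided by the
 * including file (fp-test.c). Wrappers that take a rounding mode translate
 * it with sf_rounding_to_qemu() and store it into qsf.float_rounding_mode
 * right before calling the QEMU function, roughly:
 *
 *     qsf.float_rounding_mode = sf_rounding_to_qemu(round);
 *     return float32_to_int32(*ap, &qsf);
 */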

static uint_fast8_t qemu_flags_to_sf(uint8_t qflags)
{
    uint_fast8_t ret = 0;

    if (qflags & float_flag_invalid) {
        ret |= softfloat_flag_invalid;
    }
    if (qflags & float_flag_divbyzero) {
        ret |= softfloat_flag_infinite;
    }
    if (qflags & float_flag_overflow) {
        ret |= softfloat_flag_overflow;
    }
    if (qflags & float_flag_underflow) {
        ret |= softfloat_flag_underflow;
    }
    if (qflags & float_flag_inexact) {
        ret |= softfloat_flag_inexact;
    }
    return ret;
}
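
/*
 * The only asymmetric pairing above is float_flag_divbyzero ->
 * softfloat_flag_infinite: Berkeley SoftFloat names the division-by-zero
 * exception flag after the infinite result it signals.
 */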

/*
 * floatx80 and float128 cannot be cast between qemu and softfloat, because
 * in softfloat the order of the fields depends on the host's endianness.
 */
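/*
 * Roughly (see the QEMU and Berkeley SoftFloat headers for the authoritative
 * layouts): QEMU's floatx80 is always { uint64_t low; uint16_t high; },
 * while Berkeley's extFloat80_t orders its signExp/signif fields according
 * to the host's endianness, and float128_t is accessed below through
 * Berkeley's struct uint128, whose v0/v64 members are likewise
 * endian-dependent. Copying field by field is therefore the portable choice.
 */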
static extFloat80_t qemu_to_soft80(floatx80 a)
{
    extFloat80_t ret;

    ret.signif = a.low;
    ret.signExp = a.high;
    return ret;
}

static floatx80 soft_to_qemu80(extFloat80_t a)
{
    floatx80 ret;

    ret.low = a.signif;
    ret.high = a.signExp;
    return ret;
}

static float128_t qemu_to_soft128(float128 a)
{
    float128_t ret;
    struct uint128 *to = (struct uint128 *)&ret;

    to->v0 = a.low;
    to->v64 = a.high;
    return ret;
}

static float128 soft_to_qemu128(float128_t a)
{
    struct uint128 *from = (struct uint128 *)&a;
    float128 ret;

    ret.low = from->v0;
    ret.high = from->v64;
    return ret;
}

/* conversions */
#define WRAP_SF_TO_SF_IEEE(name, func, a_type, b_type) \
    static b_type##_t name(a_type##_t a) \
    { \
        a_type *ap = (a_type *)&a; \
        b_type ret; \
 \
        ret = func(*ap, true, &qsf); \
        return *(b_type##_t *)&ret; \
    }

WRAP_SF_TO_SF_IEEE(qemu_f16_to_f32, float16_to_float32, float16, float32)
WRAP_SF_TO_SF_IEEE(qemu_f16_to_f64, float16_to_float64, float16, float64)

WRAP_SF_TO_SF_IEEE(qemu_f32_to_f16, float32_to_float16, float32, float16)
WRAP_SF_TO_SF_IEEE(qemu_f64_to_f16, float64_to_float16, float64, float16)
#undef WRAP_SF_TO_SF_IEEE
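
/*
 * For reference, WRAP_SF_TO_SF_IEEE(qemu_f16_to_f32, float16_to_float32,
 * float16, float32) above expands to roughly:
 *
 *     static float32_t qemu_f16_to_f32(float16_t a)
 *     {
 *         float16 *ap = (float16 *)&a;
 *         float32 ret;
 *
 *         ret = float16_to_float32(*ap, true, &qsf);
 *         return *(float32_t *)&ret;
 *     }
 *
 * i.e. testfloat's types are punned to QEMU's, the QEMU conversion is called
 * with its 'ieee' argument set, and the result is punned back.
 */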

#define WRAP_SF_TO_SF(name, func, a_type, b_type) \
    static b_type##_t name(a_type##_t a) \
    { \
        a_type *ap = (a_type *)&a; \
        b_type ret; \
 \
        ret = func(*ap, &qsf); \
        return *(b_type##_t *)&ret; \
    }

WRAP_SF_TO_SF(qemu_f32_to_f64, float32_to_float64, float32, float64)
WRAP_SF_TO_SF(qemu_f64_to_f32, float64_to_float32, float64, float32)
#undef WRAP_SF_TO_SF

#define WRAP_SF_TO_80(name, func, type) \
    static void name(type##_t a, extFloat80_t *res) \
    { \
        floatx80 ret; \
        type *ap = (type *)&a; \
 \
        ret = func(*ap, &qsf); \
        *res = qemu_to_soft80(ret); \
    }

WRAP_SF_TO_80(qemu_f32_to_extF80M, float32_to_floatx80, float32)
WRAP_SF_TO_80(qemu_f64_to_extF80M, float64_to_floatx80, float64)
#undef WRAP_SF_TO_80

#define WRAP_SF_TO_128(name, func, type) \
    static void name(type##_t a, float128_t *res) \
    { \
        float128 ret; \
        type *ap = (type *)&a; \
 \
        ret = func(*ap, &qsf); \
        *res = qemu_to_soft128(ret); \
    }

WRAP_SF_TO_128(qemu_f32_to_f128M, float32_to_float128, float32)
WRAP_SF_TO_128(qemu_f64_to_f128M, float64_to_float128, float64)
#undef WRAP_SF_TO_128

/* Note: exact is ignored since qemu's softfloat assumes it is set */
#define WRAP_SF_TO_INT(name, func, type, fast_type) \
    static fast_type name(type##_t a, uint_fast8_t round, bool exact) \
    { \
        type *ap = (type *)&a; \
 \
        qsf.float_rounding_mode = sf_rounding_to_qemu(round); \
        return func(*ap, &qsf); \
    }

WRAP_SF_TO_INT(qemu_f16_to_ui32, float16_to_uint32, float16, uint_fast32_t)
WRAP_SF_TO_INT(qemu_f16_to_ui64, float16_to_uint64, float16, uint_fast64_t)

WRAP_SF_TO_INT(qemu_f32_to_ui32, float32_to_uint32, float32, uint_fast32_t)
WRAP_SF_TO_INT(qemu_f32_to_ui64, float32_to_uint64, float32, uint_fast64_t)

WRAP_SF_TO_INT(qemu_f64_to_ui32, float64_to_uint32, float64, uint_fast32_t)
WRAP_SF_TO_INT(qemu_f64_to_ui64, float64_to_uint64, float64, uint_fast64_t)

WRAP_SF_TO_INT(qemu_f16_to_i32, float16_to_int32, float16, int_fast32_t)
WRAP_SF_TO_INT(qemu_f16_to_i64, float16_to_int64, float16, int_fast64_t)

WRAP_SF_TO_INT(qemu_f32_to_i32, float32_to_int32, float32, int_fast32_t)
WRAP_SF_TO_INT(qemu_f32_to_i64, float32_to_int64, float32, int_fast64_t)

WRAP_SF_TO_INT(qemu_f64_to_i32, float64_to_int32, float64, int_fast32_t)
WRAP_SF_TO_INT(qemu_f64_to_i64, float64_to_int64, float64, int_fast64_t)
#undef WRAP_SF_TO_INT

/* Note: exact is ignored since qemu's softfloat assumes it is set */
#define WRAP_SF_TO_INT_MINMAG(name, func, type, fast_type) \
    static fast_type name(type##_t a, bool exact) \
    { \
        type *ap = (type *)&a; \
 \
        return func(*ap, &qsf); \
    }

WRAP_SF_TO_INT_MINMAG(qemu_f16_to_ui32_r_minMag,
                      float16_to_uint32_round_to_zero, float16, uint_fast32_t)
WRAP_SF_TO_INT_MINMAG(qemu_f16_to_ui64_r_minMag,
                      float16_to_uint64_round_to_zero, float16, uint_fast64_t)

WRAP_SF_TO_INT_MINMAG(qemu_f16_to_i32_r_minMag,
                      float16_to_int32_round_to_zero, float16, int_fast32_t)
WRAP_SF_TO_INT_MINMAG(qemu_f16_to_i64_r_minMag,
                      float16_to_int64_round_to_zero, float16, int_fast64_t)

WRAP_SF_TO_INT_MINMAG(qemu_f32_to_ui32_r_minMag,
                      float32_to_uint32_round_to_zero, float32, uint_fast32_t)
WRAP_SF_TO_INT_MINMAG(qemu_f32_to_ui64_r_minMag,
                      float32_to_uint64_round_to_zero, float32, uint_fast64_t)

WRAP_SF_TO_INT_MINMAG(qemu_f32_to_i32_r_minMag,
                      float32_to_int32_round_to_zero, float32, int_fast32_t)
WRAP_SF_TO_INT_MINMAG(qemu_f32_to_i64_r_minMag,
                      float32_to_int64_round_to_zero, float32, int_fast64_t)

WRAP_SF_TO_INT_MINMAG(qemu_f64_to_ui32_r_minMag,
                      float64_to_uint32_round_to_zero, float64, uint_fast32_t)
WRAP_SF_TO_INT_MINMAG(qemu_f64_to_ui64_r_minMag,
                      float64_to_uint64_round_to_zero, float64, uint_fast64_t)

WRAP_SF_TO_INT_MINMAG(qemu_f64_to_i32_r_minMag,
                      float64_to_int32_round_to_zero, float64, int_fast32_t)
WRAP_SF_TO_INT_MINMAG(qemu_f64_to_i64_r_minMag,
                      float64_to_int64_round_to_zero, float64, int_fast64_t)
#undef WRAP_SF_TO_INT_MINMAG

#define WRAP_80_TO_SF(name, func, type) \
    static type##_t name(const extFloat80_t *ap) \
    { \
        floatx80 a; \
        type ret; \
 \
        a = soft_to_qemu80(*ap); \
        ret = func(a, &qsf); \
        return *(type##_t *)&ret; \
    }

WRAP_80_TO_SF(qemu_extF80M_to_f32, floatx80_to_float32, float32)
WRAP_80_TO_SF(qemu_extF80M_to_f64, floatx80_to_float64, float64)
#undef WRAP_80_TO_SF

#define WRAP_128_TO_SF(name, func, type) \
    static type##_t name(const float128_t *ap) \
    { \
        float128 a; \
        type ret; \
 \
        a = soft_to_qemu128(*ap); \
        ret = func(a, &qsf); \
        return *(type##_t *)&ret; \
    }

WRAP_128_TO_SF(qemu_f128M_to_f32, float128_to_float32, float32)
WRAP_128_TO_SF(qemu_f128M_to_f64, float128_to_float64, float64)
#undef WRAP_128_TO_SF

static void qemu_extF80M_to_f128M(const extFloat80_t *from, float128_t *to)
{
    floatx80 qfrom;
    float128 qto;

    qfrom = soft_to_qemu80(*from);
    qto = floatx80_to_float128(qfrom, &qsf);
    *to = qemu_to_soft128(qto);
}

static void qemu_f128M_to_extF80M(const float128_t *from, extFloat80_t *to)
{
    float128 qfrom;
    floatx80 qto;

    qfrom = soft_to_qemu128(*from);
    qto = float128_to_floatx80(qfrom, &qsf);
    *to = qemu_to_soft80(qto);
}

#define WRAP_INT_TO_SF(name, func, int_type, type) \
    static type##_t name(int_type a) \
    { \
        type ret; \
 \
        ret = func(a, &qsf); \
        return *(type##_t *)&ret; \
    }

WRAP_INT_TO_SF(qemu_ui32_to_f16, uint32_to_float16, uint32_t, float16)
WRAP_INT_TO_SF(qemu_ui32_to_f32, uint32_to_float32, uint32_t, float32)
WRAP_INT_TO_SF(qemu_ui32_to_f64, uint32_to_float64, uint32_t, float64)

WRAP_INT_TO_SF(qemu_ui64_to_f16, uint64_to_float16, uint64_t, float16)
WRAP_INT_TO_SF(qemu_ui64_to_f32, uint64_to_float32, uint64_t, float32)
WRAP_INT_TO_SF(qemu_ui64_to_f64, uint64_to_float64, uint64_t, float64)

WRAP_INT_TO_SF(qemu_i32_to_f16, int32_to_float16, int32_t, float16)
WRAP_INT_TO_SF(qemu_i32_to_f32, int32_to_float32, int32_t, float32)
WRAP_INT_TO_SF(qemu_i32_to_f64, int32_to_float64, int32_t, float64)

WRAP_INT_TO_SF(qemu_i64_to_f16, int64_to_float16, int64_t, float16)
WRAP_INT_TO_SF(qemu_i64_to_f32, int64_to_float32, int64_t, float32)
WRAP_INT_TO_SF(qemu_i64_to_f64, int64_to_float64, int64_t, float64)
#undef WRAP_INT_TO_SF

#define WRAP_INT_TO_80(name, func, int_type) \
    static void name(int_type a, extFloat80_t *res) \
    { \
        floatx80 ret; \
 \
        ret = func(a, &qsf); \
        *res = qemu_to_soft80(ret); \
    }

WRAP_INT_TO_80(qemu_i32_to_extF80M, int32_to_floatx80, int32_t)
WRAP_INT_TO_80(qemu_i64_to_extF80M, int64_to_floatx80, int64_t)
#undef WRAP_INT_TO_80

/* Note: exact is ignored since qemu's softfloat assumes it is set */
#define WRAP_80_TO_INT(name, func, fast_type) \
    static fast_type name(const extFloat80_t *ap, uint_fast8_t round, \
                          bool exact) \
    { \
        floatx80 a; \
 \
        a = soft_to_qemu80(*ap); \
        qsf.float_rounding_mode = sf_rounding_to_qemu(round); \
        return func(a, &qsf); \
    }

WRAP_80_TO_INT(qemu_extF80M_to_i32, floatx80_to_int32, int_fast32_t)
WRAP_80_TO_INT(qemu_extF80M_to_i64, floatx80_to_int64, int_fast64_t)
#undef WRAP_80_TO_INT

/* Note: exact is ignored since qemu's softfloat assumes it is set */
#define WRAP_80_TO_INT_MINMAG(name, func, fast_type) \
    static fast_type name(const extFloat80_t *ap, bool exact) \
    { \
        floatx80 a; \
 \
        a = soft_to_qemu80(*ap); \
        return func(a, &qsf); \
    }

WRAP_80_TO_INT_MINMAG(qemu_extF80M_to_i32_r_minMag,
                      floatx80_to_int32_round_to_zero, int_fast32_t)
WRAP_80_TO_INT_MINMAG(qemu_extF80M_to_i64_r_minMag,
                      floatx80_to_int64_round_to_zero, int_fast64_t)
#undef WRAP_80_TO_INT_MINMAG

/* Note: exact is ignored since qemu's softfloat assumes it is set */
#define WRAP_128_TO_INT(name, func, fast_type) \
    static fast_type name(const float128_t *ap, uint_fast8_t round, \
                          bool exact) \
    { \
        float128 a; \
 \
        a = soft_to_qemu128(*ap); \
        qsf.float_rounding_mode = sf_rounding_to_qemu(round); \
        return func(a, &qsf); \
    }

WRAP_128_TO_INT(qemu_f128M_to_i32, float128_to_int32, int_fast32_t)
WRAP_128_TO_INT(qemu_f128M_to_i64, float128_to_int64, int_fast64_t)

WRAP_128_TO_INT(qemu_f128M_to_ui64, float128_to_uint64, uint_fast64_t)
#undef WRAP_128_TO_INT

/* Note: exact is ignored since qemu's softfloat assumes it is set */
#define WRAP_128_TO_INT_MINMAG(name, func, fast_type) \
    static fast_type name(const float128_t *ap, bool exact) \
    { \
        float128 a; \
 \
        a = soft_to_qemu128(*ap); \
        return func(a, &qsf); \
    }

WRAP_128_TO_INT_MINMAG(qemu_f128M_to_i32_r_minMag,
                       float128_to_int32_round_to_zero, int_fast32_t)
WRAP_128_TO_INT_MINMAG(qemu_f128M_to_i64_r_minMag,
                       float128_to_int64_round_to_zero, int_fast64_t)

WRAP_128_TO_INT_MINMAG(qemu_f128M_to_ui32_r_minMag,
                       float128_to_uint32_round_to_zero, uint_fast32_t)
WRAP_128_TO_INT_MINMAG(qemu_f128M_to_ui64_r_minMag,
                       float128_to_uint64_round_to_zero, uint_fast64_t)
#undef WRAP_128_TO_INT_MINMAG

#define WRAP_INT_TO_128(name, func, int_type) \
    static void name(int_type a, float128_t *res) \
    { \
        float128 ret; \
 \
        ret = func(a, &qsf); \
        *res = qemu_to_soft128(ret); \
    }

WRAP_INT_TO_128(qemu_ui64_to_f128M, uint64_to_float128, uint64_t)

WRAP_INT_TO_128(qemu_i32_to_f128M, int32_to_float128, int32_t)
WRAP_INT_TO_128(qemu_i64_to_f128M, int64_to_float128, int64_t)
#undef WRAP_INT_TO_128

/* Note: exact is ignored since qemu's softfloat assumes it is set */
#define WRAP_ROUND_TO_INT(name, func, type) \
    static type##_t name(type##_t a, uint_fast8_t round, bool exact) \
    { \
        type *ap = (type *)&a; \
        type ret; \
 \
        qsf.float_rounding_mode = sf_rounding_to_qemu(round); \
        ret = func(*ap, &qsf); \
        return *(type##_t *)&ret; \
    }

WRAP_ROUND_TO_INT(qemu_f16_roundToInt, float16_round_to_int, float16)
WRAP_ROUND_TO_INT(qemu_f32_roundToInt, float32_round_to_int, float32)
WRAP_ROUND_TO_INT(qemu_f64_roundToInt, float64_round_to_int, float64)
#undef WRAP_ROUND_TO_INT

static void qemu_extF80M_roundToInt(const extFloat80_t *ap, uint_fast8_t round,
                                    bool exact, extFloat80_t *res)
{
    floatx80 a;
    floatx80 ret;

    a = soft_to_qemu80(*ap);
    qsf.float_rounding_mode = sf_rounding_to_qemu(round);
    ret = floatx80_round_to_int(a, &qsf);
    *res = qemu_to_soft80(ret);
}

static void qemu_f128M_roundToInt(const float128_t *ap, uint_fast8_t round,
                                  bool exact, float128_t *res)
{
    float128 a;
    float128 ret;

    a = soft_to_qemu128(*ap);
    qsf.float_rounding_mode = sf_rounding_to_qemu(round);
    ret = float128_round_to_int(a, &qsf);
    *res = qemu_to_soft128(ret);
}

/* operations */
#define WRAP1(name, func, type) \
    static type##_t name(type##_t a) \
    { \
        type *ap = (type *)&a; \
        type ret; \
 \
        ret = func(*ap, &qsf); \
        return *(type##_t *)&ret; \
    }

#define WRAP2(name, func, type) \
    static type##_t name(type##_t a, type##_t b) \
    { \
        type *ap = (type *)&a; \
        type *bp = (type *)&b; \
        type ret; \
 \
        ret = func(*ap, *bp, &qsf); \
        return *(type##_t *)&ret; \
    }

#define WRAP_COMMON_OPS(b) \
    WRAP1(qemu_f##b##_sqrt, float##b##_sqrt, float##b) \
    WRAP2(qemu_f##b##_add, float##b##_add, float##b) \
    WRAP2(qemu_f##b##_sub, float##b##_sub, float##b) \
    WRAP2(qemu_f##b##_mul, float##b##_mul, float##b) \
    WRAP2(qemu_f##b##_div, float##b##_div, float##b)
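/*
 * Each WRAP_COMMON_OPS(N) below therefore defines qemu_fN_sqrt, qemu_fN_add,
 * qemu_fN_sub, qemu_fN_mul and qemu_fN_div, matching the shapes of Berkeley
 * SoftFloat's fN_sqrt, fN_add, etc.
 */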

WRAP_COMMON_OPS(16)
WRAP_COMMON_OPS(32)
WRAP_COMMON_OPS(64)
#undef WRAP_COMMON_OPS

WRAP2(qemu_f32_rem, float32_rem, float32)
WRAP2(qemu_f64_rem, float64_rem, float64)
#undef WRAP2
#undef WRAP1

#define WRAP1_80(name, func) \
    static void name(const extFloat80_t *ap, extFloat80_t *res) \
    { \
        floatx80 a; \
        floatx80 ret; \
 \
        a = soft_to_qemu80(*ap); \
        ret = func(a, &qsf); \
        *res = qemu_to_soft80(ret); \
    }

WRAP1_80(qemu_extF80M_sqrt, floatx80_sqrt)
#undef WRAP1_80

#define WRAP1_128(name, func) \
    static void name(const float128_t *ap, float128_t *res) \
    { \
        float128 a; \
        float128 ret; \
 \
        a = soft_to_qemu128(*ap); \
        ret = func(a, &qsf); \
        *res = qemu_to_soft128(ret); \
    }

WRAP1_128(qemu_f128M_sqrt, float128_sqrt)
#undef WRAP1_128

#define WRAP2_80(name, func) \
    static void name(const extFloat80_t *ap, const extFloat80_t *bp, \
                     extFloat80_t *res) \
    { \
        floatx80 a; \
        floatx80 b; \
        floatx80 ret; \
 \
        a = soft_to_qemu80(*ap); \
        b = soft_to_qemu80(*bp); \
        ret = func(a, b, &qsf); \
        *res = qemu_to_soft80(ret); \
    }

WRAP2_80(qemu_extF80M_add, floatx80_add)
WRAP2_80(qemu_extF80M_sub, floatx80_sub)
WRAP2_80(qemu_extF80M_mul, floatx80_mul)
WRAP2_80(qemu_extF80M_div, floatx80_div)
WRAP2_80(qemu_extF80M_rem, floatx80_rem)
#undef WRAP2_80

#define WRAP2_128(name, func) \
    static void name(const float128_t *ap, const float128_t *bp, \
                     float128_t *res) \
    { \
        float128 a; \
        float128 b; \
        float128 ret; \
 \
        a = soft_to_qemu128(*ap); \
        b = soft_to_qemu128(*bp); \
        ret = func(a, b, &qsf); \
        *res = qemu_to_soft128(ret); \
    }

WRAP2_128(qemu_f128M_add, float128_add)
WRAP2_128(qemu_f128M_sub, float128_sub)
WRAP2_128(qemu_f128M_mul, float128_mul)
WRAP2_128(qemu_f128M_div, float128_div)
WRAP2_128(qemu_f128M_rem, float128_rem)
#undef WRAP2_128

#define WRAP_MULADD(name, func, type) \
    static type##_t name(type##_t a, type##_t b, type##_t c) \
    { \
        type *ap = (type *)&a; \
        type *bp = (type *)&b; \
        type *cp = (type *)&c; \
        type ret; \
 \
        ret = func(*ap, *bp, *cp, 0, &qsf); \
        return *(type##_t *)&ret; \
    }
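/*
 * The hard-coded 0 passed as the flags argument to the muladd functions
 * requests a plain fused multiply-add, i.e. none of QEMU's
 * float_muladd_negate_* variants.
 */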

WRAP_MULADD(qemu_f16_mulAdd, float16_muladd, float16)
WRAP_MULADD(qemu_f32_mulAdd, float32_muladd, float32)
WRAP_MULADD(qemu_f64_mulAdd, float64_muladd, float64)
#undef WRAP_MULADD

#define WRAP_CMP16(name, func, retcond) \
    static bool name(float16_t a, float16_t b) \
    { \
        float16 *ap = (float16 *)&a; \
        float16 *bp = (float16 *)&b; \
        int ret; \
 \
        ret = func(*ap, *bp, &qsf); \
        return retcond; \
    }
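/*
 * float16_compare and float16_compare_quiet return a float_relation value
 * (less == -1, equal == 0, greater == 1, unordered == 2), so the retcond
 * arguments below turn that result into the boolean predicate testfloat
 * expects; unordered comparisons yield false in every case.
 */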

WRAP_CMP16(qemu_f16_eq_signaling, float16_compare, ret == 0)
WRAP_CMP16(qemu_f16_eq, float16_compare_quiet, ret == 0)
WRAP_CMP16(qemu_f16_le, float16_compare, ret <= 0)
WRAP_CMP16(qemu_f16_lt, float16_compare, ret < 0)
WRAP_CMP16(qemu_f16_le_quiet, float16_compare_quiet, ret <= 0)
WRAP_CMP16(qemu_f16_lt_quiet, float16_compare_quiet, ret < 0)
#undef WRAP_CMP16

#define WRAP_CMP(name, func, type) \
    static bool name(type##_t a, type##_t b) \
    { \
        type *ap = (type *)&a; \
        type *bp = (type *)&b; \
 \
        return !!func(*ap, *bp, &qsf); \
    }

#define GEN_WRAP_CMP(b) \
    WRAP_CMP(qemu_f##b##_eq_signaling, float##b##_eq, float##b) \
    WRAP_CMP(qemu_f##b##_eq, float##b##_eq_quiet, float##b) \
    WRAP_CMP(qemu_f##b##_le, float##b##_le, float##b) \
    WRAP_CMP(qemu_f##b##_lt, float##b##_lt, float##b) \
    WRAP_CMP(qemu_f##b##_le_quiet, float##b##_le_quiet, float##b) \
    WRAP_CMP(qemu_f##b##_lt_quiet, float##b##_lt_quiet, float##b)

GEN_WRAP_CMP(32)
GEN_WRAP_CMP(64)
#undef GEN_WRAP_CMP
#undef WRAP_CMP

#define WRAP_CMP80(name, func) \
    static bool name(const extFloat80_t *ap, const extFloat80_t *bp) \
    { \
        floatx80 a; \
        floatx80 b; \
 \
        a = soft_to_qemu80(*ap); \
        b = soft_to_qemu80(*bp); \
        return !!func(a, b, &qsf); \
    }

WRAP_CMP80(qemu_extF80M_eq_signaling, floatx80_eq)
WRAP_CMP80(qemu_extF80M_eq, floatx80_eq_quiet)
WRAP_CMP80(qemu_extF80M_le, floatx80_le)
WRAP_CMP80(qemu_extF80M_lt, floatx80_lt)
WRAP_CMP80(qemu_extF80M_le_quiet, floatx80_le_quiet)
WRAP_CMP80(qemu_extF80M_lt_quiet, floatx80_lt_quiet)
#undef WRAP_CMP80

#define WRAP_CMP128(name, func) \
    static bool name(const float128_t *ap, const float128_t *bp) \
    { \
        float128 a; \
        float128 b; \
 \
        a = soft_to_qemu128(*ap); \
        b = soft_to_qemu128(*bp); \
        return !!func(a, b, &qsf); \
    }

WRAP_CMP128(qemu_f128M_eq_signaling, float128_eq)
WRAP_CMP128(qemu_f128M_eq, float128_eq_quiet)
WRAP_CMP128(qemu_f128M_le, float128_le)
WRAP_CMP128(qemu_f128M_lt, float128_lt)
WRAP_CMP128(qemu_f128M_le_quiet, float128_le_quiet)
WRAP_CMP128(qemu_f128M_lt_quiet, float128_lt_quiet)
#undef WRAP_CMP128