/*
 * UniCore-F64 simulation helpers for QEMU.
 *
 * Copyright (C) 2010-2012 Guan Xuetao
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation, or any later version.
 * See the COPYING file in the top-level directory.
 */
#include "cpu.h"
#include "helper.h"

/*
 * The convention used for UniCore-F64 instructions:
 *  Single precision routines have an "s" suffix.
 *  Double precision routines have a "d" suffix.
 */

/* Convert host exception flags to f64 form. */
static inline int ucf64_exceptbits_from_host(int host_bits)
{
    int target_bits = 0;

    if (host_bits & float_flag_invalid) {
        target_bits |= UCF64_FPSCR_FLAG_INVALID;
    }
    if (host_bits & float_flag_divbyzero) {
        target_bits |= UCF64_FPSCR_FLAG_DIVZERO;
    }
    if (host_bits & float_flag_overflow) {
        target_bits |= UCF64_FPSCR_FLAG_OVERFLOW;
    }
    if (host_bits & float_flag_underflow) {
        target_bits |= UCF64_FPSCR_FLAG_UNDERFLOW;
    }
    if (host_bits & float_flag_inexact) {
        target_bits |= UCF64_FPSCR_FLAG_INEXACT;
    }
    return target_bits;
}

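/*
 * Reading FPSCR merges the sticky exception flags that softfloat has
 * accumulated in fp_status into the saved architectural FPSCR image.
 */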
uint32_t HELPER(ucf64_get_fpscr)(CPUUniCore32State *env)
{
    int i;
    uint32_t fpscr;

    fpscr = (env->ucf64.xregs[UC32_UCF64_FPSCR] & UCF64_FPSCR_MASK);
    i = get_float_exception_flags(&env->ucf64.fp_status);
    fpscr |= ucf64_exceptbits_from_host(i);
    return fpscr;
}

/* Convert ucf64 exception flags to host form. */
static inline int ucf64_exceptbits_to_host(int target_bits)
{
    int host_bits = 0;

    if (target_bits & UCF64_FPSCR_FLAG_INVALID) {
        host_bits |= float_flag_invalid;
    }
    if (target_bits & UCF64_FPSCR_FLAG_DIVZERO) {
        host_bits |= float_flag_divbyzero;
    }
    if (target_bits & UCF64_FPSCR_FLAG_OVERFLOW) {
        host_bits |= float_flag_overflow;
    }
    if (target_bits & UCF64_FPSCR_FLAG_UNDERFLOW) {
        host_bits |= float_flag_underflow;
    }
    if (target_bits & UCF64_FPSCR_FLAG_INEXACT) {
        host_bits |= float_flag_inexact;
    }
    return host_bits;
}

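/*
 * Writing FPSCR reprograms the softfloat rounding mode whenever the RND
 * field changes, then reloads the softfloat exception flag state from the
 * trap-enable field of the written value.
 */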
void HELPER(ucf64_set_fpscr)(CPUUniCore32State *env, uint32_t val)
{
    int i;
    uint32_t changed;

    changed = env->ucf64.xregs[UC32_UCF64_FPSCR];
    env->ucf64.xregs[UC32_UCF64_FPSCR] = (val & UCF64_FPSCR_MASK);

    changed ^= val;
    if (changed & (UCF64_FPSCR_RND_MASK)) {
        i = UCF64_FPSCR_RND(val);
        switch (i) {
        case 0:
            i = float_round_nearest_even;
            break;
        case 1:
            i = float_round_to_zero;
            break;
        case 2:
            i = float_round_up;
            break;
        case 3:
            i = float_round_down;
            break;
        default: /* 100 and 101 not implemented */
            cpu_abort(env, "Unsupported UniCore-F64 round mode");
        }
        set_float_rounding_mode(i, &env->ucf64.fp_status);
    }

    i = ucf64_exceptbits_to_host(UCF64_FPSCR_TRAPEN(val));
    set_float_exception_flags(i, &env->ucf64.fp_status);
}

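/*
 * The arithmetic helpers simply delegate to softfloat, passing the per-CPU
 * fp_status so that the rounding mode and sticky exception flags programmed
 * through FPSCR are honoured and accumulated.
 */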
float32 HELPER(ucf64_adds)(float32 a, float32 b, CPUUniCore32State *env)
{
    return float32_add(a, b, &env->ucf64.fp_status);
}

float64 HELPER(ucf64_addd)(float64 a, float64 b, CPUUniCore32State *env)
{
    return float64_add(a, b, &env->ucf64.fp_status);
}

float32 HELPER(ucf64_subs)(float32 a, float32 b, CPUUniCore32State *env)
{
    return float32_sub(a, b, &env->ucf64.fp_status);
}

float64 HELPER(ucf64_subd)(float64 a, float64 b, CPUUniCore32State *env)
{
    return float64_sub(a, b, &env->ucf64.fp_status);
}

float32 HELPER(ucf64_muls)(float32 a, float32 b, CPUUniCore32State *env)
{
    return float32_mul(a, b, &env->ucf64.fp_status);
}

float64 HELPER(ucf64_muld)(float64 a, float64 b, CPUUniCore32State *env)
{
    return float64_mul(a, b, &env->ucf64.fp_status);
}

float32 HELPER(ucf64_divs)(float32 a, float32 b, CPUUniCore32State *env)
{
    return float32_div(a, b, &env->ucf64.fp_status);
}

float64 HELPER(ucf64_divd)(float64 a, float64 b, CPUUniCore32State *env)
{
    return float64_div(a, b, &env->ucf64.fp_status);
}

float32 HELPER(ucf64_negs)(float32 a)
{
    return float32_chs(a);
}

float64 HELPER(ucf64_negd)(float64 a)
{
    return float64_chs(a);
}

float32 HELPER(ucf64_abss)(float32 a)
{
    return float32_abs(a);
}

float64 HELPER(ucf64_absd)(float64 a)
{
    return float64_abs(a);
}

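/*
 * Comparison helpers: float*_compare_quiet() returns one of softfloat's
 * float_relation_* values (-1 less, 0 equal, 1 greater, 2 unordered).
 * The low three bits of 'c' select the condition (F/UN/EQ/UEQ/OLT/ULT/
 * OLE/ULE); the boolean result is kept in env->CF and mirrored into
 * bit 29 of FPSCR.
 */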
void HELPER(ucf64_cmps)(float32 a, float32 b, uint32_t c,
                        CPUUniCore32State *env)
{
    int flag;
    flag = float32_compare_quiet(a, b, &env->ucf64.fp_status);
    env->CF = 0;
    switch (c & 0x7) {
    case 0: /* F */
        break;
    case 1: /* UN */
        if (flag == 2) {
            env->CF = 1;
        }
        break;
    case 2: /* EQ */
        if (flag == 0) {
            env->CF = 1;
        }
        break;
    case 3: /* UEQ */
        if ((flag == 0) || (flag == 2)) {
            env->CF = 1;
        }
        break;
    case 4: /* OLT */
        if (flag == -1) {
            env->CF = 1;
        }
        break;
    case 5: /* ULT */
        if ((flag == -1) || (flag == 2)) {
            env->CF = 1;
        }
        break;
    case 6: /* OLE */
        if ((flag == -1) || (flag == 0)) {
            env->CF = 1;
        }
        break;
    case 7: /* ULE */
        if (flag != 1) {
            env->CF = 1;
        }
        break;
    }
    env->ucf64.xregs[UC32_UCF64_FPSCR] = (env->CF << 29)
                    | (env->ucf64.xregs[UC32_UCF64_FPSCR] & 0x0fffffff);
}

void HELPER(ucf64_cmpd)(float64 a, float64 b, uint32_t c,
                        CPUUniCore32State *env)
{
    int flag;
    flag = float64_compare_quiet(a, b, &env->ucf64.fp_status);
    env->CF = 0;
    switch (c & 0x7) {
    case 0: /* F */
        break;
    case 1: /* UN */
        if (flag == 2) {
            env->CF = 1;
        }
        break;
    case 2: /* EQ */
        if (flag == 0) {
            env->CF = 1;
        }
        break;
    case 3: /* UEQ */
        if ((flag == 0) || (flag == 2)) {
            env->CF = 1;
        }
        break;
    case 4: /* OLT */
        if (flag == -1) {
            env->CF = 1;
        }
        break;
    case 5: /* ULT */
        if ((flag == -1) || (flag == 2)) {
            env->CF = 1;
        }
        break;
    case 6: /* OLE */
        if ((flag == -1) || (flag == 0)) {
            env->CF = 1;
        }
        break;
    case 7: /* ULE */
        if (flag != 1) {
            env->CF = 1;
        }
        break;
    }
    env->ucf64.xregs[UC32_UCF64_FPSCR] = (env->CF << 29)
                    | (env->ucf64.xregs[UC32_UCF64_FPSCR] & 0x0fffffff);
}

/* Helper routines to perform bitwise copies between float and int. */
static inline float32 ucf64_itos(uint32_t i)
{
    union {
        uint32_t i;
        float32 s;
    } v;

    v.i = i;
    return v.s;
}

static inline uint32_t ucf64_stoi(float32 s)
{
    union {
        uint32_t i;
        float32 s;
    } v;

    v.s = s;
    return v.i;
}

static inline float64 ucf64_itod(uint64_t i)
{
    union {
        uint64_t i;
        float64 d;
    } v;

    v.i = i;
    return v.d;
}

static inline uint64_t ucf64_dtoi(float64 d)
{
    union {
        uint64_t i;
        float64 d;
    } v;

    v.d = d;
    return v.i;
}

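/*
 * The si2sf/si2df/sf2si/df2si helpers receive and return integer bit
 * patterns in float-typed registers, hence the ucf64_stoi()/ucf64_itos()
 * reinterpretations around the softfloat conversions.
 */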
/* Integer to float conversion. */
float32 HELPER(ucf64_si2sf)(float32 x, CPUUniCore32State *env)
{
    return int32_to_float32(ucf64_stoi(x), &env->ucf64.fp_status);
}

float64 HELPER(ucf64_si2df)(float32 x, CPUUniCore32State *env)
{
    return int32_to_float64(ucf64_stoi(x), &env->ucf64.fp_status);
}

/* Float to integer conversion. */
float32 HELPER(ucf64_sf2si)(float32 x, CPUUniCore32State *env)
{
    return ucf64_itos(float32_to_int32(x, &env->ucf64.fp_status));
}

float32 HELPER(ucf64_df2si)(float64 x, CPUUniCore32State *env)
{
    return ucf64_itos(float64_to_int32(x, &env->ucf64.fp_status));
}

/* Floating point conversion. */
float64 HELPER(ucf64_sf2df)(float32 x, CPUUniCore32State *env)
{
    return float32_to_float64(x, &env->ucf64.fp_status);
}

float32 HELPER(ucf64_df2sf)(float64 x, CPUUniCore32State *env)
{
    return float64_to_float32(x, &env->ucf64.fp_status);
}