/*
 * ARM translation: AArch32 VFP instructions
 *
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2005-2007 CodeSourcery
 * Copyright (c) 2007 OpenedHand, Ltd.
 * Copyright (c) 2019 Linaro, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/*
 * This file is intended to be included from translate.c; it uses
 * some macros and definitions provided by that file.
 * It might be possible to convert it to a standalone .c file eventually.
 */

/* Include the generated VFP decoder */
#include "decode-vfp.inc.c"
#include "decode-vfp-uncond.inc.c"

/*
 * Check that VFP access is enabled. If it is, do the necessary
 * M-profile lazy-FP handling and then return true.
 * If not, emit code to generate an appropriate exception and
 * return false.
 * The ignore_vfp_enabled argument specifies that we should ignore
 * whether VFP is enabled via FPEXC[EN]: this should be true for FMXR/FMRX
 * accesses to FPSID, FPEXC, MVFR0, MVFR1, MVFR2, and false for all other insns.
 */
static bool full_vfp_access_check(DisasContext *s, bool ignore_vfp_enabled)
{
    if (s->fp_excp_el) {
        if (arm_dc_feature(s, ARM_FEATURE_M)) {
            gen_exception_insn(s, 4, EXCP_NOCP, syn_uncategorized(),
                               s->fp_excp_el);
        } else {
            gen_exception_insn(s, 4, EXCP_UDEF,
                               syn_fp_access_trap(1, 0xe, false),
                               s->fp_excp_el);
        }
        return false;
    }

    if (!s->vfp_enabled && !ignore_vfp_enabled) {
        assert(!arm_dc_feature(s, ARM_FEATURE_M));
        gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
                           default_exception_el(s));
        return false;
    }

    if (arm_dc_feature(s, ARM_FEATURE_M)) {
        /* Handle M-profile lazy FP state mechanics */

        /* Trigger lazy-state preservation if necessary */
        if (s->v7m_lspact) {
            /*
             * Lazy state saving affects external memory and also the NVIC,
             * so we must mark it as an IO operation for icount.
             */
            if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
                gen_io_start();
            }
            gen_helper_v7m_preserve_fp_state(cpu_env);
            if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
                gen_io_end();
            }
            /*
             * If the preserve_fp_state helper doesn't throw an exception
             * then it will clear LSPACT; we don't need to repeat this for
             * any further FP insns in this TB.
             */
            s->v7m_lspact = false;
        }

        /* Update ownership of FP context: set FPCCR.S to match current state */
        if (s->v8m_fpccr_s_wrong) {
            TCGv_i32 tmp;

            tmp = load_cpu_field(v7m.fpccr[M_REG_S]);
            if (s->v8m_secure) {
                tcg_gen_ori_i32(tmp, tmp, R_V7M_FPCCR_S_MASK);
            } else {
                tcg_gen_andi_i32(tmp, tmp, ~R_V7M_FPCCR_S_MASK);
            }
            store_cpu_field(tmp, v7m.fpccr[M_REG_S]);
            /* Don't need to do this for any further FP insns in this TB */
            s->v8m_fpccr_s_wrong = false;
        }

        if (s->v7m_new_fp_ctxt_needed) {
            /*
             * Create new FP context by updating CONTROL.FPCA, CONTROL.SFPA
             * and the FPSCR.
             */
            TCGv_i32 control, fpscr;
            uint32_t bits = R_V7M_CONTROL_FPCA_MASK;

            fpscr = load_cpu_field(v7m.fpdscr[s->v8m_secure]);
            gen_helper_vfp_set_fpscr(cpu_env, fpscr);
            tcg_temp_free_i32(fpscr);
            /*
             * We don't need to arrange to end the TB, because the only
             * parts of FPSCR which we cache in the TB flags are the VECLEN
             * and VECSTRIDE, and those don't exist for M-profile.
             */

            if (s->v8m_secure) {
                bits |= R_V7M_CONTROL_SFPA_MASK;
            }
            control = load_cpu_field(v7m.control[M_REG_S]);
            tcg_gen_ori_i32(control, control, bits);
            store_cpu_field(control, v7m.control[M_REG_S]);
            /* Don't need to do this for any further FP insns in this TB */
            s->v7m_new_fp_ctxt_needed = false;
        }
    }

    return true;
}

/*
 * The most usual kind of VFP access check, for everything except
 * FMXR/FMRX to the always-available special registers.
 */
static bool vfp_access_check(DisasContext *s)
{
    return full_vfp_access_check(s, false);
}

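/*
 * VSEL: select between Vn and Vm according to the condition flags.
 * Only the EQ, VS, GE and GT conditions are encodable (the cc field
 * in the switch below); the "false" case selects Vm.
 */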
static bool trans_VSEL(DisasContext *s, arg_VSEL *a)
{
    uint32_t rd, rn, rm;
    bool dp = a->dp;

    if (!dc_isar_feature(aa32_vsel, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (dp && !dc_isar_feature(aa32_fp_d32, s) &&
        ((a->vm | a->vn | a->vd) & 0x10)) {
        return false;
    }
    rd = a->vd;
    rn = a->vn;
    rm = a->vm;

    if (!vfp_access_check(s)) {
        return true;
    }

    if (dp) {
        TCGv_i64 frn, frm, dest;
        TCGv_i64 tmp, zero, zf, nf, vf;

        zero = tcg_const_i64(0);

        frn = tcg_temp_new_i64();
        frm = tcg_temp_new_i64();
        dest = tcg_temp_new_i64();

        zf = tcg_temp_new_i64();
        nf = tcg_temp_new_i64();
        vf = tcg_temp_new_i64();

        tcg_gen_extu_i32_i64(zf, cpu_ZF);
        tcg_gen_ext_i32_i64(nf, cpu_NF);
        tcg_gen_ext_i32_i64(vf, cpu_VF);

        neon_load_reg64(frn, rn);
        neon_load_reg64(frm, rm);
        switch (a->cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i64(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
                                frn, frm);
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i64(tmp);
            break;
        }
        neon_store_reg64(dest, rd);
        tcg_temp_free_i64(frn);
        tcg_temp_free_i64(frm);
        tcg_temp_free_i64(dest);

        tcg_temp_free_i64(zf);
        tcg_temp_free_i64(nf);
        tcg_temp_free_i64(vf);

        tcg_temp_free_i64(zero);
    } else {
        TCGv_i32 frn, frm, dest;
        TCGv_i32 tmp, zero;

        zero = tcg_const_i32(0);

        frn = tcg_temp_new_i32();
        frm = tcg_temp_new_i32();
        dest = tcg_temp_new_i32();
        neon_load_reg32(frn, rn);
        neon_load_reg32(frm, rm);
        switch (a->cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i32(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
                                frn, frm);
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i32(tmp);
            break;
        }
        neon_store_reg32(dest, rd);
        tcg_temp_free_i32(frn);
        tcg_temp_free_i32(frm);
        tcg_temp_free_i32(dest);

        tcg_temp_free_i32(zero);
    }

    return true;
}

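/*
 * VMINNM/VMAXNM: floating-point minimum/maximum number, with the
 * IEEE 754-2008 minNum()/maxNum() semantics where a quiet NaN
 * operand loses to a numeric operand.
 */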
static bool trans_VMINMAXNM(DisasContext *s, arg_VMINMAXNM *a)
{
    uint32_t rd, rn, rm;
    bool dp = a->dp;
    bool vmin = a->op;
    TCGv_ptr fpst;

    if (!dc_isar_feature(aa32_vminmaxnm, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (dp && !dc_isar_feature(aa32_fp_d32, s) &&
        ((a->vm | a->vn | a->vd) & 0x10)) {
        return false;
    }
    rd = a->vd;
    rn = a->vn;
    rm = a->vm;

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = get_fpstatus_ptr(0);

    if (dp) {
        TCGv_i64 frn, frm, dest;

        frn = tcg_temp_new_i64();
        frm = tcg_temp_new_i64();
        dest = tcg_temp_new_i64();

        neon_load_reg64(frn, rn);
        neon_load_reg64(frm, rm);
        if (vmin) {
            gen_helper_vfp_minnumd(dest, frn, frm, fpst);
        } else {
            gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
        }
        neon_store_reg64(dest, rd);
        tcg_temp_free_i64(frn);
        tcg_temp_free_i64(frm);
        tcg_temp_free_i64(dest);
    } else {
        TCGv_i32 frn, frm, dest;

        frn = tcg_temp_new_i32();
        frm = tcg_temp_new_i32();
        dest = tcg_temp_new_i32();

        neon_load_reg32(frn, rn);
        neon_load_reg32(frm, rm);
        if (vmin) {
            gen_helper_vfp_minnums(dest, frn, frm, fpst);
        } else {
            gen_helper_vfp_maxnums(dest, frn, frm, fpst);
        }
        neon_store_reg32(dest, rd);
        tcg_temp_free_i32(frn);
        tcg_temp_free_i32(frm);
        tcg_temp_free_i32(dest);
    }

    tcg_temp_free_ptr(fpst);
    return true;
}

/*
 * Table for converting the most common AArch32 encoding of
 * rounding mode to arm_fprounding order (which matches the
 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
 */
static const uint8_t fp_decode_rm[] = {
    FPROUNDING_TIEAWAY,
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
};

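/*
 * VRINT{A,N,P,M}: round to integral in floating-point format, with
 * the rounding mode taken from the instruction rather than the FPSCR.
 */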
static bool trans_VRINT(DisasContext *s, arg_VRINT *a)
{
    uint32_t rd, rm;
    bool dp = a->dp;
    TCGv_ptr fpst;
    TCGv_i32 tcg_rmode;
    int rounding = fp_decode_rm[a->rm];

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (dp && !dc_isar_feature(aa32_fp_d32, s) &&
        ((a->vm | a->vd) & 0x10)) {
        return false;
    }
    rd = a->vd;
    rm = a->vm;

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = get_fpstatus_ptr(0);

    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);

    if (dp) {
        TCGv_i64 tcg_op;
        TCGv_i64 tcg_res;
        tcg_op = tcg_temp_new_i64();
        tcg_res = tcg_temp_new_i64();
        neon_load_reg64(tcg_op, rm);
        gen_helper_rintd(tcg_res, tcg_op, fpst);
        neon_store_reg64(tcg_res, rd);
        tcg_temp_free_i64(tcg_op);
        tcg_temp_free_i64(tcg_res);
    } else {
        TCGv_i32 tcg_op;
        TCGv_i32 tcg_res;
        tcg_op = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        neon_load_reg32(tcg_op, rm);
        gen_helper_rints(tcg_res, tcg_op, fpst);
        neon_store_reg32(tcg_res, rd);
        tcg_temp_free_i32(tcg_op);
        tcg_temp_free_i32(tcg_res);
    }

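    /*
     * Restore the original rounding mode: the set_rmode helper wrote
     * the previous mode back into tcg_rmode when we switched it above.
     */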
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    tcg_temp_free_i32(tcg_rmode);

    tcg_temp_free_ptr(fpst);
    return true;
}

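/*
 * VCVT{A,N,P,M}: convert a single- or double-precision value to a
 * 32-bit integer, with the rounding mode taken from the instruction
 * rather than the FPSCR.
 */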
static bool trans_VCVT(DisasContext *s, arg_VCVT *a)
{
    uint32_t rd, rm;
    bool dp = a->dp;
    TCGv_ptr fpst;
    TCGv_i32 tcg_rmode, tcg_shift;
    int rounding = fp_decode_rm[a->rm];
    bool is_signed = a->op;

    if (!dc_isar_feature(aa32_vcvt_dr, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (dp && !dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) {
        return false;
    }
    rd = a->vd;
    rm = a->vm;

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = get_fpstatus_ptr(0);

    tcg_shift = tcg_const_i32(0);

    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);

    if (dp) {
        TCGv_i64 tcg_double, tcg_res;
        TCGv_i32 tcg_tmp;
        tcg_double = tcg_temp_new_i64();
        tcg_res = tcg_temp_new_i64();
        tcg_tmp = tcg_temp_new_i32();
        neon_load_reg64(tcg_double, rm);
        if (is_signed) {
            gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
        } else {
            gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
        }
        tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
        neon_store_reg32(tcg_tmp, rd);
        tcg_temp_free_i32(tcg_tmp);
        tcg_temp_free_i64(tcg_res);
        tcg_temp_free_i64(tcg_double);
    } else {
        TCGv_i32 tcg_single, tcg_res;
        tcg_single = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        neon_load_reg32(tcg_single, rm);
        if (is_signed) {
            gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
        } else {
            gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
        }
        neon_store_reg32(tcg_res, rd);
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_single);
    }

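    /* Restore the original rounding mode (returned by the first set_rmode) */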
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    tcg_temp_free_i32(tcg_rmode);

    tcg_temp_free_i32(tcg_shift);

    tcg_temp_free_ptr(fpst);

    return true;
}

static bool trans_VMOV_to_gp(DisasContext *s, arg_VMOV_to_gp *a)
{
    /* VMOV scalar to general purpose register */
    TCGv_i32 tmp;
    int pass;
    uint32_t offset;

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vn & 0x10)) {
        return false;
    }

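    /*
     * The scaled index is a byte offset into the 64-bit Dn register:
     * bit 2 selects which 32-bit half ("pass") holds the element, and
     * the low two bits become a bit shift within that half.
     */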
    offset = a->index << a->size;
    pass = extract32(offset, 2, 1);
    offset = extract32(offset, 0, 2) * 8;

    if (a->size != 2 && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = neon_load_reg(a->vn, pass);
    switch (a->size) {
    case 0:
        if (offset) {
            tcg_gen_shri_i32(tmp, tmp, offset);
        }
        if (a->u) {
            gen_uxtb(tmp);
        } else {
            gen_sxtb(tmp);
        }
        break;
    case 1:
        if (a->u) {
            if (offset) {
                tcg_gen_shri_i32(tmp, tmp, 16);
            } else {
                gen_uxth(tmp);
            }
        } else {
            if (offset) {
                tcg_gen_sari_i32(tmp, tmp, 16);
            } else {
                gen_sxth(tmp);
            }
        }
        break;
    case 2:
        break;
    }
    store_reg(s, a->rt, tmp);

    return true;
}

static bool trans_VMOV_from_gp(DisasContext *s, arg_VMOV_from_gp *a)
{
    /* VMOV general purpose register to scalar */
    TCGv_i32 tmp, tmp2;
    int pass;
    uint32_t offset;

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vn & 0x10)) {
        return false;
    }

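    /* Same decomposition of the scaled index as in trans_VMOV_to_gp above */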
    offset = a->index << a->size;
    pass = extract32(offset, 2, 1);
    offset = extract32(offset, 0, 2) * 8;

    if (a->size != 2 && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = load_reg(s, a->rt);
    switch (a->size) {
    case 0:
        tmp2 = neon_load_reg(a->vn, pass);
        tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
        tcg_temp_free_i32(tmp2);
        break;
    case 1:
        tmp2 = neon_load_reg(a->vn, pass);
        tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
        tcg_temp_free_i32(tmp2);
        break;
    case 2:
        break;
    }
    neon_store_reg(a->vn, pass, tmp);

    return true;
}

static bool trans_VDUP(DisasContext *s, arg_VDUP *a)
{
    /* VDUP (general purpose register) */
    TCGv_i32 tmp;
    int size, vec_size;

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vn & 0x10)) {
        return false;
    }

    if (a->b && a->e) {
        return false;
    }

    if (a->q && (a->vn & 1)) {
        return false;
    }

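    /* The b and e bits select the element size: byte, halfword, else word */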
    vec_size = a->q ? 16 : 8;
    if (a->b) {
        size = 0;
    } else if (a->e) {
        size = 1;
    } else {
        size = 2;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = load_reg(s, a->rt);
    tcg_gen_gvec_dup_i32(size, neon_reg_offset(a->vn, 0),
                         vec_size, vec_size, tmp);
    tcg_temp_free_i32(tmp);

    return true;
}

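/*
 * VMSR/VMRS: move between a general-purpose register and a VFP
 * system register. Which registers exist, and whether they are
 * accessible, depends on the VFP version and the current mode.
 */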
static bool trans_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a)
{
    TCGv_i32 tmp;
    bool ignore_vfp_enabled = false;

    if (arm_dc_feature(s, ARM_FEATURE_M)) {
        /*
         * The only M-profile VFP vmrs/vmsr sysreg is FPSCR.
         * Writes to R15 are UNPREDICTABLE; we choose to undef.
         */
        if (a->rt == 15 || a->reg != ARM_VFP_FPSCR) {
            return false;
        }
    }

    switch (a->reg) {
    case ARM_VFP_FPSID:
        /*
         * VFPv2 allows access to FPSID from userspace; VFPv3 restricts
         * all ID registers to privileged access only.
         */
        if (IS_USER(s) && arm_dc_feature(s, ARM_FEATURE_VFP3)) {
            return false;
        }
        ignore_vfp_enabled = true;
        break;
    case ARM_VFP_MVFR0:
    case ARM_VFP_MVFR1:
        if (IS_USER(s) || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
            return false;
        }
        ignore_vfp_enabled = true;
        break;
    case ARM_VFP_MVFR2:
        if (IS_USER(s) || !arm_dc_feature(s, ARM_FEATURE_V8)) {
            return false;
        }
        ignore_vfp_enabled = true;
        break;
    case ARM_VFP_FPSCR:
        break;
    case ARM_VFP_FPEXC:
        if (IS_USER(s)) {
            return false;
        }
        ignore_vfp_enabled = true;
        break;
    case ARM_VFP_FPINST:
    case ARM_VFP_FPINST2:
        /* Not present in VFPv3 */
        if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_VFP3)) {
            return false;
        }
        break;
    default:
        return false;
    }

    if (!full_vfp_access_check(s, ignore_vfp_enabled)) {
        return true;
    }

    if (a->l) {
        /* VMRS, move VFP special register to gp register */
        switch (a->reg) {
        case ARM_VFP_FPSID:
        case ARM_VFP_FPEXC:
        case ARM_VFP_FPINST:
        case ARM_VFP_FPINST2:
        case ARM_VFP_MVFR0:
        case ARM_VFP_MVFR1:
        case ARM_VFP_MVFR2:
            tmp = load_cpu_field(vfp.xregs[a->reg]);
            break;
        case ARM_VFP_FPSCR:
            if (a->rt == 15) {
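                /* VMRS to APSR_nzcv: read only the FPSCR flag bits */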
                tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
                tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
            } else {
                tmp = tcg_temp_new_i32();
                gen_helper_vfp_get_fpscr(tmp, cpu_env);
            }
            break;
        default:
            g_assert_not_reached();
        }

        if (a->rt == 15) {
            /* Set the 4 flag bits in the CPSR. */
            gen_set_nzcv(tmp);
            tcg_temp_free_i32(tmp);
        } else {
            store_reg(s, a->rt, tmp);
        }
    } else {
        /* VMSR, move gp register to VFP special register */
        switch (a->reg) {
        case ARM_VFP_FPSID:
        case ARM_VFP_MVFR0:
        case ARM_VFP_MVFR1:
        case ARM_VFP_MVFR2:
            /* Writes are ignored. */
            break;
        case ARM_VFP_FPSCR:
            tmp = load_reg(s, a->rt);
            gen_helper_vfp_set_fpscr(cpu_env, tmp);
            tcg_temp_free_i32(tmp);
            gen_lookup_tb(s);
            break;
        case ARM_VFP_FPEXC:
            /*
             * TODO: VFP subarchitecture support.
             * For now, keep the EN bit only
             */
            tmp = load_reg(s, a->rt);
            tcg_gen_andi_i32(tmp, tmp, 1 << 30);
            store_cpu_field(tmp, vfp.xregs[a->reg]);
            gen_lookup_tb(s);
            break;
        case ARM_VFP_FPINST:
        case ARM_VFP_FPINST2:
            tmp = load_reg(s, a->rt);
            store_cpu_field(tmp, vfp.xregs[a->reg]);
            break;
        default:
            g_assert_not_reached();
        }
    }

    return true;
}

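/*
 * VMOV between a general-purpose register and a single-precision
 * VFP register.
 */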
static bool trans_VMOV_single(DisasContext *s, arg_VMOV_single *a)
{
    TCGv_i32 tmp;

    if (!vfp_access_check(s)) {
        return true;
    }

    if (a->l) {
        /* VFP to general purpose register */
        tmp = tcg_temp_new_i32();
        neon_load_reg32(tmp, a->vn);
        if (a->rt == 15) {
            /* Set the 4 flag bits in the CPSR. */
            gen_set_nzcv(tmp);
            tcg_temp_free_i32(tmp);
        } else {
            store_reg(s, a->rt, tmp);
        }
    } else {
        /* general purpose register to VFP */
        tmp = load_reg(s, a->rt);
        neon_store_reg32(tmp, a->vn);
        tcg_temp_free_i32(tmp);
    }

    return true;
}