/*
 * ARM translation: AArch32 VFP instructions
 *
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2005-2007 CodeSourcery
 * Copyright (c) 2007 OpenedHand, Ltd.
 * Copyright (c) 2019 Linaro, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/*
 * This file is intended to be included from translate.c; it uses
 * some macros and definitions provided by that file.
 * It might be possible to convert it to a standalone .c file eventually.
 */

/* Include the generated VFP decoder */
#include "decode-vfp.inc.c"
#include "decode-vfp-uncond.inc.c"

/*
 * Check that VFP access is enabled. If it is, do the necessary
 * M-profile lazy-FP handling and then return true.
 * If not, emit code to generate an appropriate exception and
 * return false.
 * The ignore_vfp_enabled argument specifies that we should ignore
 * whether VFP is enabled via FPEXC[EN]: this should be true for FMXR/FMRX
 * accesses to FPSID, FPEXC, MVFR0, MVFR1, MVFR2, and false for all other insns.
 */
static bool full_vfp_access_check(DisasContext *s, bool ignore_vfp_enabled)
{
    if (s->fp_excp_el) {
        if (arm_dc_feature(s, ARM_FEATURE_M)) {
            gen_exception_insn(s, 4, EXCP_NOCP, syn_uncategorized(),
                               s->fp_excp_el);
        } else {
            gen_exception_insn(s, 4, EXCP_UDEF,
                               syn_fp_access_trap(1, 0xe, false),
                               s->fp_excp_el);
        }
        return false;
    }

    if (!s->vfp_enabled && !ignore_vfp_enabled) {
        assert(!arm_dc_feature(s, ARM_FEATURE_M));
        gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
                           default_exception_el(s));
        return false;
    }

    if (arm_dc_feature(s, ARM_FEATURE_M)) {
        /* Handle M-profile lazy FP state mechanics */

        /* Trigger lazy-state preservation if necessary */
        if (s->v7m_lspact) {
            /*
             * Lazy state saving affects external memory and also the NVIC,
             * so we must mark it as an IO operation for icount.
             */
            if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
                gen_io_start();
            }
            gen_helper_v7m_preserve_fp_state(cpu_env);
            if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
                gen_io_end();
            }
            /*
             * If the preserve_fp_state helper doesn't throw an exception
             * then it will clear LSPACT; we don't need to repeat this for
             * any further FP insns in this TB.
             */
            s->v7m_lspact = false;
        }

        /* Update ownership of FP context: set FPCCR.S to match current state */
        if (s->v8m_fpccr_s_wrong) {
            TCGv_i32 tmp;

            tmp = load_cpu_field(v7m.fpccr[M_REG_S]);
            if (s->v8m_secure) {
                tcg_gen_ori_i32(tmp, tmp, R_V7M_FPCCR_S_MASK);
            } else {
                tcg_gen_andi_i32(tmp, tmp, ~R_V7M_FPCCR_S_MASK);
            }
            store_cpu_field(tmp, v7m.fpccr[M_REG_S]);
            /* Don't need to do this for any further FP insns in this TB */
            s->v8m_fpccr_s_wrong = false;
        }

        if (s->v7m_new_fp_ctxt_needed) {
            /*
             * Create new FP context by updating CONTROL.FPCA, CONTROL.SFPA
             * and the FPSCR.
             */
            TCGv_i32 control, fpscr;
            uint32_t bits = R_V7M_CONTROL_FPCA_MASK;

            fpscr = load_cpu_field(v7m.fpdscr[s->v8m_secure]);
            gen_helper_vfp_set_fpscr(cpu_env, fpscr);
            tcg_temp_free_i32(fpscr);
            /*
             * We don't need to arrange to end the TB, because the only
             * parts of FPSCR which we cache in the TB flags are the VECLEN
             * and VECSTRIDE, and those don't exist for M-profile.
             */

            if (s->v8m_secure) {
                bits |= R_V7M_CONTROL_SFPA_MASK;
            }
            control = load_cpu_field(v7m.control[M_REG_S]);
            tcg_gen_ori_i32(control, control, bits);
            store_cpu_field(control, v7m.control[M_REG_S]);
            /* Don't need to do this for any further FP insns in this TB */
            s->v7m_new_fp_ctxt_needed = false;
        }
    }

    return true;
}

/*
 * The most usual kind of VFP access check, for everything except
 * FMXR/FMRX to the always-available special registers.
 */
static bool vfp_access_check(DisasContext *s)
{
    return full_vfp_access_check(s, false);
}

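/*
 * VSEL: select either Vn or Vm as the result, depending on the condition
 * encoded in the insn (eq, vs, ge or gt in a->cc) evaluated against the
 * current NZCV flags.
 */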
static bool trans_VSEL(DisasContext *s, arg_VSEL *a)
{
    uint32_t rd, rn, rm;
    bool dp = a->dp;

    if (!dc_isar_feature(aa32_vsel, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (dp && !dc_isar_feature(aa32_fp_d32, s) &&
        ((a->vm | a->vn | a->vd) & 0x10)) {
        return false;
    }
    rd = a->vd;
    rn = a->vn;
    rm = a->vm;

    if (!vfp_access_check(s)) {
        return true;
    }

    if (dp) {
        TCGv_i64 frn, frm, dest;
        TCGv_i64 tmp, zero, zf, nf, vf;

        zero = tcg_const_i64(0);

        frn = tcg_temp_new_i64();
        frm = tcg_temp_new_i64();
        dest = tcg_temp_new_i64();

        zf = tcg_temp_new_i64();
        nf = tcg_temp_new_i64();
        vf = tcg_temp_new_i64();

        tcg_gen_extu_i32_i64(zf, cpu_ZF);
        tcg_gen_ext_i32_i64(nf, cpu_NF);
        tcg_gen_ext_i32_i64(vf, cpu_VF);

        neon_load_reg64(frn, rn);
        neon_load_reg64(frm, rm);
        switch (a->cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i64(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
                                frn, frm);
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i64(tmp);
            break;
        }
        neon_store_reg64(dest, rd);
        tcg_temp_free_i64(frn);
        tcg_temp_free_i64(frm);
        tcg_temp_free_i64(dest);

        tcg_temp_free_i64(zf);
        tcg_temp_free_i64(nf);
        tcg_temp_free_i64(vf);

        tcg_temp_free_i64(zero);
    } else {
        TCGv_i32 frn, frm, dest;
        TCGv_i32 tmp, zero;

        zero = tcg_const_i32(0);

        frn = tcg_temp_new_i32();
        frm = tcg_temp_new_i32();
        dest = tcg_temp_new_i32();
        neon_load_reg32(frn, rn);
        neon_load_reg32(frm, rm);
        switch (a->cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i32(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
                                frn, frm);
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i32(tmp);
            break;
        }
        neon_store_reg32(dest, rd);
        tcg_temp_free_i32(frn);
        tcg_temp_free_i32(frm);
        tcg_temp_free_i32(dest);

        tcg_temp_free_i32(zero);
    }

    return true;
}

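/*
 * VMAXNM/VMINNM: floating-point maximum/minimum with the IEEE 754-2008
 * maxNum()/minNum() treatment of quiet NaN inputs; a->op selects VMINNM.
 */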
static bool trans_VMINMAXNM(DisasContext *s, arg_VMINMAXNM *a)
{
    uint32_t rd, rn, rm;
    bool dp = a->dp;
    bool vmin = a->op;
    TCGv_ptr fpst;

    if (!dc_isar_feature(aa32_vminmaxnm, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (dp && !dc_isar_feature(aa32_fp_d32, s) &&
        ((a->vm | a->vn | a->vd) & 0x10)) {
        return false;
    }
    rd = a->vd;
    rn = a->vn;
    rm = a->vm;

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = get_fpstatus_ptr(0);

    if (dp) {
        TCGv_i64 frn, frm, dest;

        frn = tcg_temp_new_i64();
        frm = tcg_temp_new_i64();
        dest = tcg_temp_new_i64();

        neon_load_reg64(frn, rn);
        neon_load_reg64(frm, rm);
        if (vmin) {
            gen_helper_vfp_minnumd(dest, frn, frm, fpst);
        } else {
            gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
        }
        neon_store_reg64(dest, rd);
        tcg_temp_free_i64(frn);
        tcg_temp_free_i64(frm);
        tcg_temp_free_i64(dest);
    } else {
        TCGv_i32 frn, frm, dest;

        frn = tcg_temp_new_i32();
        frm = tcg_temp_new_i32();
        dest = tcg_temp_new_i32();

        neon_load_reg32(frn, rn);
        neon_load_reg32(frm, rm);
        if (vmin) {
            gen_helper_vfp_minnums(dest, frn, frm, fpst);
        } else {
            gen_helper_vfp_maxnums(dest, frn, frm, fpst);
        }
        neon_store_reg32(dest, rd);
        tcg_temp_free_i32(frn);
        tcg_temp_free_i32(frm);
        tcg_temp_free_i32(dest);
    }

    tcg_temp_free_ptr(fpst);
    return true;
}

/*
 * Table for converting the most common AArch32 encoding of
 * rounding mode to arm_fprounding order (which matches the
 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
 */
static const uint8_t fp_decode_rm[] = {
    FPROUNDING_TIEAWAY,
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
};

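/*
 * VRINT{A,N,P,M}: round to an integral value in floating-point format,
 * using the rounding mode encoded in the insn (via fp_decode_rm) rather
 * than the FPSCR rounding mode.
 */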
static bool trans_VRINT(DisasContext *s, arg_VRINT *a)
{
    uint32_t rd, rm;
    bool dp = a->dp;
    TCGv_ptr fpst;
    TCGv_i32 tcg_rmode;
    int rounding = fp_decode_rm[a->rm];

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (dp && !dc_isar_feature(aa32_fp_d32, s) &&
        ((a->vm | a->vd) & 0x10)) {
        return false;
    }
    rd = a->vd;
    rm = a->vm;

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = get_fpstatus_ptr(0);

    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);

    if (dp) {
        TCGv_i64 tcg_op;
        TCGv_i64 tcg_res;
        tcg_op = tcg_temp_new_i64();
        tcg_res = tcg_temp_new_i64();
        neon_load_reg64(tcg_op, rm);
        gen_helper_rintd(tcg_res, tcg_op, fpst);
        neon_store_reg64(tcg_res, rd);
        tcg_temp_free_i64(tcg_op);
        tcg_temp_free_i64(tcg_res);
    } else {
        TCGv_i32 tcg_op;
        TCGv_i32 tcg_res;
        tcg_op = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        neon_load_reg32(tcg_op, rm);
        gen_helper_rints(tcg_res, tcg_op, fpst);
        neon_store_reg32(tcg_res, rd);
        tcg_temp_free_i32(tcg_op);
        tcg_temp_free_i32(tcg_res);
    }

    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    tcg_temp_free_i32(tcg_rmode);

    tcg_temp_free_ptr(fpst);
    return true;
}

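/*
 * VCVT{A,N,P,M}: convert floating-point to integer, rounding with the
 * mode encoded in the insn (via fp_decode_rm) rather than the FPSCR
 * rounding mode; a->op selects a signed conversion.
 */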
static bool trans_VCVT(DisasContext *s, arg_VCVT *a)
{
    uint32_t rd, rm;
    bool dp = a->dp;
    TCGv_ptr fpst;
    TCGv_i32 tcg_rmode, tcg_shift;
    int rounding = fp_decode_rm[a->rm];
    bool is_signed = a->op;

    if (!dc_isar_feature(aa32_vcvt_dr, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (dp && !dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) {
        return false;
    }
    rd = a->vd;
    rm = a->vm;

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = get_fpstatus_ptr(0);

    tcg_shift = tcg_const_i32(0);

    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);

    if (dp) {
        TCGv_i64 tcg_double, tcg_res;
        TCGv_i32 tcg_tmp;
        tcg_double = tcg_temp_new_i64();
        tcg_res = tcg_temp_new_i64();
        tcg_tmp = tcg_temp_new_i32();
        neon_load_reg64(tcg_double, rm);
        if (is_signed) {
            gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
        } else {
            gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
        }
        tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
        neon_store_reg32(tcg_tmp, rd);
        tcg_temp_free_i32(tcg_tmp);
        tcg_temp_free_i64(tcg_res);
        tcg_temp_free_i64(tcg_double);
    } else {
        TCGv_i32 tcg_single, tcg_res;
        tcg_single = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        neon_load_reg32(tcg_single, rm);
        if (is_signed) {
            gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
        } else {
            gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
        }
        neon_store_reg32(tcg_res, rd);
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_single);
    }

    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    tcg_temp_free_i32(tcg_rmode);

    tcg_temp_free_i32(tcg_shift);

    tcg_temp_free_ptr(fpst);

    return true;
}

static bool trans_VMOV_to_gp(DisasContext *s, arg_VMOV_to_gp *a)
{
    /* VMOV scalar to general purpose register */
    TCGv_i32 tmp;
    int pass;
    uint32_t offset;

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vn & 0x10)) {
        return false;
    }

    offset = a->index << a->size;
    pass = extract32(offset, 2, 1);
    offset = extract32(offset, 0, 2) * 8;

    if (a->size != 2 && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = neon_load_reg(a->vn, pass);
    switch (a->size) {
    case 0:
        if (offset) {
            tcg_gen_shri_i32(tmp, tmp, offset);
        }
        if (a->u) {
            gen_uxtb(tmp);
        } else {
            gen_sxtb(tmp);
        }
        break;
    case 1:
        if (a->u) {
            if (offset) {
                tcg_gen_shri_i32(tmp, tmp, 16);
            } else {
                gen_uxth(tmp);
            }
        } else {
            if (offset) {
                tcg_gen_sari_i32(tmp, tmp, 16);
            } else {
                gen_sxth(tmp);
            }
        }
        break;
    case 2:
        break;
    }
    store_reg(s, a->rt, tmp);

    return true;
}

static bool trans_VMOV_from_gp(DisasContext *s, arg_VMOV_from_gp *a)
{
    /* VMOV general purpose register to scalar */
    TCGv_i32 tmp, tmp2;
    int pass;
    uint32_t offset;

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vn & 0x10)) {
        return false;
    }

    offset = a->index << a->size;
    pass = extract32(offset, 2, 1);
    offset = extract32(offset, 0, 2) * 8;

    if (a->size != 2 && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = load_reg(s, a->rt);
    switch (a->size) {
    case 0:
        tmp2 = neon_load_reg(a->vn, pass);
        tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
        tcg_temp_free_i32(tmp2);
        break;
    case 1:
        tmp2 = neon_load_reg(a->vn, pass);
        tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
        tcg_temp_free_i32(tmp2);
        break;
    case 2:
        break;
    }
    neon_store_reg(a->vn, pass, tmp);

    return true;
}

static bool trans_VDUP(DisasContext *s, arg_VDUP *a)
{
    /* VDUP (general purpose register) */
    TCGv_i32 tmp;
    int size, vec_size;

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vn & 0x10)) {
        return false;
    }

    if (a->b && a->e) {
        return false;
    }

    if (a->q && (a->vn & 1)) {
        return false;
    }

    vec_size = a->q ? 16 : 8;
    if (a->b) {
        size = 0;
    } else if (a->e) {
        size = 1;
    } else {
        size = 2;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = load_reg(s, a->rt);
    tcg_gen_gvec_dup_i32(size, neon_reg_offset(a->vn, 0),
                         vec_size, vec_size, tmp);
    tcg_temp_free_i32(tmp);

    return true;
}

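/*
 * VMSR/VMRS: move between a general-purpose register and a VFP system
 * register such as FPSCR, FPSID, FPEXC or the MVFRn ID registers.
 * a->l set means VMRS (read the system register into Rt).
 */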
static bool trans_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a)
{
    TCGv_i32 tmp;
    bool ignore_vfp_enabled = false;

    if (arm_dc_feature(s, ARM_FEATURE_M)) {
        /*
         * The only M-profile VFP vmrs/vmsr sysreg is FPSCR.
         * Writes to R15 are UNPREDICTABLE; we choose to undef.
         */
        if (a->rt == 15 || a->reg != ARM_VFP_FPSCR) {
            return false;
        }
    }

    switch (a->reg) {
    case ARM_VFP_FPSID:
        /*
         * VFPv2 allows access to FPSID from userspace; VFPv3 restricts
         * all ID registers to privileged access only.
         */
        if (IS_USER(s) && arm_dc_feature(s, ARM_FEATURE_VFP3)) {
            return false;
        }
        ignore_vfp_enabled = true;
        break;
    case ARM_VFP_MVFR0:
    case ARM_VFP_MVFR1:
        if (IS_USER(s) || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
            return false;
        }
        ignore_vfp_enabled = true;
        break;
    case ARM_VFP_MVFR2:
        if (IS_USER(s) || !arm_dc_feature(s, ARM_FEATURE_V8)) {
            return false;
        }
        ignore_vfp_enabled = true;
        break;
    case ARM_VFP_FPSCR:
        break;
    case ARM_VFP_FPEXC:
        if (IS_USER(s)) {
            return false;
        }
        ignore_vfp_enabled = true;
        break;
    case ARM_VFP_FPINST:
    case ARM_VFP_FPINST2:
        /* Not present in VFPv3 */
        if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_VFP3)) {
            return false;
        }
        break;
    default:
        return false;
    }

    if (!full_vfp_access_check(s, ignore_vfp_enabled)) {
        return true;
    }

    if (a->l) {
        /* VMRS, move VFP special register to gp register */
        switch (a->reg) {
        case ARM_VFP_FPSID:
        case ARM_VFP_FPEXC:
        case ARM_VFP_FPINST:
        case ARM_VFP_FPINST2:
        case ARM_VFP_MVFR0:
        case ARM_VFP_MVFR1:
        case ARM_VFP_MVFR2:
            tmp = load_cpu_field(vfp.xregs[a->reg]);
            break;
        case ARM_VFP_FPSCR:
            if (a->rt == 15) {
                tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
                tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
            } else {
                tmp = tcg_temp_new_i32();
                gen_helper_vfp_get_fpscr(tmp, cpu_env);
            }
            break;
        default:
            g_assert_not_reached();
        }

        if (a->rt == 15) {
            /* Set the 4 flag bits in the CPSR. */
            gen_set_nzcv(tmp);
            tcg_temp_free_i32(tmp);
        } else {
            store_reg(s, a->rt, tmp);
        }
    } else {
        /* VMSR, move gp register to VFP special register */
        switch (a->reg) {
        case ARM_VFP_FPSID:
        case ARM_VFP_MVFR0:
        case ARM_VFP_MVFR1:
        case ARM_VFP_MVFR2:
            /* Writes are ignored. */
            break;
        case ARM_VFP_FPSCR:
            tmp = load_reg(s, a->rt);
            gen_helper_vfp_set_fpscr(cpu_env, tmp);
            tcg_temp_free_i32(tmp);
            gen_lookup_tb(s);
            break;
        case ARM_VFP_FPEXC:
            /*
             * TODO: VFP subarchitecture support.
             * For now, keep the EN bit only
             */
            tmp = load_reg(s, a->rt);
            tcg_gen_andi_i32(tmp, tmp, 1 << 30);
            store_cpu_field(tmp, vfp.xregs[a->reg]);
            gen_lookup_tb(s);
            break;
        case ARM_VFP_FPINST:
        case ARM_VFP_FPINST2:
            tmp = load_reg(s, a->rt);
            store_cpu_field(tmp, vfp.xregs[a->reg]);
            break;
        default:
            g_assert_not_reached();
        }
    }

    return true;
}

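/* VMOV (between general-purpose and single-precision register) */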
static bool trans_VMOV_single(DisasContext *s, arg_VMOV_single *a)
{
    TCGv_i32 tmp;

    if (!vfp_access_check(s)) {
        return true;
    }

    if (a->l) {
        /* VFP to general purpose register */
        tmp = tcg_temp_new_i32();
        neon_load_reg32(tmp, a->vn);
        if (a->rt == 15) {
            /* Set the 4 flag bits in the CPSR. */
            gen_set_nzcv(tmp);
            tcg_temp_free_i32(tmp);
        } else {
            store_reg(s, a->rt, tmp);
        }
    } else {
        /* general purpose register to VFP */
        tmp = load_reg(s, a->rt);
        neon_store_reg32(tmp, a->vn);
        tcg_temp_free_i32(tmp);
    }

    return true;
}

static bool trans_VMOV_64_sp(DisasContext *s, arg_VMOV_64_sp *a)
{
    TCGv_i32 tmp;

    /*
     * VMOV between two general-purpose registers and two single precision
     * floating point registers
     */
    if (!vfp_access_check(s)) {
        return true;
    }

    if (a->op) {
        /* fpreg to gpreg */
        tmp = tcg_temp_new_i32();
        neon_load_reg32(tmp, a->vm);
        store_reg(s, a->rt, tmp);
        tmp = tcg_temp_new_i32();
        neon_load_reg32(tmp, a->vm + 1);
        store_reg(s, a->rt2, tmp);
    } else {
        /* gpreg to fpreg */
        tmp = load_reg(s, a->rt);
        neon_store_reg32(tmp, a->vm);
        tmp = load_reg(s, a->rt2);
        neon_store_reg32(tmp, a->vm + 1);
    }

    return true;
}

static bool trans_VMOV_64_dp(DisasContext *s, arg_VMOV_64_sp *a)
{
    TCGv_i32 tmp;

    /*
     * VMOV between two general-purpose registers and one double precision
     * floating point register
     */

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (a->op) {
        /* fpreg to gpreg */
        tmp = tcg_temp_new_i32();
        neon_load_reg32(tmp, a->vm * 2);
        store_reg(s, a->rt, tmp);
        tmp = tcg_temp_new_i32();
        neon_load_reg32(tmp, a->vm * 2 + 1);
        store_reg(s, a->rt2, tmp);
    } else {
        /* gpreg to fpreg */
        tmp = load_reg(s, a->rt);
        neon_store_reg32(tmp, a->vm * 2);
        tcg_temp_free_i32(tmp);
        tmp = load_reg(s, a->rt2);
        neon_store_reg32(tmp, a->vm * 2 + 1);
        tcg_temp_free_i32(tmp);
    }

    return true;
}

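/*
 * VLDR/VSTR: load or store one VFP register (single- or double-precision)
 * at an immediate offset from a base register.
 */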
static bool trans_VLDR_VSTR_sp(DisasContext *s, arg_VLDR_VSTR_sp *a)
{
    uint32_t offset;
    TCGv_i32 addr, tmp;

    if (!vfp_access_check(s)) {
        return true;
    }

    offset = a->imm << 2;
    if (!a->u) {
        offset = -offset;
    }

    if (s->thumb && a->rn == 15) {
        /* This is actually UNPREDICTABLE */
        addr = tcg_temp_new_i32();
        tcg_gen_movi_i32(addr, s->pc & ~2);
    } else {
        addr = load_reg(s, a->rn);
    }
    tcg_gen_addi_i32(addr, addr, offset);
    tmp = tcg_temp_new_i32();
    if (a->l) {
        gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
        neon_store_reg32(tmp, a->vd);
    } else {
        neon_load_reg32(tmp, a->vd);
        gen_aa32_st32(s, tmp, addr, get_mem_index(s));
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(addr);

    return true;
}

static bool trans_VLDR_VSTR_dp(DisasContext *s, arg_VLDR_VSTR_sp *a)
{
    uint32_t offset;
    TCGv_i32 addr;
    TCGv_i64 tmp;

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    offset = a->imm << 2;
    if (!a->u) {
        offset = -offset;
    }

    if (s->thumb && a->rn == 15) {
        /* This is actually UNPREDICTABLE */
        addr = tcg_temp_new_i32();
        tcg_gen_movi_i32(addr, s->pc & ~2);
    } else {
        addr = load_reg(s, a->rn);
    }
    tcg_gen_addi_i32(addr, addr, offset);
    tmp = tcg_temp_new_i64();
    if (a->l) {
        gen_aa32_ld64(s, tmp, addr, get_mem_index(s));
        neon_store_reg64(tmp, a->vd);
    } else {
        neon_load_reg64(tmp, a->vd);
        gen_aa32_st64(s, tmp, addr, get_mem_index(s));
    }
    tcg_temp_free_i64(tmp);
    tcg_temp_free_i32(addr);

    return true;
}

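/*
 * VLDM/VSTM: load or store multiple consecutive VFP registers, with
 * optional base-register writeback; this encoding also covers VPUSH/VPOP.
 */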
static bool trans_VLDM_VSTM_sp(DisasContext *s, arg_VLDM_VSTM_sp *a)
{
    uint32_t offset;
    TCGv_i32 addr, tmp;
    int i, n;

    n = a->imm;

    if (n == 0 || (a->vd + n) > 32) {
        /*
         * UNPREDICTABLE cases for bad immediates: we choose to
         * UNDEF to avoid generating huge numbers of TCG ops
         */
        return false;
    }
    if (a->rn == 15 && a->w) {
        /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (s->thumb && a->rn == 15) {
        /* This is actually UNPREDICTABLE */
        addr = tcg_temp_new_i32();
        tcg_gen_movi_i32(addr, s->pc & ~2);
    } else {
        addr = load_reg(s, a->rn);
    }
    if (a->p) {
        /* pre-decrement */
        tcg_gen_addi_i32(addr, addr, -(a->imm << 2));
    }

    if (s->v8m_stackcheck && a->rn == 13 && a->w) {
        /*
         * Here 'addr' is the lowest address we will store to,
         * and is either the old SP (if post-increment) or
         * the new SP (if pre-decrement). For post-increment
         * where the old value is below the limit and the new
         * value is above, it is UNKNOWN whether the limit check
         * triggers; we choose to trigger.
         */
        gen_helper_v8m_stackcheck(cpu_env, addr);
    }

    offset = 4;
    tmp = tcg_temp_new_i32();
    for (i = 0; i < n; i++) {
        if (a->l) {
            /* load */
            gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
            neon_store_reg32(tmp, a->vd + i);
        } else {
            /* store */
            neon_load_reg32(tmp, a->vd + i);
            gen_aa32_st32(s, tmp, addr, get_mem_index(s));
        }
        tcg_gen_addi_i32(addr, addr, offset);
    }
    tcg_temp_free_i32(tmp);
    if (a->w) {
        /* writeback */
        if (a->p) {
            offset = -offset * n;
            tcg_gen_addi_i32(addr, addr, offset);
        }
        store_reg(s, a->rn, addr);
    } else {
        tcg_temp_free_i32(addr);
    }

    return true;
}

static bool trans_VLDM_VSTM_dp(DisasContext *s, arg_VLDM_VSTM_dp *a)
{
    uint32_t offset;
    TCGv_i32 addr;
    TCGv_i64 tmp;
    int i, n;

    n = a->imm >> 1;

    if (n == 0 || (a->vd + n) > 32 || n > 16) {
        /*
         * UNPREDICTABLE cases for bad immediates: we choose to
         * UNDEF to avoid generating huge numbers of TCG ops
         */
        return false;
    }
    if (a->rn == 15 && a->w) {
        /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd + n) > 16) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (s->thumb && a->rn == 15) {
        /* This is actually UNPREDICTABLE */
        addr = tcg_temp_new_i32();
        tcg_gen_movi_i32(addr, s->pc & ~2);
    } else {
        addr = load_reg(s, a->rn);
    }
    if (a->p) {
        /* pre-decrement */
        tcg_gen_addi_i32(addr, addr, -(a->imm << 2));
    }

    if (s->v8m_stackcheck && a->rn == 13 && a->w) {
        /*
         * Here 'addr' is the lowest address we will store to,
         * and is either the old SP (if post-increment) or
         * the new SP (if pre-decrement). For post-increment
         * where the old value is below the limit and the new
         * value is above, it is UNKNOWN whether the limit check
         * triggers; we choose to trigger.
         */
        gen_helper_v8m_stackcheck(cpu_env, addr);
    }

    offset = 8;
    tmp = tcg_temp_new_i64();
    for (i = 0; i < n; i++) {
        if (a->l) {
            /* load */
            gen_aa32_ld64(s, tmp, addr, get_mem_index(s));
            neon_store_reg64(tmp, a->vd + i);
        } else {
            /* store */
            neon_load_reg64(tmp, a->vd + i);
            gen_aa32_st64(s, tmp, addr, get_mem_index(s));
        }
        tcg_gen_addi_i32(addr, addr, offset);
    }
    tcg_temp_free_i64(tmp);
    if (a->w) {
        /* writeback */
        if (a->p) {
            offset = -offset * n;
        } else if (a->imm & 1) {
            offset = 4;
        } else {
            offset = 0;
        }

        if (offset != 0) {
            tcg_gen_addi_i32(addr, addr, offset);
        }
        store_reg(s, a->rn, addr);
    } else {
        tcg_temp_free_i32(addr);
    }

    return true;
}

/*
 * Types for callbacks for do_vfp_3op_sp() and do_vfp_3op_dp().
 * The callback should emit code to write a value to vd. If
 * do_vfp_3op_{sp,dp}() was passed reads_vd then the TCGv vd
 * will contain the old value of the relevant VFP register;
 * otherwise it must be written to only.
 */
typedef void VFPGen3OpSPFn(TCGv_i32 vd,
                           TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst);
typedef void VFPGen3OpDPFn(TCGv_i64 vd,
                           TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst);

/*
 * Perform a 3-operand VFP data processing instruction. fn is the
 * callback to do the actual operation; this function deals with the
 * code to handle looping around for VFP vector processing.
 */
static bool do_vfp_3op_sp(DisasContext *s, VFPGen3OpSPFn *fn,
                          int vd, int vn, int vm, bool reads_vd)
{
    uint32_t delta_m = 0;
    uint32_t delta_d = 0;
    uint32_t bank_mask = 0;
    int veclen = s->vec_len;
    TCGv_i32 f0, f1, fd;
    TCGv_ptr fpst;

    if (!dc_isar_feature(aa32_fpshvec, s) &&
        (veclen != 0 || s->vec_stride != 0)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (veclen > 0) {
        bank_mask = 0x18;

        /* Figure out what type of vector operation this is. */
        if ((vd & bank_mask) == 0) {
            /* scalar */
            veclen = 0;
        } else {
            delta_d = s->vec_stride + 1;

            if ((vm & bank_mask) == 0) {
                /* mixed scalar/vector */
                delta_m = 0;
            } else {
                /* vector */
                delta_m = delta_d;
            }
        }
    }

    f0 = tcg_temp_new_i32();
    f1 = tcg_temp_new_i32();
    fd = tcg_temp_new_i32();
    fpst = get_fpstatus_ptr(0);

    neon_load_reg32(f0, vn);
    neon_load_reg32(f1, vm);

    for (;;) {
        if (reads_vd) {
            neon_load_reg32(fd, vd);
        }
        fn(fd, f0, f1, fpst);
        neon_store_reg32(fd, vd);

        if (veclen == 0) {
            break;
        }

        /* Set up the operands for the next iteration */
        veclen--;
        vd = ((vd + delta_d) & (bank_mask - 1)) | (vd & bank_mask);
        vn = ((vn + delta_d) & (bank_mask - 1)) | (vn & bank_mask);
        neon_load_reg32(f0, vn);
        if (delta_m) {
            vm = ((vm + delta_m) & (bank_mask - 1)) | (vm & bank_mask);
            neon_load_reg32(f1, vm);
        }
    }

    tcg_temp_free_i32(f0);
    tcg_temp_free_i32(f1);
    tcg_temp_free_i32(fd);
    tcg_temp_free_ptr(fpst);

    return true;
}

static bool do_vfp_3op_dp(DisasContext *s, VFPGen3OpDPFn *fn,
                          int vd, int vn, int vm, bool reads_vd)
{
    uint32_t delta_m = 0;
    uint32_t delta_d = 0;
    uint32_t bank_mask = 0;
    int veclen = s->vec_len;
    TCGv_i64 f0, f1, fd;
    TCGv_ptr fpst;

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_fp_d32, s) && ((vd | vn | vm) & 0x10)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpshvec, s) &&
        (veclen != 0 || s->vec_stride != 0)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (veclen > 0) {
        bank_mask = 0xc;

        /* Figure out what type of vector operation this is. */
        if ((vd & bank_mask) == 0) {
            /* scalar */
            veclen = 0;
        } else {
            delta_d = (s->vec_stride >> 1) + 1;

            if ((vm & bank_mask) == 0) {
                /* mixed scalar/vector */
                delta_m = 0;
            } else {
                /* vector */
                delta_m = delta_d;
            }
        }
    }

    f0 = tcg_temp_new_i64();
    f1 = tcg_temp_new_i64();
    fd = tcg_temp_new_i64();
    fpst = get_fpstatus_ptr(0);

    neon_load_reg64(f0, vn);
    neon_load_reg64(f1, vm);

    for (;;) {
        if (reads_vd) {
            neon_load_reg64(fd, vd);
        }
        fn(fd, f0, f1, fpst);
        neon_store_reg64(fd, vd);

        if (veclen == 0) {
            break;
        }
        /* Set up the operands for the next iteration */
        veclen--;
        vd = ((vd + delta_d) & (bank_mask - 1)) | (vd & bank_mask);
        vn = ((vn + delta_d) & (bank_mask - 1)) | (vn & bank_mask);
        neon_load_reg64(f0, vn);
        if (delta_m) {
            vm = ((vm + delta_m) & (bank_mask - 1)) | (vm & bank_mask);
            neon_load_reg64(f1, vm);
        }
    }

    tcg_temp_free_i64(f0);
    tcg_temp_free_i64(f1);
    tcg_temp_free_i64(fd);
    tcg_temp_free_ptr(fpst);

    return true;
}

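/* VMLA: vd = vd + (vn * vm) */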
static void gen_VMLA_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /* Note that order of inputs to the add matters for NaNs */
    TCGv_i32 tmp = tcg_temp_new_i32();

    gen_helper_vfp_muls(tmp, vn, vm, fpst);
    gen_helper_vfp_adds(vd, vd, tmp, fpst);
    tcg_temp_free_i32(tmp);
}

static bool trans_VMLA_sp(DisasContext *s, arg_VMLA_sp *a)
{
    return do_vfp_3op_sp(s, gen_VMLA_sp, a->vd, a->vn, a->vm, true);
}

static void gen_VMLA_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
{
    /* Note that order of inputs to the add matters for NaNs */
    TCGv_i64 tmp = tcg_temp_new_i64();

    gen_helper_vfp_muld(tmp, vn, vm, fpst);
    gen_helper_vfp_addd(vd, vd, tmp, fpst);
    tcg_temp_free_i64(tmp);
}

static bool trans_VMLA_dp(DisasContext *s, arg_VMLA_sp *a)
{
    return do_vfp_3op_dp(s, gen_VMLA_dp, a->vd, a->vn, a->vm, true);
}

static void gen_VMLS_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /*
     * VMLS: vd = vd + -(vn * vm)
     * Note that order of inputs to the add matters for NaNs.
     */
    TCGv_i32 tmp = tcg_temp_new_i32();

    gen_helper_vfp_muls(tmp, vn, vm, fpst);
    gen_helper_vfp_negs(tmp, tmp);
    gen_helper_vfp_adds(vd, vd, tmp, fpst);
    tcg_temp_free_i32(tmp);
}

static bool trans_VMLS_sp(DisasContext *s, arg_VMLS_sp *a)
{
    return do_vfp_3op_sp(s, gen_VMLS_sp, a->vd, a->vn, a->vm, true);
}

static void gen_VMLS_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
{
    /*
     * VMLS: vd = vd + -(vn * vm)
     * Note that order of inputs to the add matters for NaNs.
     */
    TCGv_i64 tmp = tcg_temp_new_i64();

    gen_helper_vfp_muld(tmp, vn, vm, fpst);
    gen_helper_vfp_negd(tmp, tmp);
    gen_helper_vfp_addd(vd, vd, tmp, fpst);
    tcg_temp_free_i64(tmp);
}

static bool trans_VMLS_dp(DisasContext *s, arg_VMLS_sp *a)
{
    return do_vfp_3op_dp(s, gen_VMLS_dp, a->vd, a->vn, a->vm, true);
}

static void gen_VNMLS_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /*
     * VNMLS: -fd + (fn * fm)
     * Note that it isn't valid to replace (-A + B) with (B - A) or similar
     * plausible looking simplifications because this will give wrong results
     * for NaNs.
     */
    TCGv_i32 tmp = tcg_temp_new_i32();

    gen_helper_vfp_muls(tmp, vn, vm, fpst);
    gen_helper_vfp_negs(vd, vd);
    gen_helper_vfp_adds(vd, vd, tmp, fpst);
    tcg_temp_free_i32(tmp);
}

static bool trans_VNMLS_sp(DisasContext *s, arg_VNMLS_sp *a)
{
    return do_vfp_3op_sp(s, gen_VNMLS_sp, a->vd, a->vn, a->vm, true);
}

static void gen_VNMLS_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
{
    /*
     * VNMLS: -fd + (fn * fm)
     * Note that it isn't valid to replace (-A + B) with (B - A) or similar
     * plausible looking simplifications because this will give wrong results
     * for NaNs.
     */
    TCGv_i64 tmp = tcg_temp_new_i64();

    gen_helper_vfp_muld(tmp, vn, vm, fpst);
    gen_helper_vfp_negd(vd, vd);
    gen_helper_vfp_addd(vd, vd, tmp, fpst);
    tcg_temp_free_i64(tmp);
}

static bool trans_VNMLS_dp(DisasContext *s, arg_VNMLS_sp *a)
{
    return do_vfp_3op_dp(s, gen_VNMLS_dp, a->vd, a->vn, a->vm, true);
}

static void gen_VNMLA_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /* VNMLA: -fd + -(fn * fm) */
    TCGv_i32 tmp = tcg_temp_new_i32();

    gen_helper_vfp_muls(tmp, vn, vm, fpst);
    gen_helper_vfp_negs(tmp, tmp);
    gen_helper_vfp_negs(vd, vd);
    gen_helper_vfp_adds(vd, vd, tmp, fpst);
    tcg_temp_free_i32(tmp);
}

static bool trans_VNMLA_sp(DisasContext *s, arg_VNMLA_sp *a)
{
    return do_vfp_3op_sp(s, gen_VNMLA_sp, a->vd, a->vn, a->vm, true);
}

static void gen_VNMLA_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
{
    /* VNMLA: -fd + -(fn * fm) */
    TCGv_i64 tmp = tcg_temp_new_i64();

    gen_helper_vfp_muld(tmp, vn, vm, fpst);
    gen_helper_vfp_negd(tmp, tmp);
    gen_helper_vfp_negd(vd, vd);
    gen_helper_vfp_addd(vd, vd, tmp, fpst);
    tcg_temp_free_i64(tmp);
}

static bool trans_VNMLA_dp(DisasContext *s, arg_VNMLA_sp *a)
{
    return do_vfp_3op_dp(s, gen_VNMLA_dp, a->vd, a->vn, a->vm, true);
}

static bool trans_VMUL_sp(DisasContext *s, arg_VMUL_sp *a)
{
    return do_vfp_3op_sp(s, gen_helper_vfp_muls, a->vd, a->vn, a->vm, false);
}

static bool trans_VMUL_dp(DisasContext *s, arg_VMUL_sp *a)
{
    return do_vfp_3op_dp(s, gen_helper_vfp_muld, a->vd, a->vn, a->vm, false);
}

static void gen_VNMUL_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /* VNMUL: -(fn * fm) */
    gen_helper_vfp_muls(vd, vn, vm, fpst);
    gen_helper_vfp_negs(vd, vd);
}

static bool trans_VNMUL_sp(DisasContext *s, arg_VNMUL_sp *a)
{
    return do_vfp_3op_sp(s, gen_VNMUL_sp, a->vd, a->vn, a->vm, false);
}

static void gen_VNMUL_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
{
    /* VNMUL: -(fn * fm) */
    gen_helper_vfp_muld(vd, vn, vm, fpst);
    gen_helper_vfp_negd(vd, vd);
}

static bool trans_VNMUL_dp(DisasContext *s, arg_VNMUL_sp *a)
{
    return do_vfp_3op_dp(s, gen_VNMUL_dp, a->vd, a->vn, a->vm, false);
}

static bool trans_VADD_sp(DisasContext *s, arg_VADD_sp *a)
{
    return do_vfp_3op_sp(s, gen_helper_vfp_adds, a->vd, a->vn, a->vm, false);
}

static bool trans_VADD_dp(DisasContext *s, arg_VADD_sp *a)
{
    return do_vfp_3op_dp(s, gen_helper_vfp_addd, a->vd, a->vn, a->vm, false);
}