/*
 * PowerPC memory access emulation helpers for QEMU.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "qemu/host-utils.h"
#include "qemu/main-loop.h"
#include "exec/helper-proto.h"
#include "helper_regs.h"
#include "exec/cpu_ldst.h"
#include "tcg/tcg.h"
#include "internal.h"
#include "qemu/atomic128.h"

/* #define DEBUG_OP */

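/*
 * A worked example of the check below: a TARGET_WORDS_BIGENDIAN build
 * emulating a guest that has set MSR[LE] must byteswap every data
 * access, while the same guest state on a little-endian target build
 * needs no swap.
 */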
static inline bool needs_byteswap(const CPUPPCState *env)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return msr_le;
#else
    return !msr_le;
#endif
}

/*****************************************************************************/
/* Memory load and stores */

static inline target_ulong addr_add(CPUPPCState *env, target_ulong addr,
                                    target_long arg)
{
#if defined(TARGET_PPC64)
    if (!msr_is_64bit(env, env->msr)) {
        return (uint32_t)(addr + arg);
    } else
#endif
    {
        return addr + arg;
    }
}

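/*
 * E.g. in 32-bit mode (msr_is_64bit() false) on a PPC64 target,
 * addr_add(env, 0xfffffffc, 8) wraps to 0x4 instead of producing
 * 0x1_0000_0004, matching 32-bit effective-address arithmetic.
 */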
static void *probe_contiguous(CPUPPCState *env, target_ulong addr, uint32_t nb,
                              MMUAccessType access_type, int mmu_idx,
                              uintptr_t raddr)
{
    void *host1, *host2;
    uint32_t nb_pg1, nb_pg2;

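    /*
     * -(addr | TARGET_PAGE_MASK) is the number of bytes from addr to
     * the end of its page: with 4 KiB pages, addr = ...0xff8 yields
     * nb_pg1 = 8, and a page-aligned addr yields TARGET_PAGE_SIZE.
     */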
    nb_pg1 = -(addr | TARGET_PAGE_MASK);
    if (likely(nb <= nb_pg1)) {
        /* The entire operation is on a single page. */
        return probe_access(env, addr, nb, access_type, mmu_idx, raddr);
    }

    /* The operation spans two pages. */
    nb_pg2 = nb - nb_pg1;
    host1 = probe_access(env, addr, nb_pg1, access_type, mmu_idx, raddr);
    addr = addr_add(env, addr, nb_pg1);
    host2 = probe_access(env, addr, nb_pg2, access_type, mmu_idx, raddr);

    /* If the two host pages are contiguous, optimize. */
    if (host2 == host1 + nb_pg1) {
        return host1;
    }
    return NULL;
}

void helper_lmw(CPUPPCState *env, target_ulong addr, uint32_t reg)
{
    uintptr_t raddr = GETPC();
    int mmu_idx = cpu_mmu_index(env, false);
    void *host = probe_contiguous(env, addr, (32 - reg) * 4,
                                  MMU_DATA_LOAD, mmu_idx, raddr);

    if (likely(host)) {
        /* Fast path -- the entire operation is in RAM at host. */
        for (; reg < 32; reg++) {
            env->gpr[reg] = (uint32_t)ldl_be_p(host);
            host += 4;
        }
    } else {
        /* Slow path -- at least some of the operation requires i/o. */
        for (; reg < 32; reg++) {
            env->gpr[reg] = cpu_ldl_mmuidx_ra(env, addr, mmu_idx, raddr);
            addr = addr_add(env, addr, 4);
        }
    }
}

void helper_stmw(CPUPPCState *env, target_ulong addr, uint32_t reg)
{
    uintptr_t raddr = GETPC();
    int mmu_idx = cpu_mmu_index(env, false);
    void *host = probe_contiguous(env, addr, (32 - reg) * 4,
                                  MMU_DATA_STORE, mmu_idx, raddr);

    if (likely(host)) {
        /* Fast path -- the entire operation is in RAM at host. */
        for (; reg < 32; reg++) {
            stl_be_p(host, env->gpr[reg]);
            host += 4;
        }
    } else {
        /* Slow path -- at least some of the operation requires i/o. */
        for (; reg < 32; reg++) {
            cpu_stl_mmuidx_ra(env, addr, env->gpr[reg], mmu_idx, raddr);
            addr = addr_add(env, addr, 4);
        }
    }
}

static void do_lsw(CPUPPCState *env, target_ulong addr, uint32_t nb,
                   uint32_t reg, uintptr_t raddr)
{
    int mmu_idx;
    void *host;
    uint32_t val;

    if (unlikely(nb == 0)) {
        return;
    }

    mmu_idx = cpu_mmu_index(env, false);
    host = probe_contiguous(env, addr, nb, MMU_DATA_LOAD, mmu_idx, raddr);

    if (likely(host)) {
        /* Fast path -- the entire operation is in RAM at host. */
        for (; nb > 3; nb -= 4) {
            env->gpr[reg] = (uint32_t)ldl_be_p(host);
            reg = (reg + 1) % 32;
            host += 4;
        }
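        /*
         * The final 1..3 bytes are left-justified in the last
         * register with the remaining low-order bytes zeroed, e.g.
         * nb == 2 places the two bytes in bits 31:16 of val.
         */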
        switch (nb) {
        default:
            return;
        case 1:
            val = ldub_p(host) << 24;
            break;
        case 2:
            val = lduw_be_p(host) << 16;
            break;
        case 3:
            val = (lduw_be_p(host) << 16) | (ldub_p(host + 2) << 8);
            break;
        }
    } else {
        /* Slow path -- at least some of the operation requires i/o. */
        for (; nb > 3; nb -= 4) {
            env->gpr[reg] = cpu_ldl_mmuidx_ra(env, addr, mmu_idx, raddr);
            reg = (reg + 1) % 32;
            addr = addr_add(env, addr, 4);
        }
        switch (nb) {
        default:
            return;
        case 1:
            val = cpu_ldub_mmuidx_ra(env, addr, mmu_idx, raddr) << 24;
            break;
        case 2:
            val = cpu_lduw_mmuidx_ra(env, addr, mmu_idx, raddr) << 16;
            break;
        case 3:
            val = cpu_lduw_mmuidx_ra(env, addr, mmu_idx, raddr) << 16;
            addr = addr_add(env, addr, 2);
            val |= cpu_ldub_mmuidx_ra(env, addr, mmu_idx, raddr) << 8;
            break;
        }
    }
    env->gpr[reg] = val;
}

void helper_lsw(CPUPPCState *env, target_ulong addr,
                uint32_t nb, uint32_t reg)
{
    do_lsw(env, addr, nb, reg, GETPC());
}

/*
 * The PPC32 specification says we must generate an exception if rA is
 * in the range of registers to be loaded. On the other hand, IBM says
 * this is valid, but rA won't be loaded. For now, I'll follow the
 * spec...
 */
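/*
 * E.g. reg = 5 with XER[bc] = 16 uses DIV_ROUND_UP(16, 4) = 4
 * registers, r5..r8; ra = 6 or rb = 8 would then fall in the
 * destination range and take the program interrupt below.
 */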
void helper_lswx(CPUPPCState *env, target_ulong addr, uint32_t reg,
                 uint32_t ra, uint32_t rb)
{
    if (likely(xer_bc != 0)) {
        int num_used_regs = DIV_ROUND_UP(xer_bc, 4);
        if (unlikely((ra != 0 && lsw_reg_in_range(reg, num_used_regs, ra)) ||
                     lsw_reg_in_range(reg, num_used_regs, rb))) {
            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL |
                                   POWERPC_EXCP_INVAL_LSWX, GETPC());
        } else {
            do_lsw(env, addr, xer_bc, reg, GETPC());
        }
    }
}

void helper_stsw(CPUPPCState *env, target_ulong addr, uint32_t nb,
                 uint32_t reg)
{
    uintptr_t raddr = GETPC();
    int mmu_idx;
    void *host;
    uint32_t val;

    if (unlikely(nb == 0)) {
        return;
    }

    mmu_idx = cpu_mmu_index(env, false);
    host = probe_contiguous(env, addr, nb, MMU_DATA_STORE, mmu_idx, raddr);

    if (likely(host)) {
        /* Fast path -- the entire operation is in RAM at host. */
        for (; nb > 3; nb -= 4) {
            stl_be_p(host, env->gpr[reg]);
            reg = (reg + 1) % 32;
            host += 4;
        }
        val = env->gpr[reg];
        switch (nb) {
        case 1:
            stb_p(host, val >> 24);
            break;
        case 2:
            stw_be_p(host, val >> 16);
            break;
        case 3:
            stw_be_p(host, val >> 16);
            stb_p(host + 2, val >> 8);
            break;
        }
    } else {
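        /* Slow path -- at least some of the operation requires i/o. */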
        for (; nb > 3; nb -= 4) {
            cpu_stl_mmuidx_ra(env, addr, env->gpr[reg], mmu_idx, raddr);
            reg = (reg + 1) % 32;
            addr = addr_add(env, addr, 4);
        }
        val = env->gpr[reg];
        switch (nb) {
        case 1:
            cpu_stb_mmuidx_ra(env, addr, val >> 24, mmu_idx, raddr);
            break;
        case 2:
            cpu_stw_mmuidx_ra(env, addr, val >> 16, mmu_idx, raddr);
            break;
        case 3:
            cpu_stw_mmuidx_ra(env, addr, val >> 16, mmu_idx, raddr);
            addr = addr_add(env, addr, 2);
            cpu_stb_mmuidx_ra(env, addr, val >> 8, mmu_idx, raddr);
            break;
        }
    }
}

static void dcbz_common(CPUPPCState *env, target_ulong addr,
                        uint32_t opcode, bool epid, uintptr_t retaddr)
{
    target_ulong mask, dcbz_size = env->dcache_line_size;
    uint32_t i;
    void *haddr;
    int mmu_idx = epid ? PPC_TLB_EPID_STORE : env->dmmu_idx;

#if defined(TARGET_PPC64)
    /* Check for dcbz vs dcbzl on 970 */
    if (env->excp_model == POWERPC_EXCP_970 &&
        !(opcode & 0x00200000) && ((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1) {
        dcbz_size = 32;
    }
#endif

    /* Align address */
    mask = ~(dcbz_size - 1);
    addr &= mask;

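    /*
     * E.g. a 128-byte line gives mask = ~0x7f, so a dcbz at 0x1234
     * zeroes the whole line starting at 0x1200.
     */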
    /* Check reservation */
    if ((env->reserve_addr & mask) == addr) {
        env->reserve_addr = (target_ulong)-1ULL;
    }

    /* Try fast path translate */
    haddr = probe_write(env, addr, dcbz_size, mmu_idx, retaddr);
    if (haddr) {
        memset(haddr, 0, dcbz_size);
    } else {
        /* Slow path */
        for (i = 0; i < dcbz_size; i += 8) {
            cpu_stq_mmuidx_ra(env, addr + i, 0, mmu_idx, retaddr);
        }
    }
}

void helper_dcbz(CPUPPCState *env, target_ulong addr, uint32_t opcode)
{
    dcbz_common(env, addr, opcode, false, GETPC());
}

void helper_dcbzep(CPUPPCState *env, target_ulong addr, uint32_t opcode)
{
    dcbz_common(env, addr, opcode, true, GETPC());
}

void helper_icbi(CPUPPCState *env, target_ulong addr)
{
    addr &= ~(env->dcache_line_size - 1);
    /*
     * Invalidate one cache line:
     * PowerPC specification says this is to be treated like a load
     * (not a fetch) by the MMU. To be sure it will be so,
     * do the load "by hand".
     */
    cpu_ldl_data_ra(env, addr, GETPC());
}

void helper_icbiep(CPUPPCState *env, target_ulong addr)
{
#if !defined(CONFIG_USER_ONLY)
    /* See comments above */
    addr &= ~(env->dcache_line_size - 1);
    cpu_ldl_mmuidx_ra(env, addr, PPC_TLB_EPID_LOAD, GETPC());
#endif
}

/* XXX: to be tested */
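/*
 * lscbx (Load String And Compare Byte Indexed) is a POWER/601-era
 * string instruction: it loads up to XER[bc] bytes into successive
 * registers, four bytes per register from the most-significant byte
 * lane down, stopping early when a loaded byte equals the XER
 * comparison byte. The helper returns the number of bytes transferred.
 */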
target_ulong helper_lscbx(CPUPPCState *env, target_ulong addr, uint32_t reg,
                          uint32_t ra, uint32_t rb)
{
    int i, c, d;

    d = 24;
    for (i = 0; i < xer_bc; i++) {
        c = cpu_ldub_data_ra(env, addr, GETPC());
        addr = addr_add(env, addr, 1);
        /* ra (if not 0) and rb are never modified */
        if (likely(reg != rb && (ra == 0 || reg != ra))) {
            env->gpr[reg] = (env->gpr[reg] & ~(0xFF << d)) | (c << d);
        }
        if (unlikely(c == xer_cmp)) {
            break;
        }
        if (likely(d != 0)) {
            d -= 8;
        } else {
            d = 24;
            reg++;
            reg = reg & 0x1F;
        }
    }
    return i;
}

#ifdef TARGET_PPC64
uint64_t helper_lq_le_parallel(CPUPPCState *env, target_ulong addr,
                               uint32_t opidx)
{
    Int128 ret;

    /* We will have raised EXCP_ATOMIC from the translator. */
    assert(HAVE_ATOMIC128);
    ret = helper_atomic_ldo_le_mmu(env, addr, opidx, GETPC());
    env->retxh = int128_gethi(ret);
    return int128_getlo(ret);
}

uint64_t helper_lq_be_parallel(CPUPPCState *env, target_ulong addr,
                               uint32_t opidx)
{
    Int128 ret;

    /* We will have raised EXCP_ATOMIC from the translator. */
    assert(HAVE_ATOMIC128);
    ret = helper_atomic_ldo_be_mmu(env, addr, opidx, GETPC());
    env->retxh = int128_gethi(ret);
    return int128_getlo(ret);
}

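/*
 * A helper can return only one 64-bit value, so the 128-bit lq result
 * above comes back in two halves: the low 64 bits as the return value
 * and the high 64 bits through env->retxh, from which the translator
 * moves it into the second target register.
 */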
void helper_stq_le_parallel(CPUPPCState *env, target_ulong addr,
                            uint64_t lo, uint64_t hi, uint32_t opidx)
{
    Int128 val;

    /* We will have raised EXCP_ATOMIC from the translator. */
    assert(HAVE_ATOMIC128);
    val = int128_make128(lo, hi);
    helper_atomic_sto_le_mmu(env, addr, val, opidx, GETPC());
}

void helper_stq_be_parallel(CPUPPCState *env, target_ulong addr,
                            uint64_t lo, uint64_t hi, uint32_t opidx)
{
    Int128 val;

    /* We will have raised EXCP_ATOMIC from the translator. */
    assert(HAVE_ATOMIC128);
    val = int128_make128(lo, hi);
    helper_atomic_sto_be_mmu(env, addr, val, opidx, GETPC());
}

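/*
 * stqcx. succeeds only when the 16-byte cmpxchg against the value
 * recorded at reservation time (reserve_val/reserve_val2) still
 * matches. The helpers below return the CR0 field directly: the EQ
 * bit (CRF_EQ_BIT) when the store succeeded, combined with the copy
 * of XER[SO] kept in env->so; LT and GT are always zero.
 */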
uint32_t helper_stqcx_le_parallel(CPUPPCState *env, target_ulong addr,
                                  uint64_t new_lo, uint64_t new_hi,
                                  uint32_t opidx)
{
    bool success = false;

    /* We will have raised EXCP_ATOMIC from the translator. */
    assert(HAVE_CMPXCHG128);

    if (likely(addr == env->reserve_addr)) {
        Int128 oldv, cmpv, newv;

        cmpv = int128_make128(env->reserve_val2, env->reserve_val);
        newv = int128_make128(new_lo, new_hi);
        oldv = helper_atomic_cmpxchgo_le_mmu(env, addr, cmpv, newv,
                                             opidx, GETPC());
        success = int128_eq(oldv, cmpv);
    }
    env->reserve_addr = -1;
    return env->so + success * CRF_EQ_BIT;
}

uint32_t helper_stqcx_be_parallel(CPUPPCState *env, target_ulong addr,
                                  uint64_t new_lo, uint64_t new_hi,
                                  uint32_t opidx)
{
    bool success = false;

    /* We will have raised EXCP_ATOMIC from the translator. */
    assert(HAVE_CMPXCHG128);

    if (likely(addr == env->reserve_addr)) {
        Int128 oldv, cmpv, newv;

        cmpv = int128_make128(env->reserve_val2, env->reserve_val);
        newv = int128_make128(new_lo, new_hi);
        oldv = helper_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv,
                                             opidx, GETPC());
        success = int128_eq(oldv, cmpv);
    }
    env->reserve_addr = -1;
    return env->so + success * CRF_EQ_BIT;
}
#endif

/*****************************************************************************/
/* Altivec extension helpers */
#if defined(HOST_WORDS_BIGENDIAN)
#define HI_IDX 0
#define LO_IDX 1
#else
#define HI_IDX 1
#define LO_IDX 0
#endif

/*
 * We use msr_le to determine index ordering in a vector. However,
 * byteswapping is not simply controlled by msr_le. We also need to
 * take into account endianness of the target. This is done for the
 * little-endian PPC64 user-mode target.
 */

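/*
 * Worked example for the index arithmetic in the macros below: for
 * lvewx (u32 elements, so sh = 2) with addr & 0xf == 8, index = 2 on
 * a big-endian guest; with MSR[LE] = 1 it becomes 4 - 2 - 1 = 1.
 * HI_IDX/LO_IDX then map that logical index onto the host's element
 * order.
 */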
#define LVE(name, access, swap, element)                        \
    void helper_##name(CPUPPCState *env, ppc_avr_t *r,          \
                       target_ulong addr)                       \
    {                                                           \
        size_t n_elems = ARRAY_SIZE(r->element);                \
        int adjust = HI_IDX * (n_elems - 1);                    \
        int sh = sizeof(r->element[0]) >> 1;                    \
        int index = (addr & 0xf) >> sh;                         \
        if (msr_le) {                                           \
            index = n_elems - index - 1;                        \
        }                                                       \
                                                                \
        if (needs_byteswap(env)) {                              \
            r->element[LO_IDX ? index : (adjust - index)] =     \
                swap(access(env, addr, GETPC()));               \
        } else {                                                \
            r->element[LO_IDX ? index : (adjust - index)] =     \
                access(env, addr, GETPC());                     \
        }                                                       \
    }
#define I(x) (x)
LVE(lvebx, cpu_ldub_data_ra, I, u8)
LVE(lvehx, cpu_lduw_data_ra, bswap16, u16)
LVE(lvewx, cpu_ldl_data_ra, bswap32, u32)
#undef I
#undef LVE

#define STVE(name, access, swap, element)                               \
    void helper_##name(CPUPPCState *env, ppc_avr_t *r,                  \
                       target_ulong addr)                               \
    {                                                                   \
        size_t n_elems = ARRAY_SIZE(r->element);                        \
        int adjust = HI_IDX * (n_elems - 1);                            \
        int sh = sizeof(r->element[0]) >> 1;                            \
        int index = (addr & 0xf) >> sh;                                 \
        if (msr_le) {                                                   \
            index = n_elems - index - 1;                                \
        }                                                               \
                                                                        \
        if (needs_byteswap(env)) {                                      \
            access(env, addr, swap(r->element[LO_IDX ? index :          \
                                              (adjust - index)]),       \
                   GETPC());                                            \
        } else {                                                        \
            access(env, addr, r->element[LO_IDX ? index :               \
                                         (adjust - index)], GETPC());   \
        }                                                               \
    }
#define I(x) (x)
STVE(stvebx, cpu_stb_data_ra, I, u8)
STVE(stvehx, cpu_stw_data_ra, bswap16, u16)
STVE(stvewx, cpu_stl_data_ra, bswap32, u32)
#undef I
#undef STVE

#ifdef TARGET_PPC64
#define GET_NB(rb) ((rb >> 56) & 0xFF)

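/*
 * For lxvl/stxvl the transfer length is carried in the
 * most-significant byte of RB: e.g. rb = 0x0300000000000000 moves
 * 3 bytes, and any encoded length of 16 or more is clamped to the
 * full 16-byte vector below.
 */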
#define VSX_LXVL(name, lj)                                            \
void helper_##name(CPUPPCState *env, target_ulong addr,              \
                   ppc_vsr_t *xt, target_ulong rb)                    \
{                                                                     \
    ppc_vsr_t t;                                                      \
    uint64_t nb = GET_NB(rb);                                         \
    int i;                                                            \
                                                                      \
    t.s128 = int128_zero();                                           \
    if (nb) {                                                         \
        nb = (nb >= 16) ? 16 : nb;                                    \
        if (msr_le && !lj) {                                          \
            for (i = 16; i > 16 - nb; i--) {                          \
                t.VsrB(i - 1) = cpu_ldub_data_ra(env, addr, GETPC()); \
                addr = addr_add(env, addr, 1);                        \
            }                                                         \
        } else {                                                      \
            for (i = 0; i < nb; i++) {                                \
                t.VsrB(i) = cpu_ldub_data_ra(env, addr, GETPC());     \
                addr = addr_add(env, addr, 1);                        \
            }                                                         \
        }                                                             \
    }                                                                 \
    *xt = t;                                                          \
}

VSX_LXVL(lxvl, 0)
VSX_LXVL(lxvll, 1)
#undef VSX_LXVL

#define VSX_STXVL(name, lj)                                       \
void helper_##name(CPUPPCState *env, target_ulong addr,           \
                   ppc_vsr_t *xt, target_ulong rb)                \
{                                                                 \
    target_ulong nb = GET_NB(rb);                                 \
    int i;                                                        \
                                                                  \
    if (!nb) {                                                    \
        return;                                                   \
    }                                                             \
                                                                  \
    nb = (nb >= 16) ? 16 : nb;                                    \
    if (msr_le && !lj) {                                          \
        for (i = 16; i > 16 - nb; i--) {                          \
            cpu_stb_data_ra(env, addr, xt->VsrB(i - 1), GETPC()); \
            addr = addr_add(env, addr, 1);                        \
        }                                                         \
    } else {                                                      \
        for (i = 0; i < nb; i++) {                                \
            cpu_stb_data_ra(env, addr, xt->VsrB(i), GETPC());     \
            addr = addr_add(env, addr, 1);                        \
        }                                                         \
    }                                                             \
}

VSX_STXVL(stxvl, 0)
VSX_STXVL(stxvll, 1)
#undef VSX_STXVL
#undef GET_NB
#endif /* TARGET_PPC64 */

#undef HI_IDX
#undef LO_IDX

void helper_tbegin(CPUPPCState *env)
{
    /*
     * As a degenerate implementation, always fail tbegin. The reason
     * given is "Nesting overflow". The "persistent" bit is set,
     * providing a hint to the error handler to not retry. The TFIAR
     * captures the address of the failure, which is this tbegin
     * instruction. Instruction execution will continue with the next
     * instruction in memory, which is precisely what we want.
     */

    env->spr[SPR_TEXASR] =
        (1ULL << TEXASR_FAILURE_PERSISTENT) |
        (1ULL << TEXASR_NESTING_OVERFLOW) |
        (msr_hv << TEXASR_PRIVILEGE_HV) |
        (msr_pr << TEXASR_PRIVILEGE_PR) |
        (1ULL << TEXASR_FAILURE_SUMMARY) |
        (1ULL << TEXASR_TFIAR_EXACT);
    env->spr[SPR_TFIAR] = env->nip | (msr_hv << 1) | msr_pr;
    env->spr[SPR_TFHAR] = env->nip + 4;
    env->crf[0] = 0xB; /* 0b1011 = transaction failure */
}