/*
 * PowerPC memory access emulation helpers for QEMU.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "qemu/host-utils.h"
#include "qemu/main-loop.h"
#include "exec/helper-proto.h"
#include "helper_regs.h"
#include "exec/cpu_ldst.h"
#include "tcg.h"
#include "internal.h"
#include "qemu/atomic128.h"

/* #define DEBUG_OP */

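/*
 * needs_byteswap() returns true when the guest's current MSR[LE]
 * setting disagrees with the endianness this target was built for;
 * the helpers below that honour guest byte order use it to decide
 * whether to byte-swap their accesses.
 */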
static inline bool needs_byteswap(const CPUPPCState *env)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return msr_le;
#else
    return !msr_le;
#endif
}

/*****************************************************************************/
/* Memory load and stores */

static inline target_ulong addr_add(CPUPPCState *env, target_ulong addr,
                                    target_long arg)
{
#if defined(TARGET_PPC64)
    if (!msr_is_64bit(env, env->msr)) {
        return (uint32_t)(addr + arg);
    } else
#endif
    {
        return addr + arg;
    }
}

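/*
 * lmw/stmw (load/store multiple word): move GPRs reg..31 to or from
 * consecutive 32-bit words in memory, byte-reversing each word when
 * needs_byteswap(env) is true.
 */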
void helper_lmw(CPUPPCState *env, target_ulong addr, uint32_t reg)
{
    for (; reg < 32; reg++) {
        if (needs_byteswap(env)) {
            env->gpr[reg] = bswap32(cpu_ldl_data_ra(env, addr, GETPC()));
        } else {
            env->gpr[reg] = cpu_ldl_data_ra(env, addr, GETPC());
        }
        addr = addr_add(env, addr, 4);
    }
}

void helper_stmw(CPUPPCState *env, target_ulong addr, uint32_t reg)
{
    for (; reg < 32; reg++) {
        if (needs_byteswap(env)) {
            cpu_stl_data_ra(env, addr, bswap32((uint32_t)env->gpr[reg]),
                            GETPC());
        } else {
            cpu_stl_data_ra(env, addr, (uint32_t)env->gpr[reg], GETPC());
        }
        addr = addr_add(env, addr, 4);
    }
}

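/*
 * String word helpers (lswi/lswx and stswi/stswx): move an arbitrary
 * byte count between memory and successive GPRs, wrapping from r31
 * back to r0; a trailing partial word is left-justified in the
 * register, and the remaining low-order bytes are cleared on load.
 */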
static void do_lsw(CPUPPCState *env, target_ulong addr, uint32_t nb,
                   uint32_t reg, uintptr_t raddr)
{
    int sh;

    for (; nb > 3; nb -= 4) {
        env->gpr[reg] = cpu_ldl_data_ra(env, addr, raddr);
        reg = (reg + 1) % 32;
        addr = addr_add(env, addr, 4);
    }
    if (unlikely(nb > 0)) {
        env->gpr[reg] = 0;
        for (sh = 24; nb > 0; nb--, sh -= 8) {
            env->gpr[reg] |= cpu_ldub_data_ra(env, addr, raddr) << sh;
            addr = addr_add(env, addr, 1);
        }
    }
}

void helper_lsw(CPUPPCState *env, target_ulong addr, uint32_t nb, uint32_t reg)
{
    do_lsw(env, addr, nb, reg, GETPC());
}

/*
 * The PPC32 specification says we must generate an exception if rA is
 * in the range of registers to be loaded. On the other hand, IBM says
 * this is valid, but rA won't be loaded. For now, I'll follow the
 * spec...
 */
void helper_lswx(CPUPPCState *env, target_ulong addr, uint32_t reg,
                 uint32_t ra, uint32_t rb)
{
    if (likely(xer_bc != 0)) {
        int num_used_regs = DIV_ROUND_UP(xer_bc, 4);
        if (unlikely((ra != 0 && lsw_reg_in_range(reg, num_used_regs, ra)) ||
                     lsw_reg_in_range(reg, num_used_regs, rb))) {
            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL |
                                   POWERPC_EXCP_INVAL_LSWX, GETPC());
        } else {
            do_lsw(env, addr, xer_bc, reg, GETPC());
        }
    }
}

void helper_stsw(CPUPPCState *env, target_ulong addr, uint32_t nb,
                 uint32_t reg)
{
    int sh;

    for (; nb > 3; nb -= 4) {
        cpu_stl_data_ra(env, addr, env->gpr[reg], GETPC());
        reg = (reg + 1) % 32;
        addr = addr_add(env, addr, 4);
    }
    if (unlikely(nb > 0)) {
        for (sh = 24; nb > 0; nb--, sh -= 8) {
            cpu_stb_data_ra(env, addr, (env->gpr[reg] >> sh) & 0xFF, GETPC());
            addr = addr_add(env, addr, 1);
        }
    }
}

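/*
 * dcbz (data cache block zero) clears one cache-line-sized,
 * cache-line-aligned block of memory and kills any reservation that
 * covers it; dcbzep is the external-PID (EPID) variant, which uses
 * the EPID store translation context instead of the normal data MMU
 * index.
 */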
static void dcbz_common(CPUPPCState *env, target_ulong addr,
                        uint32_t opcode, bool epid, uintptr_t retaddr)
{
    target_ulong mask, dcbz_size = env->dcache_line_size;
    uint32_t i;
    void *haddr;
    int mmu_idx = epid ? PPC_TLB_EPID_STORE : env->dmmu_idx;

#if defined(TARGET_PPC64)
    /* Check for dcbz vs dcbzl on 970 */
    if (env->excp_model == POWERPC_EXCP_970 &&
        !(opcode & 0x00200000) && ((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1) {
        dcbz_size = 32;
    }
#endif

    /* Align address */
    mask = ~(dcbz_size - 1);
    addr &= mask;

    /* Check reservation */
    if ((env->reserve_addr & mask) == (addr & mask)) {
        env->reserve_addr = (target_ulong)-1ULL;
    }

    /* Try fast path translate */
    haddr = tlb_vaddr_to_host(env, addr, MMU_DATA_STORE, mmu_idx);
    if (haddr) {
        memset(haddr, 0, dcbz_size);
    } else {
        /* Slow path */
        for (i = 0; i < dcbz_size; i += 8) {
            if (epid) {
#if !defined(CONFIG_USER_ONLY)
                /* Does not make sense on USER_ONLY config */
                cpu_stq_eps_ra(env, addr + i, 0, retaddr);
#endif
            } else {
                cpu_stq_data_ra(env, addr + i, 0, retaddr);
            }
        }
    }
}

void helper_dcbz(CPUPPCState *env, target_ulong addr, uint32_t opcode)
{
    dcbz_common(env, addr, opcode, false, GETPC());
}

void helper_dcbzep(CPUPPCState *env, target_ulong addr, uint32_t opcode)
{
    dcbz_common(env, addr, opcode, true, GETPC());
}

void helper_icbi(CPUPPCState *env, target_ulong addr)
{
    addr &= ~(env->dcache_line_size - 1);
    /*
     * Invalidate one cache line:
     * PowerPC specification says this is to be treated like a load
     * (not a fetch) by the MMU. To be sure it will be so,
     * do the load "by hand".
     */
    cpu_ldl_data_ra(env, addr, GETPC());
}

void helper_icbiep(CPUPPCState *env, target_ulong addr)
{
#if !defined(CONFIG_USER_ONLY)
    /* See comments above */
    addr &= ~(env->dcache_line_size - 1);
    cpu_ldl_epl_ra(env, addr, GETPC());
#endif
}

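/*
 * lscbx (load string and compare byte indexed, POWER architecture):
 * load up to XER[byte count] bytes into successive GPRs, stopping
 * early when a loaded byte matches the XER compare byte; the number
 * of bytes actually transferred is returned so the caller can update
 * XER accordingly.
 */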
/* XXX: to be tested */
target_ulong helper_lscbx(CPUPPCState *env, target_ulong addr, uint32_t reg,
                          uint32_t ra, uint32_t rb)
{
    int i, c, d;

    d = 24;
    for (i = 0; i < xer_bc; i++) {
        c = cpu_ldub_data_ra(env, addr, GETPC());
        addr = addr_add(env, addr, 1);
        /* ra (if not 0) and rb are never modified */
        if (likely(reg != rb && (ra == 0 || reg != ra))) {
            env->gpr[reg] = (env->gpr[reg] & ~(0xFF << d)) | (c << d);
        }
        if (unlikely(c == xer_cmp)) {
            break;
        }
        if (likely(d != 0)) {
            d -= 8;
        } else {
            d = 24;
            reg++;
            reg = reg & 0x1F;
        }
    }
    return i;
}

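/*
 * Quadword (128-bit) helpers used when TCG runs vCPUs in parallel:
 * the lq/stq paths use genuine 128-bit atomic accesses, and the
 * stqcx. path performs a 128-bit compare-and-swap against the
 * recorded reservation (reserve_addr, reserve_val, reserve_val2).
 * When the host lacks the required atomics, the translator raises
 * EXCP_ATOMIC instead of calling these helpers.
 */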
#ifdef TARGET_PPC64
uint64_t helper_lq_le_parallel(CPUPPCState *env, target_ulong addr,
                               uint32_t opidx)
{
    Int128 ret;

    /* We will have raised EXCP_ATOMIC from the translator. */
    assert(HAVE_ATOMIC128);
    ret = helper_atomic_ldo_le_mmu(env, addr, opidx, GETPC());
    env->retxh = int128_gethi(ret);
    return int128_getlo(ret);
}

uint64_t helper_lq_be_parallel(CPUPPCState *env, target_ulong addr,
                               uint32_t opidx)
{
    Int128 ret;

    /* We will have raised EXCP_ATOMIC from the translator. */
    assert(HAVE_ATOMIC128);
    ret = helper_atomic_ldo_be_mmu(env, addr, opidx, GETPC());
    env->retxh = int128_gethi(ret);
    return int128_getlo(ret);
}

void helper_stq_le_parallel(CPUPPCState *env, target_ulong addr,
                            uint64_t lo, uint64_t hi, uint32_t opidx)
{
    Int128 val;

    /* We will have raised EXCP_ATOMIC from the translator. */
    assert(HAVE_ATOMIC128);
    val = int128_make128(lo, hi);
    helper_atomic_sto_le_mmu(env, addr, val, opidx, GETPC());
}

void helper_stq_be_parallel(CPUPPCState *env, target_ulong addr,
                            uint64_t lo, uint64_t hi, uint32_t opidx)
{
    Int128 val;

    /* We will have raised EXCP_ATOMIC from the translator. */
    assert(HAVE_ATOMIC128);
    val = int128_make128(lo, hi);
    helper_atomic_sto_be_mmu(env, addr, val, opidx, GETPC());
}

uint32_t helper_stqcx_le_parallel(CPUPPCState *env, target_ulong addr,
                                  uint64_t new_lo, uint64_t new_hi,
                                  uint32_t opidx)
{
    bool success = false;

    /* We will have raised EXCP_ATOMIC from the translator. */
    assert(HAVE_CMPXCHG128);

    if (likely(addr == env->reserve_addr)) {
        Int128 oldv, cmpv, newv;

        cmpv = int128_make128(env->reserve_val2, env->reserve_val);
        newv = int128_make128(new_lo, new_hi);
        oldv = helper_atomic_cmpxchgo_le_mmu(env, addr, cmpv, newv,
                                             opidx, GETPC());
        success = int128_eq(oldv, cmpv);
    }
    env->reserve_addr = -1;
    return env->so + success * CRF_EQ_BIT;
}

uint32_t helper_stqcx_be_parallel(CPUPPCState *env, target_ulong addr,
                                  uint64_t new_lo, uint64_t new_hi,
                                  uint32_t opidx)
{
    bool success = false;

    /* We will have raised EXCP_ATOMIC from the translator. */
    assert(HAVE_CMPXCHG128);

    if (likely(addr == env->reserve_addr)) {
        Int128 oldv, cmpv, newv;

        cmpv = int128_make128(env->reserve_val2, env->reserve_val);
        newv = int128_make128(new_lo, new_hi);
        oldv = helper_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv,
                                             opidx, GETPC());
        success = int128_eq(oldv, cmpv);
    }
    env->reserve_addr = -1;
    return env->so + success * CRF_EQ_BIT;
}
#endif

/*****************************************************************************/
/* Altivec extension helpers */
#if defined(HOST_WORDS_BIGENDIAN)
#define HI_IDX 0
#define LO_IDX 1
#else
#define HI_IDX 1
#define LO_IDX 0
#endif

/*
 * We use msr_le to determine index ordering in a vector. However,
 * byteswapping is not simply controlled by msr_le. We also need to
 * take into account endianness of the target. This is done for the
 * little-endian PPC64 user-mode target.
 */

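/*
 * LVE/STVE expand into the lvebx/lvehx/lvewx and stvebx/stvehx/stvewx
 * helpers: each accesses a single element at its natural offset
 * within the 16-byte-aligned quadword containing addr, mirroring the
 * element index when MSR[LE] is set.
 */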
#define LVE(name, access, swap, element)                        \
    void helper_##name(CPUPPCState *env, ppc_avr_t *r,          \
                       target_ulong addr)                       \
    {                                                           \
        size_t n_elems = ARRAY_SIZE(r->element);                \
        int adjust = HI_IDX * (n_elems - 1);                    \
        int sh = sizeof(r->element[0]) >> 1;                    \
        int index = (addr & 0xf) >> sh;                         \
        if (msr_le) {                                           \
            index = n_elems - index - 1;                        \
        }                                                       \
                                                                \
        if (needs_byteswap(env)) {                              \
            r->element[LO_IDX ? index : (adjust - index)] =     \
                swap(access(env, addr, GETPC()));               \
        } else {                                                \
            r->element[LO_IDX ? index : (adjust - index)] =     \
                access(env, addr, GETPC());                     \
        }                                                       \
    }
#define I(x) (x)
LVE(lvebx, cpu_ldub_data_ra, I, u8)
LVE(lvehx, cpu_lduw_data_ra, bswap16, u16)
LVE(lvewx, cpu_ldl_data_ra, bswap32, u32)
#undef I
#undef LVE

#define STVE(name, access, swap, element)                               \
    void helper_##name(CPUPPCState *env, ppc_avr_t *r,                  \
                       target_ulong addr)                               \
    {                                                                   \
        size_t n_elems = ARRAY_SIZE(r->element);                        \
        int adjust = HI_IDX * (n_elems - 1);                            \
        int sh = sizeof(r->element[0]) >> 1;                            \
        int index = (addr & 0xf) >> sh;                                 \
        if (msr_le) {                                                   \
            index = n_elems - index - 1;                                \
        }                                                               \
                                                                        \
        if (needs_byteswap(env)) {                                      \
            access(env, addr, swap(r->element[LO_IDX ? index :          \
                                              (adjust - index)]),       \
                   GETPC());                                            \
        } else {                                                        \
            access(env, addr, r->element[LO_IDX ? index :               \
                                         (adjust - index)], GETPC());   \
        }                                                               \
    }
#define I(x) (x)
STVE(stvebx, cpu_stb_data_ra, I, u8)
STVE(stvehx, cpu_stw_data_ra, bswap16, u16)
STVE(stvewx, cpu_stl_data_ra, bswap32, u32)
#undef I
#undef STVE

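/*
 * lxvl/stxvl and lxvll/stxvll (VSX load/store vector with length):
 * move at most 16 bytes, with the byte count taken from the most
 * significant byte of rB (GET_NB); the "ll" forms always use
 * left-justified, big-endian element ordering regardless of MSR[LE].
 */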
#ifdef TARGET_PPC64
#define GET_NB(rb) ((rb >> 56) & 0xFF)

#define VSX_LXVL(name, lj)                                              \
void helper_##name(CPUPPCState *env, target_ulong addr,                 \
                   ppc_vsr_t *xt, target_ulong rb)                      \
{                                                                       \
    ppc_vsr_t t;                                                        \
    uint64_t nb = GET_NB(rb);                                           \
    int i;                                                              \
                                                                        \
    t.s128 = int128_zero();                                             \
    if (nb) {                                                           \
        nb = (nb >= 16) ? 16 : nb;                                      \
        if (msr_le && !lj) {                                            \
            for (i = 16; i > 16 - nb; i--) {                            \
                t.VsrB(i - 1) = cpu_ldub_data_ra(env, addr, GETPC());   \
                addr = addr_add(env, addr, 1);                          \
            }                                                           \
        } else {                                                        \
            for (i = 0; i < nb; i++) {                                  \
                t.VsrB(i) = cpu_ldub_data_ra(env, addr, GETPC());       \
                addr = addr_add(env, addr, 1);                          \
            }                                                           \
        }                                                               \
    }                                                                   \
    *xt = t;                                                            \
}

VSX_LXVL(lxvl, 0)
VSX_LXVL(lxvll, 1)
#undef VSX_LXVL

#define VSX_STXVL(name, lj)                                             \
void helper_##name(CPUPPCState *env, target_ulong addr,                 \
                   ppc_vsr_t *xt, target_ulong rb)                      \
{                                                                       \
    target_ulong nb = GET_NB(rb);                                       \
    int i;                                                              \
                                                                        \
    if (!nb) {                                                          \
        return;                                                         \
    }                                                                   \
                                                                        \
    nb = (nb >= 16) ? 16 : nb;                                          \
    if (msr_le && !lj) {                                                \
        for (i = 16; i > 16 - nb; i--) {                                \
            cpu_stb_data_ra(env, addr, xt->VsrB(i - 1), GETPC());       \
            addr = addr_add(env, addr, 1);                              \
        }                                                               \
    } else {                                                            \
        for (i = 0; i < nb; i++) {                                      \
            cpu_stb_data_ra(env, addr, xt->VsrB(i), GETPC());           \
            addr = addr_add(env, addr, 1);                              \
        }                                                               \
    }                                                                   \
}

VSX_STXVL(stxvl, 0)
VSX_STXVL(stxvll, 1)
#undef VSX_STXVL
#undef GET_NB
#endif /* TARGET_PPC64 */

#undef HI_IDX
#undef LO_IDX

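/*
 * tbegin. is part of the Power ISA v2.07 hardware transactional
 * memory facility. QEMU does not model transactional execution, so
 * this helper makes every transaction fail immediately, as explained
 * below.
 */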
void helper_tbegin(CPUPPCState *env)
{
    /*
     * As a degenerate implementation, always fail tbegin. The reason
     * given is "Nesting overflow". The "persistent" bit is set,
     * providing a hint to the error handler to not retry. The TFIAR
     * captures the address of the failure, which is this tbegin
     * instruction. Instruction execution will continue with the next
     * instruction in memory, which is precisely what we want.
     */

    env->spr[SPR_TEXASR] =
        (1ULL << TEXASR_FAILURE_PERSISTENT) |
        (1ULL << TEXASR_NESTING_OVERFLOW) |
        (msr_hv << TEXASR_PRIVILEGE_HV) |
        (msr_pr << TEXASR_PRIVILEGE_PR) |
        (1ULL << TEXASR_FAILURE_SUMMARY) |
        (1ULL << TEXASR_TFIAR_EXACT);
    env->spr[SPR_TFIAR] = env->nip | (msr_hv << 1) | msr_pr;
    env->spr[SPR_TFHAR] = env->nip + 4;
    env->crf[0] = 0xB; /* 0b1010 = transaction failure */
}