/*
 * Software MMU support
 *
 * Generate helpers used by TCG for qemu_ld/st ops and code load
 * functions.
 *
 * Included from target op helpers and exec.c.
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/timer.h"
#include "exec/address-spaces.h"
#include "exec/memory.h"
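/*
 * Illustrative sketch (not part of this file): the includer defines SHIFT
 * and MMUSUFFIX before including this template, once per access size.
 * In QEMU trees of this vintage that includer is cputlb.c, roughly:
 *
 *     #define MMUSUFFIX _mmu
 *     #define SHIFT 0
 *     #include "softmmu_template.h"
 *     #define SHIFT 1
 *     #include "softmmu_template.h"
 *     #define SHIFT 2
 *     #include "softmmu_template.h"
 *     #define SHIFT 3
 *     #include "softmmu_template.h"
 *
 * The exact includers and suffixes may differ between versions; this only
 * shows how DATA_SIZE below ends up as 1, 2, 4 or 8.
 */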
#define DATA_SIZE (1 << SHIFT)

#if DATA_SIZE == 8
#define SUFFIX q
#define LSUFFIX q
#define SDATA_TYPE  int64_t
#define DATA_TYPE  uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define LSUFFIX l
#define SDATA_TYPE  int32_t
#define DATA_TYPE  uint32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define LSUFFIX uw
#define SDATA_TYPE  int16_t
#define DATA_TYPE  uint16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define LSUFFIX ub
#define SDATA_TYPE  int8_t
#define DATA_TYPE  uint8_t
#else
#error unsupported data size
#endif

/* For the benefit of TCG generated code, we want to avoid the complication
   of ABI-specific return type promotion and always return a value extended
   to the register size of the host.  This is tcg_target_long, except in the
   case of a 32-bit host and 64-bit data, and for that we always have
   uint64_t.  Don't bother with this widened value for SOFTMMU_CODE_ACCESS.  */
#if defined(SOFTMMU_CODE_ACCESS) || DATA_SIZE == 8
# define WORD_TYPE  DATA_TYPE
# define USUFFIX    SUFFIX
#else
# define WORD_TYPE  tcg_target_ulong
# define USUFFIX    glue(u, SUFFIX)
# define SSUFFIX    glue(s, SUFFIX)
#endif
#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE MMU_INST_FETCH
#define ADDR_READ addr_code
#else
#define READ_ACCESS_TYPE MMU_DATA_LOAD
#define ADDR_READ addr_read
#endif

#if DATA_SIZE == 8
# define BSWAP(X)  bswap64(X)
#elif DATA_SIZE == 4
# define BSWAP(X)  bswap32(X)
#elif DATA_SIZE == 2
# define BSWAP(X)  bswap16(X)
#else
# define BSWAP(X)  (X)
#endif

#ifdef TARGET_WORDS_BIGENDIAN
# define TGT_BE(X)  (X)
# define TGT_LE(X)  BSWAP(X)
#else
# define TGT_BE(X)  BSWAP(X)
# define TGT_LE(X)  (X)
#endif

#if DATA_SIZE == 1
# define helper_le_ld_name  glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
# define helper_be_ld_name  helper_le_ld_name
# define helper_le_lds_name glue(glue(helper_ret_ld, SSUFFIX), MMUSUFFIX)
# define helper_be_lds_name helper_le_lds_name
# define helper_le_st_name  glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)
# define helper_be_st_name  helper_le_st_name
#else
# define helper_le_ld_name  glue(glue(helper_le_ld, USUFFIX), MMUSUFFIX)
# define helper_be_ld_name  glue(glue(helper_be_ld, USUFFIX), MMUSUFFIX)
# define helper_le_lds_name glue(glue(helper_le_ld, SSUFFIX), MMUSUFFIX)
# define helper_be_lds_name glue(glue(helper_be_ld, SSUFFIX), MMUSUFFIX)
# define helper_le_st_name  glue(glue(helper_le_st, SUFFIX), MMUSUFFIX)
# define helper_be_st_name  glue(glue(helper_be_st, SUFFIX), MMUSUFFIX)
#endif

#ifdef TARGET_WORDS_BIGENDIAN
# define helper_te_ld_name  helper_be_ld_name
# define helper_te_st_name  helper_be_st_name
#else
# define helper_te_ld_name  helper_le_ld_name
# define helper_te_st_name  helper_le_st_name
#endif
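/*
 * For illustration (assuming MMUSUFFIX is _mmu and this file was included
 * with SHIFT == 2, so DATA_SIZE == 4, on a 64-bit host without
 * SOFTMMU_CODE_ACCESS), the names above expand as:
 *
 *     helper_le_ld_name   -> helper_le_ldul_mmu   (returns tcg_target_ulong)
 *     helper_le_lds_name  -> helper_le_ldsl_mmu
 *     helper_be_ld_name   -> helper_be_ldul_mmu
 *     helper_le_st_name   -> helper_le_stl_mmu
 *
 * helper_te_ld_name/helper_te_st_name simply pick the variant matching the
 * target's endianness.
 */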
#ifndef SOFTMMU_CODE_ACCESS
static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
                                              CPUIOTLBEntry *iotlbentry,
                                              target_ulong addr,
                                              uintptr_t retaddr)
{
    uint64_t val;
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    cpu->mem_io_pc = retaddr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;
    memory_region_dispatch_read(mr, physaddr, &val, 1 << SHIFT,
                                iotlbentry->attrs);
    return val;
}
#endif
WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    int a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;
    DATA_TYPE res;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    if (a_bits > 0 && (addr & ((1 << a_bits) - 1)) != 0) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(ADDR_READ, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                     mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        iotlbentry = &env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always read data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        res = glue(io_read, SUFFIX)(env, iotlbentry, addr, retaddr);
        res = TGT_LE(res);
        return res;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        DATA_TYPE res1, res2;
        unsigned shift;
    do_unaligned_access:
        addr1 = addr & ~(DATA_SIZE - 1);
        addr2 = addr1 + DATA_SIZE;
        /* Note the adjustment at the beginning of the function.
           Undo that for the recursion.  */
        res1 = helper_le_ld_name(env, addr1, oi, retaddr + GETPC_ADJ);
        res2 = helper_le_ld_name(env, addr2, oi, retaddr + GETPC_ADJ);
        shift = (addr & (DATA_SIZE - 1)) * 8;

        /* Little-endian combine.  */
        res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
        return res;
    }

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
#if DATA_SIZE == 1
    res = glue(glue(ld, LSUFFIX), _p)((uint8_t *)haddr);
#else
    res = glue(glue(ld, LSUFFIX), _le_p)((uint8_t *)haddr);
#endif
    return res;
}
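/*
 * Worked example of the little-endian combine above: with DATA_SIZE == 4
 * and a 4KB page, a load at addr == 0xffe crosses a page boundary.  The
 * recursion loads res1 from addr1 == 0xffc and res2 from addr2 == 0x1000,
 * shift == (0xffe & 3) * 8 == 16, and (res1 >> 16) | (res2 << 16)
 * reassembles the four bytes at 0xffe..0x1001 in little-endian order.
 */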
#if DATA_SIZE > 1
WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    int a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;
    DATA_TYPE res;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    if (a_bits > 0 && (addr & ((1 << a_bits) - 1)) != 0) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(ADDR_READ, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                     mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        iotlbentry = &env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always read data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        res = glue(io_read, SUFFIX)(env, iotlbentry, addr, retaddr);
        res = TGT_BE(res);
        return res;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        DATA_TYPE res1, res2;
        unsigned shift;
    do_unaligned_access:
        addr1 = addr & ~(DATA_SIZE - 1);
        addr2 = addr1 + DATA_SIZE;
        /* Note the adjustment at the beginning of the function.
           Undo that for the recursion.  */
        res1 = helper_be_ld_name(env, addr1, oi, retaddr + GETPC_ADJ);
        res2 = helper_be_ld_name(env, addr2, oi, retaddr + GETPC_ADJ);
        shift = (addr & (DATA_SIZE - 1)) * 8;

        /* Big-endian combine.  */
        res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
        return res;
    }

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
    res = glue(glue(ld, LSUFFIX), _be_p)((uint8_t *)haddr);
    return res;
}
#endif /* DATA_SIZE > 1 */
#ifndef SOFTMMU_CODE_ACCESS

/* Provide signed versions of the load routines as well.  We can of course
   avoid this for 64-bit data, or for 32-bit data on 32-bit host.  */
#if DATA_SIZE * 8 < TCG_TARGET_REG_BITS
WORD_TYPE helper_le_lds_name(CPUArchState *env, target_ulong addr,
                             TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (SDATA_TYPE)helper_le_ld_name(env, addr, oi, retaddr);
}

# if DATA_SIZE > 1
WORD_TYPE helper_be_lds_name(CPUArchState *env, target_ulong addr,
                             TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (SDATA_TYPE)helper_be_ld_name(env, addr, oi, retaddr);
}
# endif
#endif
static inline void glue(io_write, SUFFIX)(CPUArchState *env,
                                          CPUIOTLBEntry *iotlbentry,
                                          DATA_TYPE val,
                                          target_ulong addr,
                                          uintptr_t retaddr)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;
    cpu->mem_io_pc = retaddr;
    memory_region_dispatch_write(mr, physaddr, val, 1 << SHIFT,
                                 iotlbentry->attrs);
}
void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    int a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    if (a_bits > 0 && (addr & ((1 << a_bits) - 1)) != 0) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        iotlbentry = &env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always write data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        val = TGT_LE(val);
        glue(io_write, SUFFIX)(env, iotlbentry, val, addr, retaddr);
        return;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        int i, index2;
        target_ulong page2, tlb_addr2;
    do_unaligned_access:
        /* Ensure the second page is in the TLB.  Note that the first page
           is already guaranteed to be filled, and that the second page
           cannot evict the first.  */
        page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK;
        index2 = (page2 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        tlb_addr2 = env->tlb_table[mmu_idx][index2].addr_write;
        if (page2 != (tlb_addr2 & (TARGET_PAGE_MASK | TLB_INVALID_MASK))
            && !VICTIM_TLB_HIT(addr_write, page2)) {
            tlb_fill(ENV_GET_CPU(env), page2, MMU_DATA_STORE,
                     mmu_idx, retaddr);
        }

        /* XXX: not efficient, but simple.  */
        /* This loop must go in the forward direction to avoid issues
           with self-modifying code in Windows 64-bit.  */
        for (i = 0; i < DATA_SIZE; ++i) {
            /* Little-endian extract.  */
            uint8_t val8 = val >> (i * 8);
            /* Note the adjustment at the beginning of the function.
               Undo that for the recursion.  */
            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
                                            oi, retaddr + GETPC_ADJ);
        }
        return;
    }

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
#if DATA_SIZE == 1
    glue(glue(st, SUFFIX), _p)((uint8_t *)haddr, val);
#else
    glue(glue(st, SUFFIX), _le_p)((uint8_t *)haddr, val);
#endif
}
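/*
 * Worked example of the little-endian extract above: with DATA_SIZE == 4
 * and val == 0x04030201, the byte-by-byte slow path stores 0x01 at addr,
 * 0x02 at addr + 1, 0x03 at addr + 2 and 0x04 at addr + 3, the same memory
 * image a direct little-endian store of val would produce.
 */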
#if DATA_SIZE > 1
void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    int a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    if (a_bits > 0 && (addr & ((1 << a_bits) - 1)) != 0) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        iotlbentry = &env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always write data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        val = TGT_BE(val);
        glue(io_write, SUFFIX)(env, iotlbentry, val, addr, retaddr);
        return;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        int i, index2;
        target_ulong page2, tlb_addr2;
    do_unaligned_access:
        /* Ensure the second page is in the TLB.  Note that the first page
           is already guaranteed to be filled, and that the second page
           cannot evict the first.  */
        page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK;
        index2 = (page2 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        tlb_addr2 = env->tlb_table[mmu_idx][index2].addr_write;
        if (page2 != (tlb_addr2 & (TARGET_PAGE_MASK | TLB_INVALID_MASK))
            && !VICTIM_TLB_HIT(addr_write, page2)) {
            tlb_fill(ENV_GET_CPU(env), page2, MMU_DATA_STORE,
                     mmu_idx, retaddr);
        }

        /* XXX: not efficient, but simple */
        /* This loop must go in the forward direction to avoid issues
           with self-modifying code.  */
        for (i = 0; i < DATA_SIZE; ++i) {
            /* Big-endian extract.  */
            uint8_t val8 = val >> (((DATA_SIZE - 1) * 8) - (i * 8));
            /* Note the adjustment at the beginning of the function.
               Undo that for the recursion.  */
            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
                                            oi, retaddr + GETPC_ADJ);
        }
        return;
    }

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
    glue(glue(st, SUFFIX), _be_p)((uint8_t *)haddr, val);
}
#endif /* DATA_SIZE > 1 */
#if DATA_SIZE == 1
/* Probe for whether the specified guest write access is permitted.
 * If it is not permitted then an exception will be taken in the same
 * way as if this were a real write access (and we will not return).
 * Otherwise the function will return, and there will be a valid
 * entry in the TLB for this access.
 */
void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
                 uintptr_t retaddr)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;

    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        /* TLB entry is for a different page */
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
    }
}
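/*
 * Illustrative use (a sketch, no caller appears in this file): a target
 * helper that must not fault part-way through a multi-byte update can call
 * probe_write(env, addr, mmu_idx, GETPC()) up front, so that any write
 * fault is raised before memory has been modified.
 */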
#endif
#endif /* !defined(SOFTMMU_CODE_ACCESS) */

#undef READ_ACCESS_TYPE
#undef SHIFT
#undef DATA_TYPE
#undef SUFFIX
#undef LSUFFIX
#undef DATA_SIZE
#undef ADDR_READ
#undef WORD_TYPE
#undef SDATA_TYPE
#undef USUFFIX
#undef SSUFFIX
#undef BSWAP
#undef TGT_BE
#undef TGT_LE
#undef CPU_BE
#undef CPU_LE
#undef helper_le_ld_name
#undef helper_be_ld_name
#undef helper_le_lds_name
#undef helper_be_lds_name
#undef helper_le_st_name
#undef helper_be_st_name
#undef helper_te_ld_name
#undef helper_te_st_name