/*
 *  Common CPU TLB handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
#include "qemu/error-report.h"
#include "exec/log.h"
#include "exec/helper-proto.h"
#include "qemu/atomic.h"

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

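/* The "gate" macros above are ordinary compile-time constants rather than
 * preprocessor conditionals around each call site, so the compiler still
 * type-checks the tlb_debug() format strings in every build and simply
 * eliminates the dead branch when debugging is disabled.
 */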
#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)

/* statistics */
int tlb_flush_count;

/* NOTE:
 * If flush_global is true (the usual case), flush all tlb entries.
 * If flush_global is false, flush (at least) all tlb entries not
 * marked global.
 *
 * Since QEMU doesn't currently implement a global/not-global flag
 * for tlb entries, at the moment tlb_flush() will also flush all
 * tlb entries in the flush_global == false case.  This is OK because
 * CPU architectures generally permit an implementation to drop
 * entries from the TLB at any time, so flushing more entries than
 * required is only an efficiency issue, not a correctness issue.
 */
void tlb_flush(CPUState *cpu, int flush_global)
{
    CPUArchState *env = cpu->env_ptr;

    tlb_debug("(%d)\n", flush_global);

    memset(env->tlb_table, -1, sizeof(env->tlb_table));
    memset(env->tlb_v_table, -1, sizeof(env->tlb_v_table));
    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));

    env->vtlb_index = 0;
    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
    tlb_flush_count++;
}

static inline void v_tlb_flush_by_mmuidx(CPUState *cpu, va_list argp)
{
    CPUArchState *env = cpu->env_ptr;

    tlb_debug("start\n");

    for (;;) {
        int mmu_idx = va_arg(argp, int);

        if (mmu_idx < 0) {
            break;
        }

        tlb_debug("%d\n", mmu_idx);

        memset(env->tlb_table[mmu_idx], -1, sizeof(env->tlb_table[0]));
        memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
    }

    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
}

void tlb_flush_by_mmuidx(CPUState *cpu, ...)
{
    va_list argp;
    va_start(argp, cpu);
    v_tlb_flush_by_mmuidx(cpu, argp);
    va_end(argp);
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
    }
}

void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    CPUArchState *env = cpu->env_ptr;
    int i;
    int mmu_idx;

    tlb_debug("page :" TARGET_FMT_lx "\n", addr);

    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
        tlb_debug("forcing full flush ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  env->tlb_flush_addr, env->tlb_flush_mask);

        tlb_flush(cpu, 1);
        return;
    }

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
    }

    /* check whether there are entries that need to be flushed in the vtlb */
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr);
        }
    }

    tb_flush_jmp_cache(cpu, addr);
}

void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, ...)
{
    CPUArchState *env = cpu->env_ptr;
    int i, k;
    va_list argp;

    va_start(argp, addr);

    tlb_debug("addr "TARGET_FMT_lx"\n", addr);

    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
        tlb_debug("forced full flush ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  env->tlb_flush_addr, env->tlb_flush_mask);

        v_tlb_flush_by_mmuidx(cpu, argp);
        va_end(argp);
        return;
    }

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);

    for (;;) {
        int mmu_idx = va_arg(argp, int);

        if (mmu_idx < 0) {
            break;
        }

        tlb_debug("idx %d\n", mmu_idx);

        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

        /* check whether there are vtlb entries that need to be flushed */
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr);
        }
    }
    va_end(argp);

    tb_flush_jmp_cache(cpu, addr);
}

/* update the TLBs so that writes to code in the RAM page 'ram_addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}

/* update the TLB so that writes in the RAM page 'ram_addr' are no longer
   tested for self modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}

static bool tlb_is_dirty_ram(CPUTLBEntry *tlbe)
{
    return (tlbe->addr_write & (TLB_INVALID_MASK|TLB_MMIO|TLB_NOTDIRTY)) == 0;
}

void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start,
                           uintptr_t length)
{
    uintptr_t addr;

    if (tlb_is_dirty_ram(tlb_entry)) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    ram_addr = qemu_ram_addr_from_host(ptr);
    if (ram_addr == RAM_ADDR_INVALID) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}

void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    CPUArchState *env;

    int mmu_idx;

    env = cpu->env_ptr;
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        unsigned int i;

        for (i = 0; i < CPU_TLB_SIZE; i++) {
            tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                  start1, length);
        }

        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range(&env->tlb_v_table[mmu_idx][i],
                                  start1, length);
        }
    }
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
{
    CPUArchState *env = cpu->env_ptr;
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
    }

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_set_dirty1(&env->tlb_v_table[mmu_idx][k], vaddr);
        }
    }
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB. */
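    /* Worked example (sketch, assuming a 32-bit target_ulong): with an
     * existing region of addr 0x10000000 / mask 0xfff00000 and a new 1MB
     * page at vaddr 0x10280000, the differing address bits are 0x00280000,
     * so the loop below shifts the mask up to 0xffc00000, at which point
     * no differing bit remains under it and the recorded region grows to
     * cover both pages.
     */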
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}

/* Add a new TLB entry. At most one entry for a given virtual address
 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
 * supplied size is only used by tlb_flush_page.
 *
 * Called from TCG-generated code, which is under an RCU read-side
 * critical section.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs, int prot,
                             int mmu_idx, target_ulong size)
{
    CPUArchState *env = cpu->env_ptr;
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    uintptr_t addend;
    CPUTLBEntry *te;
    hwaddr iotlb, xlat, sz;
    unsigned vidx = env->vtlb_index++ % CPU_VTLB_SIZE;
    int asidx = cpu_asidx_from_attrs(cpu, attrs);

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }

    sz = size;
    section = address_space_translate_for_iotlb(cpu, asidx, paddr, &xlat, &sz);
    assert(sz >= TARGET_PAGE_SIZE);

    tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
              " prot=%x idx=%d\n",
              vaddr, paddr, prot, mmu_idx);

    address = vaddr;
    if (!memory_region_is_ram(section->mr) && !memory_region_is_romd(section->mr)) {
        /* IO memory case */
        address |= TLB_MMIO;
        addend = 0;
    } else {
        /* TLB_MMIO for rom/romd handled below */
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    }

    code_address = address;
    iotlb = memory_region_section_get_iotlb(cpu, section, vaddr, paddr, xlat,
                                            prot, &address);

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    te = &env->tlb_table[mmu_idx][index];

    /* do not discard the translation in te, evict it into a victim tlb */
    env->tlb_v_table[mmu_idx][vidx] = *te;
    env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index];

    /* refill the tlb */
    env->iotlb[mmu_idx][index].addr = iotlb - vaddr;
    env->iotlb[mmu_idx][index].attrs = attrs;
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((memory_region_is_ram(section->mr) && section->readonly)
            || memory_region_is_romd(section->mr)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if (memory_region_is_ram(section->mr)
                   && cpu_physical_memory_is_clean(
                       memory_region_get_ram_addr(section->mr) + xlat)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}

/* Add a new TLB entry, but without specifying the memory
 * transaction attributes to be used.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
                            prot, mmu_idx, size);
}

static void report_bad_exec(CPUState *cpu, target_ulong addr)
{
    /* Accidentally executing outside RAM or ROM is quite common for
     * several user-error situations, so report it in a way that
     * makes it clear that this isn't a QEMU bug and provide suggestions
     * about what a user could do to fix things.
     */
    error_report("Trying to execute code outside RAM or ROM at 0x"
                 TARGET_FMT_lx, addr);
    error_printf("This usually means one of the following happened:\n\n"
                 "(1) You told QEMU to execute a kernel for the wrong machine "
                 "type, and it crashed on startup (eg trying to run a "
                 "raspberry pi kernel on a versatilepb QEMU machine)\n"
                 "(2) You didn't give QEMU a kernel or BIOS filename at all, "
                 "and QEMU executed a ROM full of no-op instructions until "
                 "it fell off the end\n"
                 "(3) Your guest kernel has a bug and crashed by jumping "
                 "off into nowhere\n\n"
                 "This is almost always one of the first two, so check your "
                 "command line and that you are using the right type of kernel "
                 "for this machine.\n"
                 "If you think option (3) is likely then you can try debugging "
                 "your guest with the -d debug options; in particular "
                 "-d guest_errors will cause the log to include a dump of the "
                 "guest register state at this point.\n\n"
                 "Execution cannot continue; stopping here.\n\n");

    /* Report also to the logs, with more detail including register dump */
    qemu_log_mask(LOG_GUEST_ERROR, "qemu: fatal: Trying to execute code "
                  "outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
    log_cpu_state_mask(LOG_GUEST_ERROR, cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
}

/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
 * is actually a ram_addr_t (in system mode; the user mode emulation
 * version of this function returns a guest virtual address).
 */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    int mmu_idx, page_index, pd;
    void *p;
    MemoryRegion *mr;
    CPUState *cpu = ENV_GET_CPU(env1);
    CPUIOTLBEntry *iotlbentry;

    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env1, true);
    if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                 (addr & TARGET_PAGE_MASK))) {
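        /* The TLB entry does not cover this code page: do a dummy byte
         * code load so that the softmmu slow path (and tlb_fill() on a
         * miss) installs a valid addr_code translation before we use it.
         */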
        cpu_ldub_code(env1, addr);
    }
    iotlbentry = &env1->iotlb[mmu_idx][page_index];
    pd = iotlbentry->addr & ~TARGET_PAGE_MASK;
    mr = iotlb_to_region(cpu, pd, iotlbentry->attrs);
    if (memory_region_is_unassigned(mr)) {
        CPUClass *cc = CPU_GET_CLASS(cpu);

        if (cc->do_unassigned_access) {
            cc->do_unassigned_access(cpu, addr, false, true, 0, 4);
        } else {
            report_bad_exec(cpu, addr);
            exit(1);
        }
    }
    p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
    return qemu_ram_addr_from_host_nofail(p);
}

static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                         target_ulong addr, uintptr_t retaddr, int size)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);
    uint64_t val;

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    cpu->mem_io_pc = retaddr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;
    memory_region_dispatch_read(mr, physaddr, &val, size, iotlbentry->attrs);
    return val;
}

static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                      uint64_t val, target_ulong addr,
                      uintptr_t retaddr, int size)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;
    cpu->mem_io_pc = retaddr;
    memory_region_dispatch_write(mr, physaddr, val, size, iotlbentry->attrs);
}

/* Return true if ADDR is present in the victim tlb, and has been copied
   back to the main tlb.  */
static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
                           size_t elt_ofs, target_ulong page)
{
    size_t vidx;
    for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
        CPUTLBEntry *vtlb = &env->tlb_v_table[mmu_idx][vidx];
        target_ulong cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);

        if (cmp == page) {
            /* Found entry in victim tlb, swap tlb and iotlb.  */
            CPUTLBEntry tmptlb, *tlb = &env->tlb_table[mmu_idx][index];
            CPUIOTLBEntry tmpio, *io = &env->iotlb[mmu_idx][index];
            CPUIOTLBEntry *vio = &env->iotlb_v[mmu_idx][vidx];

            tmptlb = *tlb; *tlb = *vtlb; *vtlb = tmptlb;
            tmpio = *io; *io = *vio; *vio = tmpio;
            return true;
        }
    }
    return false;
}

/* Macro to call the above, with local variables from the use context.  */
#define VICTIM_TLB_HIT(TY, ADDR) \
    victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
                   (ADDR) & TARGET_PAGE_MASK)

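/* TY names the CPUTLBEntry comparator to match against (addr_read,
 * addr_write or addr_code); callers typically fall back to tlb_fill()
 * when this returns false, as probe_write() below does.
 */
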
/* Probe for whether the specified guest write access is permitted.
 * If it is not permitted then an exception will be taken in the same
 * way as if this were a real write access (and we will not return).
 * Otherwise the function will return, and there will be a valid
 * entry in the TLB for this access.
 */
void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
                 uintptr_t retaddr)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;

    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        /* TLB entry is for a different page */
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
    }
}

/* Probe for a read-modify-write atomic operation.  Do not allow unaligned
 * operations, or io operations to proceed.  Return the host address.  */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                               TCGMemOpIdx oi, uintptr_t retaddr)
{
    size_t mmu_idx = get_mmuidx(oi);
    size_t index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    CPUTLBEntry *tlbe = &env->tlb_table[mmu_idx][index];
    target_ulong tlb_addr = tlbe->addr_write;
    TCGMemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    int s_bits = mop & MO_SIZE;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* Enforce guest required alignment.  */
    if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
        /* ??? Maybe indicate atomic op to cpu_unaligned_access */
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* Enforce qemu required alignment.  */
    if (unlikely(addr & ((1 << s_bits) - 1))) {
        /* We get here if guest alignment was not requested,
           or was not enforced by cpu_unaligned_access above.
           We might widen the access and emulate, but for now
           mark an exception and exit the cpu loop.  */
        goto stop_the_world;
    }

    /* Check TLB entry and enforce page permissions.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
        tlb_addr = tlbe->addr_write;
    }

    /* Notice an IO access, or a notdirty page.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        /* There's really nothing that can be done to
           support this apart from stop-the-world.  */
        goto stop_the_world;
    }

    /* Let the guest notice RMW on a write-only page.  */
    if (unlikely(tlbe->addr_read != tlb_addr)) {
        tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_LOAD, mmu_idx, retaddr);
        /* Since we don't support reads and writes to different addresses,
           and we do have the proper page loaded for write, this shouldn't
           ever return.  But just in case, handle via stop-the-world.  */
        goto stop_the_world;
    }

    return (void *)((uintptr_t)addr + tlbe->addend);

 stop_the_world:
    cpu_loop_exit_atomic(ENV_GET_CPU(env), retaddr);
}

#ifdef TARGET_WORDS_BIGENDIAN
# define TGT_BE(X)  (X)
# define TGT_LE(X)  BSWAP(X)
#else
# define TGT_BE(X)  BSWAP(X)
# define TGT_LE(X)  (X)
#endif

#define MMUSUFFIX _mmu

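/* Each DATA_SIZE/#include pair below instantiates the out-of-line softmmu
 * load/store helpers for that access width (e.g. the helper_*_ld*_mmu and
 * helper_*_st*_mmu family called from TCG-generated code on the slow path).
 */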
#define DATA_SIZE 1
#include "softmmu_template.h"

#define DATA_SIZE 2
#include "softmmu_template.h"

#define DATA_SIZE 4
#include "softmmu_template.h"

#define DATA_SIZE 8
#include "softmmu_template.h"

/* First set of helpers allows passing in of OI and RETADDR.  This makes
   them callable from other helpers.  */

#define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr
#define ATOMIC_NAME(X) \
    HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr)

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#ifdef CONFIG_ATOMIC128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif

/* Second set of helpers are directly callable from TCG as helpers.  */

#undef EXTRA_ARGS
#undef ATOMIC_NAME
#undef ATOMIC_MMU_LOOKUP
#define EXTRA_ARGS , TCGMemOpIdx oi
#define ATOMIC_NAME(X) HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, GETPC())

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

/* Code access functions.  */

#undef MMUSUFFIX
#define MMUSUFFIX _cmmu
#undef GETPC
#define GETPC() ((uintptr_t)0)
#define SOFTMMU_CODE_ACCESS

#define DATA_SIZE 1
#include "softmmu_template.h"

#define DATA_SIZE 2
#include "softmmu_template.h"

#define DATA_SIZE 4
#include "softmmu_template.h"

#define DATA_SIZE 8
#include "softmmu_template.h"