/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation. */
#undef DEBUG_TB_CHECK
#endif

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END   0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
uint8_t *code_gen_ptr;

int phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static ram_addr_t phys_ram_alloc_offset = 0;

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    uint32_t phys_offset;
} PhysPageDesc;

#define L2_BITS 10
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
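/* Illustrative note (added explanation): with the common case of
   TARGET_PAGE_BITS = 12 (4 KB target pages), L1_BITS = 32 - 10 - 12 = 10,
   so each level indexes 1024 entries and the two levels together cover
   the whole 32 bit virtual space:

       page_index = addr >> TARGET_PAGE_BITS;    20 bit page number
       l1_slot    = page_index >> L2_BITS;       top 10 bits
       l2_slot    = page_index & (L2_SIZE - 1);  bottom 10 bits

   L2 tables are only allocated for regions actually touched, which is
   why page_find_alloc() below allocates lazily. */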
static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
#if defined(CONFIG_SOFTMMU)
static int io_mem_watch;
#endif

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;

        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                       PAGE_EXECUTE_READWRITE, &old_protect);
    }
#else
    qemu_real_host_page_size = getpagesize();
    {
        unsigned long start, end;

        start = (unsigned long)code_gen_buffer;
        start &= ~(qemu_real_host_page_size - 1);

        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
        end += qemu_real_host_page_size - 1;
        end &= ~(qemu_real_host_page_size - 1);

        mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC);
    }
#endif

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
}

static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++)
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}
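/* Note (added explanation): the physical map mirrors l1_map but gains an
   extra top level when TARGET_PHYS_ADDR_SPACE_BITS > 32 (SPARC64/PPC64
   above), so a 41 or 42 bit physical page index still resolves in at most
   three table lookups. Freshly allocated L2 entries start out as
   IO_MEM_UNASSIGNED, and callers of phys_page_find() treat a NULL result
   the same way. */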
#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#endif

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->nb_watchpoints = 0;
    *penv = env;
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}
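/* Note (added explanation) on the tagging scheme used by the list walkers
   below: pointers stored in page_next[] and in the jump lists carry extra
   state in their two low bits (a TranslationBlock is at least 4 byte
   aligned, so those bits are otherwise zero). A tag of 0 or 1 records
   which slot of the referencing TB the link came from; the tag 2 marks
   the head of the circular jmp_first list. Hence the recurring idiom:

       n1  = (long)tb1 & 3;                          extract the tag
       tb1 = (TranslationBlock *)((long)tb1 & ~3);   strip it
*/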
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_ulong phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the per-CPU jump caches */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
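/* Worked example (added illustration): set_bits(tab, 3, 6) covers bit
   range [3, 9). The leading partial byte gets mask 0xff << 3 = 0xf8
   (bits 3-7), there are no full middle bytes, and the trailing partial
   byte gets ~(0xff << 1) = 0x01 (bit 8). build_page_bitmap() below uses
   this to mark which bytes of a page hold translated code. */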
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif
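/* Note (added explanation): tb_gen_code() is the recovery path for
   precise self modifying code. When a store is detected inside the TB
   that is currently executing, the invalidation code below retranslates
   just the faulting instruction with CF_SINGLE_INSN, so the regenerated
   block cannot overlap its own store, and execution resumes there. */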
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                    cpu_single_env->mem_write_vaddr, len,
                    cpu_single_env->eip,
                    cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
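/* Illustrative note (added explanation): for a 4 byte store at page
   offset 0x123, offset >> 3 selects bitmap byte 0x24 and (offset & 7) = 3
   shifts its bits down, so (b & ((1 << 4) - 1)) tests exactly bytes
   0x123-0x126. Only if one of them holds translated code is the slow
   invalidation path above taken; plain data writes to a page that once
   held code stay cheap. */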
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}
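/* Note (added explanation): tb_alloc() never evicts individual blocks;
   when either limit is hit it returns NULL and the caller is expected
   to do a full tb_flush() and retry (see tb_gen_code() above). Flushing
   everything keeps the allocator a trivial bump pointer into
   code_gen_buffer. */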
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
#ifdef USE_CODE_COPY
    tb->cflags &= ~CF_FP_USED;
    if (tb->cflags & CF_TB_FP_USED)
        tb->cflags |= CF_FP_USED;
#endif

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
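/* Note (added explanation): the binary search works because tbs[] is
   filled in strictly increasing tc_ptr order (code is bump allocated
   from code_gen_buffer and the array is never compacted between
   flushes). When the loop falls through, tc_ptr lies inside tbs[m_max],
   the last block starting at or below it, hence the final return. */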
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_ulong addr, pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif

/* Add a watchpoint. */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr)
            return 0;
    }
    if (env->nb_watchpoints >= MAX_WATCHPOINTS)
        return -1;

    i = env->nb_watchpoints++;
    env->watchpoint[i].vaddr = addr;
    tlb_flush_page(env, addr);
    /* FIXME: This flush is needed because of the hack to make memory ops
       terminate the TB. It can be removed once the proper IO trap and
       re-execute bits are in. */
    tb_flush(env);
    return i;
}

/* Remove a watchpoint. */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr) {
            env->nb_watchpoints--;
            env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
            tlb_flush_page(env, addr);
            return 0;
        }
    }
    return -1;
}

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}
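/* Note (added explanation): breaking the jump chains is what bounds
   interrupt latency here. Chained TBs branch directly into one another
   without returning to the main loop, so tb_reset_jump_recursive()
   rewrites those direct branches back into exit stubs; the CPU then
   falls out to cpu_exec(), which notices interrupt_request. */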
void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
#ifdef TARGET_I386
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
#endif
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o port accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
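/* Usage example (added illustration):

       mask = cpu_str_to_log_mask("in_asm,cpu");
       if (!mask)
           ... reject the unknown item ...
       cpu_set_log(mask);

   "all" expands to every entry of cpu_log_items; any unknown name makes
   the whole parse return 0. */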
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    va_end(ap);
    abort();
}

CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init();
    /* preserve chaining and index */
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
    memcpy(new_env, env, sizeof(CPUState));
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;
    return new_env;
}

#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#if (NB_MMU_MODES == 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;
#endif
#endif
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_flush_entry(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_flush_entry(&env->tlb_table[3][i], addr);
#endif
#endif

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page. */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}
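/* Note (added explanation): code pages are "protected" purely through
   the dirty bitmap. Clearing CODE_DIRTY_FLAG retags the matching write
   TLB entries as IO_MEM_NOTDIRTY (see tlb_reset_dirty_range() below),
   so every store to the page takes the slow I/O path, which can run
   the SMC invalidation before letting the write through. */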
/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}

void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
#if (NB_MMU_MODES >= 3)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
#if (NB_MMU_MODES == 4)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
#endif
#endif
    }

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}
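/* Note (added explanation): dirty state is one byte per RAM page in
   phys_ram_dirty, with independent flag bits (CODE_DIRTY_FLAG for SMC
   detection, plus bits used by consumers such as the VGA framebuffer
   refresh). Resetting a flag downgrades matching write TLB entries
   from IO_MEM_RAM to IO_MEM_NOTDIRTY; the slow write path marks the
   page dirty again and restores the fast entry, so each page faults at
   most once per reset. */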
3a7d929e FB |
1492 | static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry) |
1493 | { | |
1494 | ram_addr_t ram_addr; | |
1495 | ||
84b7b8e7 FB |
1496 | if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) { |
1497 | ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + | |
3a7d929e FB |
1498 | tlb_entry->addend - (unsigned long)phys_ram_base; |
1499 | if (!cpu_physical_memory_is_dirty(ram_addr)) { | |
84b7b8e7 | 1500 | tlb_entry->addr_write |= IO_MEM_NOTDIRTY; |
3a7d929e FB |
1501 | } |
1502 | } | |
1503 | } | |
1504 | ||
1505 | /* update the TLB according to the current state of the dirty bits */ | |
1506 | void cpu_tlb_update_dirty(CPUState *env) | |
1507 | { | |
1508 | int i; | |
1509 | for(i = 0; i < CPU_TLB_SIZE; i++) | |
84b7b8e7 | 1510 | tlb_update_dirty(&env->tlb_table[0][i]); |
3a7d929e | 1511 | for(i = 0; i < CPU_TLB_SIZE; i++) |
84b7b8e7 | 1512 | tlb_update_dirty(&env->tlb_table[1][i]); |
6fa4cea9 JM |
1513 | #if (NB_MMU_MODES >= 3) |
1514 | for(i = 0; i < CPU_TLB_SIZE; i++) | |
1515 | tlb_update_dirty(&env->tlb_table[2][i]); | |
1516 | #if (NB_MMU_MODES == 4) | |
1517 | for(i = 0; i < CPU_TLB_SIZE; i++) | |
1518 | tlb_update_dirty(&env->tlb_table[3][i]); | |
1519 | #endif | |
1520 | #endif | |
3a7d929e FB |
1521 | } |
1522 | ||
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}

/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], addr);
    tlb_set_dirty1(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_set_dirty1(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_set_dirty1(&env->tlb_table[3][i], addr);
#endif
#endif
}

/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (this can only happen in non-SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int is_user, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;
    int i;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        /* Make accesses to pages with watchpoints go via the
           watchpoint trap routines. */
        for (i = 0; i < env->nb_watchpoints; i++) {
            if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
                if (address & ~TARGET_PAGE_MASK) {
                    env->watchpoint[i].is_ram = 0;
                    address = vaddr | io_mem_watch;
                } else {
                    env->watchpoint[i].is_ram = 1;
                    /* TODO: Figure out how to make read watchpoints coexist
                       with code. */
                    pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
                }
            }
        }

        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        te = &env->tlb_table[is_user][index];
        te->addend = addend;
        if (prot & PAGE_READ) {
            te->addr_read = address;
        } else {
            te->addr_read = -1;
        }
        if (prot & PAGE_EXEC) {
            te->addr_code = address;
        } else {
            te->addr_code = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
                (pd & IO_MEM_ROMD)) {
                /* write access calls the I/O callback */
                te->addr_write = vaddr |
                    (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                te->addr_write = vaddr | IO_MEM_NOTDIRTY;
            } else {
                te->addr_write = address;
            }
        } else {
            te->addr_write = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int is_user, int is_softmmu)
{
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0; j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is set automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    PageDesc *p;
    target_ulong addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the page becomes writable while it still holds translated
           code, we must invalidate that code */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}

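/* Usage sketch (illustrative, not part of the original file): a target
   mmap() emulation in user mode marks the new region valid with the
   matching page flags. PAGE_VALID/PAGE_READ/... come from cpu-all.h;
   the function and parameter names are hypothetical. */
#if 0
static void example_register_mapping(target_ulong start, target_ulong len)
{
    page_set_flags(start, start + len,
                   PAGE_VALID | PAGE_READ | PAGE_WRITE | PAGE_EXEC);
}
#endif
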
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}

/* call this function when system calls directly modify a memory area */
/* ??? This should be redundant now we have lock_user. */
void page_unprotect_range(target_ulong data, target_ulong data_size)
{
    target_ulong start, end, addr;

    start = data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr, 0, NULL);
    }
}

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
        p->phys_offset = phys_offset;
        if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
            (phys_offset & IO_MEM_ROMD))
            phys_offset += TARGET_PAGE_SIZE;
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}

/* XXX: temporary until new memory mapping API */
uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}

/* XXX: better than nothing */
ram_addr_t qemu_ram_alloc(unsigned int size)
{
    ram_addr_t addr;
    if ((phys_ram_alloc_offset + size) >= phys_ram_size) {
        fprintf(stderr, "Not enough memory (requested_size = %u, max memory = %d)\n",
                size, phys_ram_size);
        abort();
    }
    addr = phys_ram_alloc_offset;
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
    return addr;
}

void qemu_ram_free(ram_addr_t addr)
{
}

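/* Allocation sketch (illustrative, not part of the original file): a
   board init function carves guest RAM out of the preallocated
   phys_ram_base pool and maps it at guest physical address 0. The
   function name and 8 MB size are hypothetical; IO_MEM_RAM is 0, so a
   phys_offset with no I/O bits set is plain RAM. */
#if 0
static void example_board_init(void)
{
    ram_addr_t ram_offset;

    ram_offset = qemu_ram_alloc(8 * 1024 * 1024);
    cpu_register_physical_memory(0x00000000, 8 * 1024 * 1024,
                                 ram_offset | IO_MEM_RAM);
}
#endif
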
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read 0x%08x\n", (int)addr);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write 0x%08x = 0x%x\n", (int)addr, val);
#endif
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};

static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};

#if defined(CONFIG_SOFTMMU)
/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines. */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    return ldl_phys(addr);
}

/* Generate a debug exception if a watchpoint has been hit.
   Returns the real physical address of the access. addr will be a host
   address in the is_ram case. */
static target_ulong check_watchpoint(target_phys_addr_t addr)
{
    CPUState *env = cpu_single_env;
    target_ulong watch;
    target_ulong retaddr;
    int i;

    retaddr = addr;
    for (i = 0; i < env->nb_watchpoints; i++) {
        watch = env->watchpoint[i].vaddr;
        if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
            if (env->watchpoint[i].is_ram)
                retaddr = addr - (unsigned long)phys_ram_base;
            if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
                cpu_single_env->watchpoint_hit = i + 1;
                cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
                break;
            }
        }
    }
    return retaddr;
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
#endif

static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

#if defined(CONFIG_SOFTMMU)
    io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
                                          watch_mem_write, NULL);
#endif
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is positive, the
   corresponding io zone is modified. If it is zero or negative, a new
   io zone is allocated. The return value can be used with
   cpu_register_physical_memory(). -1 is returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
}

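/* Registration sketch (illustrative, not part of the original file): a
   minimal memory-mapped device. All 'mydev_*' names are hypothetical;
   the pattern is: build the per-width callback tables, obtain an
   io_index from cpu_register_io_memory(), then map it with
   cpu_register_physical_memory(). */
#if 0
static uint32_t mydev_read(void *opaque, target_phys_addr_t addr)
{
    return 0; /* a real device would decode 'addr' here */
}

static void mydev_write(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    /* a real device would decode 'addr' and latch 'val' here */
}

static CPUReadMemoryFunc *mydev_read_table[3] = {
    mydev_read, mydev_read, mydev_read,   /* byte, word, dword */
};

static CPUWriteMemoryFunc *mydev_write_table[3] = {
    mydev_write, mydev_write, mydev_write,
};

static void mydev_init(target_phys_addr_t base)
{
    int io;
    /* io_index 0 asks for a fresh io zone; the return value already has
       the IO_MEM_SHIFT applied, so it can be used as a phys_offset */
    io = cpu_register_io_memory(0, mydev_read_table, mydev_write_table, NULL);
    cpu_register_physical_memory(base, 0x1000, io);
}
#endif
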
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* only copy the part that lies in the current page */
            p = lock_user(addr, l, 0);
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            p = lock_user(addr, l, 1);
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

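/* Usage sketch (illustrative, not part of the original file): device
   emulation usually goes through the cpu_physical_memory_read()/write()
   wrappers from cpu-all.h rather than calling cpu_physical_memory_rw()
   directly. The function name and descriptor addresses below are
   hypothetical. */
#if 0
static void example_dma_copy(target_phys_addr_t src, target_phys_addr_t dst)
{
    uint8_t bounce[512];

    /* read a 512-byte block from guest memory ... */
    cpu_physical_memory_read(src, bounce, sizeof(bounce));
    /* ... and write it back somewhere else; dirty bits and code
       invalidation are handled inside cpu_physical_memory_rw() */
    cpu_physical_memory_write(dst, bounce, sizeof(bounce));
}
#endif
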
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}


/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}

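/* Usage sketch (illustrative, not part of the original file): a target
   MMU helper that sets a status bit in a guest PTE uses the _notdirty
   variant so that this bookkeeping write does not itself mark the page
   table page as modified. PTE_EXAMPLE_ACCESSED and the function name
   are hypothetical. */
#if 0
#define PTE_EXAMPLE_ACCESSED 0x20
static void example_mark_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte;

    pte = ldl_phys(pte_addr);
    stl_phys_notdirty(pte_addr, pte | PTE_EXAMPLE_ACCESSED);
}
#endif
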
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_ulong page, phys_addr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

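/* Usage sketch (illustrative, not part of the original file): this is
   the kind of call a debugger stub makes to peek at guest virtual
   memory; the function and parameter names are hypothetical. */
#if 0
static int example_peek_long(CPUState *env, target_ulong vaddr, uint32_t *out)
{
    /* returns -1 if no physical page is mapped at vaddr */
    return cpu_memory_rw_debug(env, vaddr, (uint8_t *)out, 4, 0);
}
#endif
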
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "TB count %d\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
}

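/* Usage sketch (illustrative, not part of the original file): fprintf
   happens to match the cpu_fprintf callback signature, so a quick dump
   of the JIT statistics to stderr looks like this; the wrapper name is
   hypothetical. */
#if 0
static void example_dump_jit_stats(void)
{
    dump_exec_info(stderr, fprintf);
}
#endif
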
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif