2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 #define WIN32_LEAN_AND_MEAN
25 #include <sys/types.h>
38 #include "qemu-common.h"
41 #if defined(CONFIG_USER_ONLY)
45 //#define DEBUG_TB_INVALIDATE
48 //#define DEBUG_UNASSIGNED
50 /* make various TB consistency checks */
51 //#define DEBUG_TB_CHECK
52 //#define DEBUG_TLB_CHECK
54 //#define DEBUG_IOPORT
55 //#define DEBUG_SUBPAGE
57 #if !defined(CONFIG_USER_ONLY)
58 /* TB consistency checks only implemented for usermode emulation. */
62 #define SMC_BITMAP_USE_THRESHOLD 10
64 #define MMAP_AREA_START 0x00000000
65 #define MMAP_AREA_END 0xa8000000
67 #if defined(TARGET_SPARC64)
68 #define TARGET_PHYS_ADDR_SPACE_BITS 41
69 #elif defined(TARGET_SPARC)
70 #define TARGET_PHYS_ADDR_SPACE_BITS 36
71 #elif defined(TARGET_ALPHA)
72 #define TARGET_PHYS_ADDR_SPACE_BITS 42
73 #define TARGET_VIRT_ADDR_SPACE_BITS 42
74 #elif defined(TARGET_PPC64)
75 #define TARGET_PHYS_ADDR_SPACE_BITS 42
76 #elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
77 #define TARGET_PHYS_ADDR_SPACE_BITS 42
78 #elif defined(TARGET_I386) && !defined(USE_KQEMU)
79 #define TARGET_PHYS_ADDR_SPACE_BITS 36
81 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
82 #define TARGET_PHYS_ADDR_SPACE_BITS 32
85 TranslationBlock *tbs;
86 int code_gen_max_blocks;
87 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
89 /* any access to the tbs or the page table must use this lock */
90 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
92 uint8_t code_gen_prologue[1024] __attribute__((aligned (32)));
93 uint8_t *code_gen_buffer;
94 unsigned long code_gen_buffer_size;
95 /* threshold to flush the translated code buffer */
96 unsigned long code_gen_buffer_max_size;
97 uint8_t *code_gen_ptr;
99 #if !defined(CONFIG_USER_ONLY)
100 ram_addr_t phys_ram_size;
102 uint8_t *phys_ram_base;
103 uint8_t *phys_ram_dirty;
104 static ram_addr_t phys_ram_alloc_offset = 0;
108 /* current CPU in the current thread. It is only valid inside
110 CPUState *cpu_single_env;
111 /* 0 = Do not count executed instructions.
112    1 = Precise instruction counting.
113 2 = Adaptive rate instruction counting. */
115 /* Current instruction counter. While executing translated code this may
116 include some instructions that have not yet been executed. */
119 typedef struct PageDesc {
120 /* list of TBs intersecting this ram page */
121 TranslationBlock *first_tb;
122     /* in order to optimize self-modifying code, we count the number of code
123        write accesses to a given page before switching to a bitmap */
124 unsigned int code_write_count;
125 uint8_t *code_bitmap;
126 #if defined(CONFIG_USER_ONLY)
131 typedef struct PhysPageDesc {
132 /* offset in host memory of the page + io_index in the low bits */
133 ram_addr_t phys_offset;
137 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
138 /* XXX: this is a temporary hack for alpha target.
139 * In the future, this is to be replaced by a multi-level table
140 * to actually be able to handle the complete 64 bits address space.
142 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
144 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
147 #define L1_SIZE (1 << L1_BITS)
148 #define L2_SIZE (1 << L2_BITS)
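/* Sizing example (illustrative only, assuming L2_BITS == 10 as defined above
   in the full source, and 4 KB target pages, i.e. TARGET_PAGE_BITS == 12):
   L1_BITS = 32 - 10 - 12 = 10, so l1_map holds 1 << 10 = 1024 pointers, each
   to a 1024-entry second-level table, together covering every page of a
   32-bit guest address space. */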
150 unsigned long qemu_real_host_page_size;
151 unsigned long qemu_host_page_bits;
152 unsigned long qemu_host_page_size;
153 unsigned long qemu_host_page_mask;
155 /* XXX: for system emulation, it could just be an array */
156 static PageDesc *l1_map[L1_SIZE];
157 PhysPageDesc **l1_phys_map;
159 #if !defined(CONFIG_USER_ONLY)
160 static void io_mem_init(void);
162 /* io memory support */
163 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
164 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
165 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
166 static int io_mem_nb;
167 static int io_mem_watch;
171 char *logfilename = "/tmp/qemu.log";
174 static int log_append = 0;
177 static int tlb_flush_count;
178 static int tb_flush_count;
179 static int tb_phys_invalidate_count;
181 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
182 typedef struct subpage_t {
183 target_phys_addr_t base;
184 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
185 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
186 void *opaque[TARGET_PAGE_SIZE][2][4];
190 static void map_exec(void *addr, long size)
193 VirtualProtect(addr, size,
194 PAGE_EXECUTE_READWRITE, &old_protect);
198 static void map_exec(void *addr, long size)
200 unsigned long start, end, page_size;
202 page_size = getpagesize();
203 start = (unsigned long)addr;
204 start &= ~(page_size - 1);
206 end = (unsigned long)addr + size;
207 end += page_size - 1;
208 end &= ~(page_size - 1);
210 mprotect((void *)start, end - start,
211 PROT_READ | PROT_WRITE | PROT_EXEC);
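    /* mprotect() only operates on whole host pages, so the requested range is
       widened: start is rounded down and end rounded up to host page
       boundaries before changing the protection. */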
215 static void page_init(void)
217 /* NOTE: we can always suppose that qemu_host_page_size >=
221 SYSTEM_INFO system_info;
224 GetSystemInfo(&system_info);
225 qemu_real_host_page_size = system_info.dwPageSize;
228 qemu_real_host_page_size = getpagesize();
230 if (qemu_host_page_size == 0)
231 qemu_host_page_size = qemu_real_host_page_size;
232 if (qemu_host_page_size < TARGET_PAGE_SIZE)
233 qemu_host_page_size = TARGET_PAGE_SIZE;
234 qemu_host_page_bits = 0;
235 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
236 qemu_host_page_bits++;
237 qemu_host_page_mask = ~(qemu_host_page_size - 1);
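    /* Example: with a 4 KB host page, qemu_host_page_bits ends up as 12 and
       qemu_host_page_mask as ~0xfff (0xfffff000 on a 32-bit host). */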
238 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
239 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
241 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
243 long long startaddr, endaddr;
248 last_brk = (unsigned long)sbrk(0);
249 f = fopen("/proc/self/maps", "r");
252 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
254 startaddr = MIN(startaddr,
255 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
256 endaddr = MIN(endaddr,
257 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
258 page_set_flags(startaddr & TARGET_PAGE_MASK,
259 TARGET_PAGE_ALIGN(endaddr),
270 static inline PageDesc *page_find_alloc(target_ulong index)
274 #if TARGET_LONG_BITS > 32
275 /* Host memory outside guest VM. For 32-bit targets we have already
276 excluded high addresses. */
277 if (index > ((target_ulong)L2_SIZE * L1_SIZE * TARGET_PAGE_SIZE))
280 lp = &l1_map[index >> L2_BITS];
283 /* allocate if not found */
284 #if defined(CONFIG_USER_ONLY)
286 size_t len = sizeof(PageDesc) * L2_SIZE;
287 /* Don't use qemu_malloc because it may recurse. */
288 p = mmap(0, len, PROT_READ | PROT_WRITE,
289 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
292 if (addr == (target_ulong)addr) {
293 page_set_flags(addr & TARGET_PAGE_MASK,
294 TARGET_PAGE_ALIGN(addr + len),
298 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
302 return p + (index & (L2_SIZE - 1));
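    /* 'index' is a target page number (virtual address >> TARGET_PAGE_BITS):
       the high bits select the l1_map slot, the low L2_BITS bits select the
       PageDesc within the second-level array. */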
305 static inline PageDesc *page_find(target_ulong index)
309 p = l1_map[index >> L2_BITS];
312 return p + (index & (L2_SIZE - 1));
315 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
320 p = (void **)l1_phys_map;
321 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
323 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
324 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
326 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
329 /* allocate if not found */
332 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
333 memset(p, 0, sizeof(void *) * L1_SIZE);
337 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
341 /* allocate if not found */
344 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
346 for (i = 0; i < L2_SIZE; i++)
347 pd[i].phys_offset = IO_MEM_UNASSIGNED;
349 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
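    /* For physical address spaces wider than 32 bits an extra indirection
       level is used above: the bits above (L1_BITS + L2_BITS) pick the
       top-level slot first. Entries default to IO_MEM_UNASSIGNED until
       memory is registered. */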
352 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
354 return phys_page_find_alloc(index, 0);
357 #if !defined(CONFIG_USER_ONLY)
358 static void tlb_protect_code(ram_addr_t ram_addr);
359 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
361 #define mmap_lock() do { } while(0)
362 #define mmap_unlock() do { } while(0)
365 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
367 #if defined(CONFIG_USER_ONLY)
368 /* Currently it is not recommended to allocate big chunks of data in
369    user mode. This will change when a dedicated libc is used. */
370 #define USE_STATIC_CODE_GEN_BUFFER
373 #ifdef USE_STATIC_CODE_GEN_BUFFER
374 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
377 void code_gen_alloc(unsigned long tb_size)
379 #ifdef USE_STATIC_CODE_GEN_BUFFER
380 code_gen_buffer = static_code_gen_buffer;
381 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
382 map_exec(code_gen_buffer, code_gen_buffer_size);
384 code_gen_buffer_size = tb_size;
385 if (code_gen_buffer_size == 0) {
386 #if defined(CONFIG_USER_ONLY)
387 /* in user mode, phys_ram_size is not meaningful */
388 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
390         /* XXX: needs adjustments */
391 code_gen_buffer_size = (int)(phys_ram_size / 4);
394 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
395 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
396 /* The code gen buffer location may have constraints depending on
397 the host cpu and OS */
398 #if defined(__linux__)
401 flags = MAP_PRIVATE | MAP_ANONYMOUS;
402 #if defined(__x86_64__)
404 /* Cannot map more than that */
405 if (code_gen_buffer_size > (800 * 1024 * 1024))
406 code_gen_buffer_size = (800 * 1024 * 1024);
408 code_gen_buffer = mmap(NULL, code_gen_buffer_size,
409 PROT_WRITE | PROT_READ | PROT_EXEC,
411 if (code_gen_buffer == MAP_FAILED) {
412 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
417 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
418 if (!code_gen_buffer) {
419 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
422 map_exec(code_gen_buffer, code_gen_buffer_size);
424 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
425 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
426 code_gen_buffer_max_size = code_gen_buffer_size -
427 code_gen_max_block_size();
428 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
429 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
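    /* Note on the sizing above: code_gen_buffer_max_size leaves room for one
       maximally sized block so generation never overruns the buffer, and
       code_gen_max_blocks bounds the tbs[] array using the average block
       size. */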
432 /* Must be called before using the QEMU cpus. 'tb_size' is the size
433 (in bytes) allocated to the translation buffer. Zero means default
435 void cpu_exec_init_all(unsigned long tb_size)
438 code_gen_alloc(tb_size);
439 code_gen_ptr = code_gen_buffer;
441 #if !defined(CONFIG_USER_ONLY)
446 void cpu_exec_init(CPUState *env)
451 env->next_cpu = NULL;
454 while (*penv != NULL) {
455 penv = (CPUState **)&(*penv)->next_cpu;
458 env->cpu_index = cpu_index;
459 env->nb_watchpoints = 0;
461 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
462 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
463 cpu_save, cpu_load, env);
467 static inline void invalidate_page_bitmap(PageDesc *p)
469 if (p->code_bitmap) {
470 qemu_free(p->code_bitmap);
471 p->code_bitmap = NULL;
473 p->code_write_count = 0;
476 /* set to NULL all the 'first_tb' fields in all PageDescs */
477 static void page_flush_tb(void)
482 for(i = 0; i < L1_SIZE; i++) {
485 for(j = 0; j < L2_SIZE; j++) {
487 invalidate_page_bitmap(p);
494 /* flush all the translation blocks */
495 /* XXX: tb_flush is currently not thread safe */
496 void tb_flush(CPUState *env1)
499 #if defined(DEBUG_FLUSH)
500 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
501 (unsigned long)(code_gen_ptr - code_gen_buffer),
503 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
505 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
506 cpu_abort(env1, "Internal error: code buffer overflow\n");
510 for(env = first_cpu; env != NULL; env = env->next_cpu) {
511 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
514 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
517 code_gen_ptr = code_gen_buffer;
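    /* Flushing is cheap: the per-CPU tb_jmp_cache and the physical hash table
       are simply cleared and code_gen_ptr rewinds to the start of the buffer;
       all code is retranslated on demand. */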
518 /* XXX: flush processor icache at this point if cache flush is
523 #ifdef DEBUG_TB_CHECK
525 static void tb_invalidate_check(target_ulong address)
527 TranslationBlock *tb;
529 address &= TARGET_PAGE_MASK;
530 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
531 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
532 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
533 address >= tb->pc + tb->size)) {
534 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
535 address, (long)tb->pc, tb->size);
541 /* verify that all the pages have correct rights for code */
542 static void tb_page_check(void)
544 TranslationBlock *tb;
545 int i, flags1, flags2;
547 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
548 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
549 flags1 = page_get_flags(tb->pc);
550 flags2 = page_get_flags(tb->pc + tb->size - 1);
551 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
552 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
553 (long)tb->pc, tb->size, flags1, flags2);
559 void tb_jmp_check(TranslationBlock *tb)
561 TranslationBlock *tb1;
564 /* suppress any remaining jumps to this TB */
568 tb1 = (TranslationBlock *)((long)tb1 & ~3);
571 tb1 = tb1->jmp_next[n1];
573 /* check end of list */
575 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
581 /* invalidate one TB */
582 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
585 TranslationBlock *tb1;
589 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
592 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
596 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
598 TranslationBlock *tb1;
604 tb1 = (TranslationBlock *)((long)tb1 & ~3);
606 *ptb = tb1->page_next[n1];
609 ptb = &tb1->page_next[n1];
613 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
615 TranslationBlock *tb1, **ptb;
618 ptb = &tb->jmp_next[n];
621 /* find tb(n) in circular list */
625 tb1 = (TranslationBlock *)((long)tb1 & ~3);
626 if (n1 == n && tb1 == tb)
629 ptb = &tb1->jmp_first;
631 ptb = &tb1->jmp_next[n1];
634     /* now we can remove tb(n) from the list */
635 *ptb = tb->jmp_next[n];
637 tb->jmp_next[n] = NULL;
641 /* reset the jump entry 'n' of a TB so that it is not chained to
643 static inline void tb_reset_jump(TranslationBlock *tb, int n)
645 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
648 void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
653 target_phys_addr_t phys_pc;
654 TranslationBlock *tb1, *tb2;
656 /* remove the TB from the hash list */
657 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
658 h = tb_phys_hash_func(phys_pc);
659 tb_remove(&tb_phys_hash[h], tb,
660 offsetof(TranslationBlock, phys_hash_next));
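    /* The physical hash table is keyed on the physical address of the TB's
       first instruction, so a block can still be found (and invalidated)
       after virtual mappings and tb_jmp_cache entries have been flushed. */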
662 /* remove the TB from the page list */
663 if (tb->page_addr[0] != page_addr) {
664 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
665 tb_page_remove(&p->first_tb, tb);
666 invalidate_page_bitmap(p);
668 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
669 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
670 tb_page_remove(&p->first_tb, tb);
671 invalidate_page_bitmap(p);
674 tb_invalidated_flag = 1;
676 /* remove the TB from the hash list */
677 h = tb_jmp_cache_hash_func(tb->pc);
678 for(env = first_cpu; env != NULL; env = env->next_cpu) {
679 if (env->tb_jmp_cache[h] == tb)
680 env->tb_jmp_cache[h] = NULL;
683     /* remove this TB from the two jump lists */
684 tb_jmp_remove(tb, 0);
685 tb_jmp_remove(tb, 1);
687 /* suppress any remaining jumps to this TB */
693 tb1 = (TranslationBlock *)((long)tb1 & ~3);
694 tb2 = tb1->jmp_next[n1];
695 tb_reset_jump(tb1, n1);
696 tb1->jmp_next[n1] = NULL;
699 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
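    /* jmp_first heads the list of TBs that jump to this one; the low two bits
       of each list pointer encode which jump slot (0 or 1) of the pointing TB
       is used, and the value (tb | 2) stored here doubles as the
       end-of-list / empty-list marker. */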
701 tb_phys_invalidate_count++;
704 static inline void set_bits(uint8_t *tab, int start, int len)
710 mask = 0xff << (start & 7);
711 if ((start & ~7) == (end & ~7)) {
713 mask &= ~(0xff << (end & 7));
718 start = (start + 8) & ~7;
720 while (start < end1) {
725 mask = ~(0xff << (end & 7));
731 static void build_page_bitmap(PageDesc *p)
733 int n, tb_start, tb_end;
734 TranslationBlock *tb;
736 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
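    /* one bit per byte of the guest page: a set bit means that byte is
       covered by at least one translated block */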
743 tb = (TranslationBlock *)((long)tb & ~3);
744 /* NOTE: this is subtle as a TB may span two physical pages */
746 /* NOTE: tb_end may be after the end of the page, but
747 it is not a problem */
748 tb_start = tb->pc & ~TARGET_PAGE_MASK;
749 tb_end = tb_start + tb->size;
750 if (tb_end > TARGET_PAGE_SIZE)
751 tb_end = TARGET_PAGE_SIZE;
754 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
756 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
757 tb = tb->page_next[n];
761 TranslationBlock *tb_gen_code(CPUState *env,
762 target_ulong pc, target_ulong cs_base,
763 int flags, int cflags)
765 TranslationBlock *tb;
767 target_ulong phys_pc, phys_page2, virt_page2;
770 phys_pc = get_phys_addr_code(env, pc);
773 /* flush must be done */
775 /* cannot fail at this point */
777 /* Don't forget to invalidate previous TB info. */
778 tb_invalidated_flag = 1;
780 tc_ptr = code_gen_ptr;
782 tb->cs_base = cs_base;
785 cpu_gen_code(env, tb, &code_gen_size);
786 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
788 /* check next page if needed */
789 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
791 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
792 phys_page2 = get_phys_addr_code(env, virt_page2);
794 tb_link_phys(tb, phys_pc, phys_page2);
798 /* invalidate all TBs which intersect with the target physical page
799    starting in range [start, end). NOTE: start and end must refer to
800 the same physical page. 'is_cpu_write_access' should be true if called
801 from a real cpu write access: the virtual CPU will exit the current
802 TB if code is modified inside this TB. */
803 void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
804 int is_cpu_write_access)
806 int n, current_tb_modified, current_tb_not_found, current_flags;
807 CPUState *env = cpu_single_env;
809 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
810 target_ulong tb_start, tb_end;
811 target_ulong current_pc, current_cs_base;
813 p = page_find(start >> TARGET_PAGE_BITS);
816 if (!p->code_bitmap &&
817 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
818 is_cpu_write_access) {
819 /* build code bitmap */
820 build_page_bitmap(p);
823     /* we remove all the TBs in the range [start, end) */
824 /* XXX: see if in some cases it could be faster to invalidate all the code */
825 current_tb_not_found = is_cpu_write_access;
826 current_tb_modified = 0;
827 current_tb = NULL; /* avoid warning */
828 current_pc = 0; /* avoid warning */
829 current_cs_base = 0; /* avoid warning */
830 current_flags = 0; /* avoid warning */
834 tb = (TranslationBlock *)((long)tb & ~3);
835 tb_next = tb->page_next[n];
836 /* NOTE: this is subtle as a TB may span two physical pages */
838 /* NOTE: tb_end may be after the end of the page, but
839 it is not a problem */
840 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
841 tb_end = tb_start + tb->size;
843 tb_start = tb->page_addr[1];
844 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
846 if (!(tb_end <= start || tb_start >= end)) {
847 #ifdef TARGET_HAS_PRECISE_SMC
848 if (current_tb_not_found) {
849 current_tb_not_found = 0;
851 if (env->mem_io_pc) {
852 /* now we have a real cpu fault */
853 current_tb = tb_find_pc(env->mem_io_pc);
856 if (current_tb == tb &&
857 (current_tb->cflags & CF_COUNT_MASK) != 1) {
858 /* If we are modifying the current TB, we must stop
859 its execution. We could be more precise by checking
860 that the modification is after the current PC, but it
861 would require a specialized function to partially
862 restore the CPU state */
864 current_tb_modified = 1;
865 cpu_restore_state(current_tb, env,
866 env->mem_io_pc, NULL);
867 #if defined(TARGET_I386)
868 current_flags = env->hflags;
869 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
870 current_cs_base = (target_ulong)env->segs[R_CS].base;
871 current_pc = current_cs_base + env->eip;
873 #error unsupported CPU
876 #endif /* TARGET_HAS_PRECISE_SMC */
877 /* we need to do that to handle the case where a signal
878 occurs while doing tb_phys_invalidate() */
881 saved_tb = env->current_tb;
882 env->current_tb = NULL;
884 tb_phys_invalidate(tb, -1);
886 env->current_tb = saved_tb;
887 if (env->interrupt_request && env->current_tb)
888 cpu_interrupt(env, env->interrupt_request);
893 #if !defined(CONFIG_USER_ONLY)
894 /* if no code remaining, no need to continue to use slow writes */
896 invalidate_page_bitmap(p);
897 if (is_cpu_write_access) {
898 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
902 #ifdef TARGET_HAS_PRECISE_SMC
903 if (current_tb_modified) {
904 /* we generate a block containing just the instruction
905 modifying the memory. It will ensure that it cannot modify
907 env->current_tb = NULL;
908 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
909 cpu_resume_from_signal(env, NULL);
914 /* len must be <= 8 and start must be a multiple of len */
915 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
922 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
923 cpu_single_env->mem_io_vaddr, len,
925 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
929 p = page_find(start >> TARGET_PAGE_BITS);
932 if (p->code_bitmap) {
933 offset = start & ~TARGET_PAGE_MASK;
934 b = p->code_bitmap[offset >> 3] >> (offset & 7);
935 if (b & ((1 << len) - 1))
939 tb_invalidate_phys_page_range(start, start + len, 1);
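    /* When the page has a code bitmap and none of the 'len' written bytes
       overlap translated code, the write is allowed through without any
       invalidation; otherwise the range invalidation above runs. */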
943 #if !defined(CONFIG_SOFTMMU)
944 static void tb_invalidate_phys_page(target_phys_addr_t addr,
945 unsigned long pc, void *puc)
947 int n, current_flags, current_tb_modified;
948 target_ulong current_pc, current_cs_base;
950 TranslationBlock *tb, *current_tb;
951 #ifdef TARGET_HAS_PRECISE_SMC
952 CPUState *env = cpu_single_env;
955 addr &= TARGET_PAGE_MASK;
956 p = page_find(addr >> TARGET_PAGE_BITS);
960 current_tb_modified = 0;
962 current_pc = 0; /* avoid warning */
963 current_cs_base = 0; /* avoid warning */
964 current_flags = 0; /* avoid warning */
965 #ifdef TARGET_HAS_PRECISE_SMC
967 current_tb = tb_find_pc(pc);
972 tb = (TranslationBlock *)((long)tb & ~3);
973 #ifdef TARGET_HAS_PRECISE_SMC
974 if (current_tb == tb &&
975 (current_tb->cflags & CF_COUNT_MASK) != 1) {
976 /* If we are modifying the current TB, we must stop
977 its execution. We could be more precise by checking
978 that the modification is after the current PC, but it
979 would require a specialized function to partially
980 restore the CPU state */
982 current_tb_modified = 1;
983 cpu_restore_state(current_tb, env, pc, puc);
984 #if defined(TARGET_I386)
985 current_flags = env->hflags;
986 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
987 current_cs_base = (target_ulong)env->segs[R_CS].base;
988 current_pc = current_cs_base + env->eip;
990 #error unsupported CPU
993 #endif /* TARGET_HAS_PRECISE_SMC */
994 tb_phys_invalidate(tb, addr);
995 tb = tb->page_next[n];
998 #ifdef TARGET_HAS_PRECISE_SMC
999 if (current_tb_modified) {
1000 /* we generate a block containing just the instruction
1001 modifying the memory. It will ensure that it cannot modify
1003 env->current_tb = NULL;
1004 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1005 cpu_resume_from_signal(env, puc);
1011 /* add the TB to the target page and protect it if necessary */
1012 static inline void tb_alloc_page(TranslationBlock *tb,
1013 unsigned int n, target_ulong page_addr)
1016 TranslationBlock *last_first_tb;
1018 tb->page_addr[n] = page_addr;
1019 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1020 tb->page_next[n] = p->first_tb;
1021 last_first_tb = p->first_tb;
1022 p->first_tb = (TranslationBlock *)((long)tb | n);
1023 invalidate_page_bitmap(p);
1025 #if defined(TARGET_HAS_SMC) || 1
1027 #if defined(CONFIG_USER_ONLY)
1028 if (p->flags & PAGE_WRITE) {
1033         /* force the host page to be non-writable (writes will take a
1034            page fault + mprotect overhead) */
1035 page_addr &= qemu_host_page_mask;
1037 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1038 addr += TARGET_PAGE_SIZE) {
1040 p2 = page_find (addr >> TARGET_PAGE_BITS);
1044 p2->flags &= ~PAGE_WRITE;
1045 page_get_flags(addr);
1047 mprotect(g2h(page_addr), qemu_host_page_size,
1048 (prot & PAGE_BITS) & ~PAGE_WRITE);
1049 #ifdef DEBUG_TB_INVALIDATE
1050 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1055 /* if some code is already present, then the pages are already
1056 protected. So we handle the case where only the first TB is
1057 allocated in a physical page */
1058 if (!last_first_tb) {
1059 tlb_protect_code(page_addr);
1063 #endif /* TARGET_HAS_SMC */
1066 /* Allocate a new translation block. Flush the translation buffer if
1067 too many translation blocks or too much generated code. */
1068 TranslationBlock *tb_alloc(target_ulong pc)
1070 TranslationBlock *tb;
1072 if (nb_tbs >= code_gen_max_blocks ||
1073 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1075 tb = &tbs[nb_tbs++];
1081 void tb_free(TranslationBlock *tb)
1083     /* In practice this is mostly used for single-use temporary TBs.
1084 Ignore the hard cases and just back up if this TB happens to
1085 be the last one generated. */
1086 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1087 code_gen_ptr = tb->tc_ptr;
1092 /* add a new TB and link it to the physical page tables. phys_page2 is
1093 (-1) to indicate that only one page contains the TB. */
1094 void tb_link_phys(TranslationBlock *tb,
1095 target_ulong phys_pc, target_ulong phys_page2)
1098 TranslationBlock **ptb;
1100 /* Grab the mmap lock to stop another thread invalidating this TB
1101 before we are done. */
1103 /* add in the physical hash table */
1104 h = tb_phys_hash_func(phys_pc);
1105 ptb = &tb_phys_hash[h];
1106 tb->phys_hash_next = *ptb;
1109 /* add in the page list */
1110 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1111 if (phys_page2 != -1)
1112 tb_alloc_page(tb, 1, phys_page2);
1114 tb->page_addr[1] = -1;
1116 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1117 tb->jmp_next[0] = NULL;
1118 tb->jmp_next[1] = NULL;
1120 /* init original jump addresses */
1121 if (tb->tb_next_offset[0] != 0xffff)
1122 tb_reset_jump(tb, 0);
1123 if (tb->tb_next_offset[1] != 0xffff)
1124 tb_reset_jump(tb, 1);
1126 #ifdef DEBUG_TB_CHECK
1132 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1133 tb[1].tc_ptr. Return NULL if not found */
1134 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1136 int m_min, m_max, m;
1138 TranslationBlock *tb;
1142 if (tc_ptr < (unsigned long)code_gen_buffer ||
1143 tc_ptr >= (unsigned long)code_gen_ptr)
1145 /* binary search (cf Knuth) */
1148 while (m_min <= m_max) {
1149 m = (m_min + m_max) >> 1;
1151 v = (unsigned long)tb->tc_ptr;
1154 else if (tc_ptr < v) {
1163 static void tb_reset_jump_recursive(TranslationBlock *tb);
1165 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1167 TranslationBlock *tb1, *tb_next, **ptb;
1170 tb1 = tb->jmp_next[n];
1172 /* find head of list */
1175 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1178 tb1 = tb1->jmp_next[n1];
1180     /* we are now sure that tb jumps to tb1 */
1183 /* remove tb from the jmp_first list */
1184 ptb = &tb_next->jmp_first;
1188 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1189 if (n1 == n && tb1 == tb)
1191 ptb = &tb1->jmp_next[n1];
1193 *ptb = tb->jmp_next[n];
1194 tb->jmp_next[n] = NULL;
1196 /* suppress the jump to next tb in generated code */
1197 tb_reset_jump(tb, n);
1199     /* recursively reset the jumps of the TB we would have jumped to */
1200 tb_reset_jump_recursive(tb_next);
1204 static void tb_reset_jump_recursive(TranslationBlock *tb)
1206 tb_reset_jump_recursive2(tb, 0);
1207 tb_reset_jump_recursive2(tb, 1);
1210 #if defined(TARGET_HAS_ICE)
1211 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1213 target_phys_addr_t addr;
1215 ram_addr_t ram_addr;
1218 addr = cpu_get_phys_page_debug(env, pc);
1219 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1221 pd = IO_MEM_UNASSIGNED;
1223 pd = p->phys_offset;
1225 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1226 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1230 /* Add a watchpoint. */
1231 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, int type)
1235 for (i = 0; i < env->nb_watchpoints; i++) {
1236 if (addr == env->watchpoint[i].vaddr)
1239 if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1242 i = env->nb_watchpoints++;
1243 env->watchpoint[i].vaddr = addr;
1244 env->watchpoint[i].type = type;
1245 tlb_flush_page(env, addr);
1246 /* FIXME: This flush is needed because of the hack to make memory ops
1247 terminate the TB. It can be removed once the proper IO trap and
1248 re-execute bits are in. */
1253 /* Remove a watchpoint. */
1254 int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1258 for (i = 0; i < env->nb_watchpoints; i++) {
1259 if (addr == env->watchpoint[i].vaddr) {
1260 env->nb_watchpoints--;
1261 env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1262 tlb_flush_page(env, addr);
1269 /* Remove all watchpoints. */
1270 void cpu_watchpoint_remove_all(CPUState *env) {
1273 for (i = 0; i < env->nb_watchpoints; i++) {
1274 tlb_flush_page(env, env->watchpoint[i].vaddr);
1276 env->nb_watchpoints = 0;
1279 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1280 breakpoint is reached */
1281 int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1283 #if defined(TARGET_HAS_ICE)
1286 for(i = 0; i < env->nb_breakpoints; i++) {
1287 if (env->breakpoints[i] == pc)
1291 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1293 env->breakpoints[env->nb_breakpoints++] = pc;
1295 breakpoint_invalidate(env, pc);
1302 /* remove all breakpoints */
1303 void cpu_breakpoint_remove_all(CPUState *env) {
1304 #if defined(TARGET_HAS_ICE)
1306 for(i = 0; i < env->nb_breakpoints; i++) {
1307 breakpoint_invalidate(env, env->breakpoints[i]);
1309 env->nb_breakpoints = 0;
1313 /* remove a breakpoint */
1314 int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1316 #if defined(TARGET_HAS_ICE)
1318 for(i = 0; i < env->nb_breakpoints; i++) {
1319 if (env->breakpoints[i] == pc)
1324 env->nb_breakpoints--;
1325 if (i < env->nb_breakpoints)
1326 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1328 breakpoint_invalidate(env, pc);
1335 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1336 CPU loop after each instruction */
1337 void cpu_single_step(CPUState *env, int enabled)
1339 #if defined(TARGET_HAS_ICE)
1340 if (env->singlestep_enabled != enabled) {
1341 env->singlestep_enabled = enabled;
1342         /* must flush all the translated code to avoid inconsistencies */
1343 /* XXX: only flush what is necessary */
1349 /* enable or disable low level logging */
1350 void cpu_set_log(int log_flags)
1352 loglevel = log_flags;
1353 if (loglevel && !logfile) {
1354 logfile = fopen(logfilename, log_append ? "a" : "w");
1356 perror(logfilename);
1359 #if !defined(CONFIG_SOFTMMU)
1360 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1362 static uint8_t logfile_buf[4096];
1363 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1366 setvbuf(logfile, NULL, _IOLBF, 0);
1370 if (!loglevel && logfile) {
1376 void cpu_set_log_filename(const char *filename)
1378 logfilename = strdup(filename);
1383 cpu_set_log(loglevel);
1386 /* mask must never be zero, except for A20 change call */
1387 void cpu_interrupt(CPUState *env, int mask)
1389 #if !defined(USE_NPTL)
1390 TranslationBlock *tb;
1391 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1395 old_mask = env->interrupt_request;
1396 /* FIXME: This is probably not threadsafe. A different thread could
1397        be in the middle of a read-modify-write operation. */
1398 env->interrupt_request |= mask;
1399 #if defined(USE_NPTL)
1400 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1401 problem and hope the cpu will stop of its own accord. For userspace
1402 emulation this often isn't actually as bad as it sounds. Often
1403 signals are used primarily to interrupt blocking syscalls. */
1406 env->icount_decr.u16.high = 0x8000;
1407 #ifndef CONFIG_USER_ONLY
1408 /* CPU_INTERRUPT_EXIT isn't a real interrupt. It just means
1409 an async event happened and we need to process it. */
1411 && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
1412 cpu_abort(env, "Raised interrupt while not in I/O function");
1416 tb = env->current_tb;
1417 /* if the cpu is currently executing code, we must unlink it and
1418 all the potentially executing TB */
1419 if (tb && !testandset(&interrupt_lock)) {
1420 env->current_tb = NULL;
1421 tb_reset_jump_recursive(tb);
1422 resetlock(&interrupt_lock);
1428 void cpu_reset_interrupt(CPUState *env, int mask)
1430 env->interrupt_request &= ~mask;
1433 CPULogItem cpu_log_items[] = {
1434 { CPU_LOG_TB_OUT_ASM, "out_asm",
1435 "show generated host assembly code for each compiled TB" },
1436 { CPU_LOG_TB_IN_ASM, "in_asm",
1437 "show target assembly code for each compiled TB" },
1438 { CPU_LOG_TB_OP, "op",
1439 "show micro ops for each compiled TB" },
1440 { CPU_LOG_TB_OP_OPT, "op_opt",
1443 "before eflags optimization and "
1445 "after liveness analysis" },
1446 { CPU_LOG_INT, "int",
1447 "show interrupts/exceptions in short format" },
1448 { CPU_LOG_EXEC, "exec",
1449 "show trace before each executed TB (lots of logs)" },
1450 { CPU_LOG_TB_CPU, "cpu",
1451 "show CPU state before block translation" },
1453 { CPU_LOG_PCALL, "pcall",
1454 "show protected mode far calls/returns/exceptions" },
1457 { CPU_LOG_IOPORT, "ioport",
1458 "show all i/o ports accesses" },
1463 static int cmp1(const char *s1, int n, const char *s2)
1465 if (strlen(s2) != n)
1467 return memcmp(s1, s2, n) == 0;
1470 /* takes a comma separated list of log masks. Return 0 if error. */
1471 int cpu_str_to_log_mask(const char *str)
1480 p1 = strchr(p, ',');
1483 if(cmp1(p,p1-p,"all")) {
1484 for(item = cpu_log_items; item->mask != 0; item++) {
1488 for(item = cpu_log_items; item->mask != 0; item++) {
1489 if (cmp1(p, p1 - p, item->name))
1503 void cpu_abort(CPUState *env, const char *fmt, ...)
1510 fprintf(stderr, "qemu: fatal: ");
1511 vfprintf(stderr, fmt, ap);
1512 fprintf(stderr, "\n");
1514 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1516 cpu_dump_state(env, stderr, fprintf, 0);
1519 fprintf(logfile, "qemu: fatal: ");
1520 vfprintf(logfile, fmt, ap2);
1521 fprintf(logfile, "\n");
1523 cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1525 cpu_dump_state(env, logfile, fprintf, 0);
1535 CPUState *cpu_copy(CPUState *env)
1537 CPUState *new_env = cpu_init(env->cpu_model_str);
1538 /* preserve chaining and index */
1539 CPUState *next_cpu = new_env->next_cpu;
1540 int cpu_index = new_env->cpu_index;
1541 memcpy(new_env, env, sizeof(CPUState));
1542 new_env->next_cpu = next_cpu;
1543 new_env->cpu_index = cpu_index;
1547 #if !defined(CONFIG_USER_ONLY)
1549 static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1553 /* Discard jump cache entries for any tb which might potentially
1554 overlap the flushed page. */
1555 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1556 memset (&env->tb_jmp_cache[i], 0,
1557 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1559 i = tb_jmp_cache_hash_page(addr);
1560 memset (&env->tb_jmp_cache[i], 0,
1561 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1564 /* NOTE: if flush_global is true, also flush global entries (not
1566 void tlb_flush(CPUState *env, int flush_global)
1570 #if defined(DEBUG_TLB)
1571 printf("tlb_flush:\n");
1573 /* must reset current TB so that interrupts cannot modify the
1574 links while we are modifying them */
1575 env->current_tb = NULL;
1577 for(i = 0; i < CPU_TLB_SIZE; i++) {
1578 env->tlb_table[0][i].addr_read = -1;
1579 env->tlb_table[0][i].addr_write = -1;
1580 env->tlb_table[0][i].addr_code = -1;
1581 env->tlb_table[1][i].addr_read = -1;
1582 env->tlb_table[1][i].addr_write = -1;
1583 env->tlb_table[1][i].addr_code = -1;
1584 #if (NB_MMU_MODES >= 3)
1585 env->tlb_table[2][i].addr_read = -1;
1586 env->tlb_table[2][i].addr_write = -1;
1587 env->tlb_table[2][i].addr_code = -1;
1588 #if (NB_MMU_MODES == 4)
1589 env->tlb_table[3][i].addr_read = -1;
1590 env->tlb_table[3][i].addr_write = -1;
1591 env->tlb_table[3][i].addr_code = -1;
1596 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1599 if (env->kqemu_enabled) {
1600 kqemu_flush(env, flush_global);
1606 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1608 if (addr == (tlb_entry->addr_read &
1609 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1610 addr == (tlb_entry->addr_write &
1611 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1612 addr == (tlb_entry->addr_code &
1613 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1614 tlb_entry->addr_read = -1;
1615 tlb_entry->addr_write = -1;
1616 tlb_entry->addr_code = -1;
1620 void tlb_flush_page(CPUState *env, target_ulong addr)
1624 #if defined(DEBUG_TLB)
1625 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1627 /* must reset current TB so that interrupts cannot modify the
1628 links while we are modifying them */
1629 env->current_tb = NULL;
1631 addr &= TARGET_PAGE_MASK;
1632 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1633 tlb_flush_entry(&env->tlb_table[0][i], addr);
1634 tlb_flush_entry(&env->tlb_table[1][i], addr);
1635 #if (NB_MMU_MODES >= 3)
1636 tlb_flush_entry(&env->tlb_table[2][i], addr);
1637 #if (NB_MMU_MODES == 4)
1638 tlb_flush_entry(&env->tlb_table[3][i], addr);
1642 tlb_flush_jmp_cache(env, addr);
1645 if (env->kqemu_enabled) {
1646 kqemu_flush_page(env, addr);
1651 /* update the TLBs so that writes to code in the virtual page 'addr'
1653 static void tlb_protect_code(ram_addr_t ram_addr)
1655 cpu_physical_memory_reset_dirty(ram_addr,
1656 ram_addr + TARGET_PAGE_SIZE,
1660 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1661 tested for self modifying code */
1662 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1665 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
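    /* The per-page dirty byte drives SMC detection: tlb_protect_code clears
       CODE_DIRTY_FLAG so that writes to the page are forced through the slow
       notdirty/IO path, and setting the flag again here re-enables direct
       writes once the translated code in the page has been invalidated. */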
1668 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1669 unsigned long start, unsigned long length)
1672 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1673 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1674 if ((addr - start) < length) {
1675 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1680 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1684 unsigned long length, start1;
1688 start &= TARGET_PAGE_MASK;
1689 end = TARGET_PAGE_ALIGN(end);
1691 length = end - start;
1694 len = length >> TARGET_PAGE_BITS;
1696 /* XXX: should not depend on cpu context */
1698 if (env->kqemu_enabled) {
1701 for(i = 0; i < len; i++) {
1702 kqemu_set_notdirty(env, addr);
1703 addr += TARGET_PAGE_SIZE;
1707 mask = ~dirty_flags;
1708 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1709 for(i = 0; i < len; i++)
1712 /* we modify the TLB cache so that the dirty bit will be set again
1713 when accessing the range */
1714 start1 = start + (unsigned long)phys_ram_base;
1715 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1716 for(i = 0; i < CPU_TLB_SIZE; i++)
1717 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1718 for(i = 0; i < CPU_TLB_SIZE; i++)
1719 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1720 #if (NB_MMU_MODES >= 3)
1721 for(i = 0; i < CPU_TLB_SIZE; i++)
1722 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1723 #if (NB_MMU_MODES == 4)
1724 for(i = 0; i < CPU_TLB_SIZE; i++)
1725 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1731 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1733 ram_addr_t ram_addr;
1735 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1736 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1737 tlb_entry->addend - (unsigned long)phys_ram_base;
1738 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1739 tlb_entry->addr_write |= TLB_NOTDIRTY;
1744 /* update the TLB according to the current state of the dirty bits */
1745 void cpu_tlb_update_dirty(CPUState *env)
1748 for(i = 0; i < CPU_TLB_SIZE; i++)
1749 tlb_update_dirty(&env->tlb_table[0][i]);
1750 for(i = 0; i < CPU_TLB_SIZE; i++)
1751 tlb_update_dirty(&env->tlb_table[1][i]);
1752 #if (NB_MMU_MODES >= 3)
1753 for(i = 0; i < CPU_TLB_SIZE; i++)
1754 tlb_update_dirty(&env->tlb_table[2][i]);
1755 #if (NB_MMU_MODES == 4)
1756 for(i = 0; i < CPU_TLB_SIZE; i++)
1757 tlb_update_dirty(&env->tlb_table[3][i]);
1762 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1764 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1765 tlb_entry->addr_write = vaddr;
1768 /* update the TLB corresponding to virtual page vaddr
1769 so that it is no longer dirty */
1770 static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1774 vaddr &= TARGET_PAGE_MASK;
1775 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1776 tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
1777 tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
1778 #if (NB_MMU_MODES >= 3)
1779 tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
1780 #if (NB_MMU_MODES == 4)
1781 tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
1786 /* add a new TLB entry. At most one entry for a given virtual address
1787 is permitted. Return 0 if OK or 2 if the page could not be mapped
1788 (can only happen in non SOFTMMU mode for I/O pages or pages
1789 conflicting with the host address space). */
1790 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1791 target_phys_addr_t paddr, int prot,
1792 int mmu_idx, int is_softmmu)
1797 target_ulong address;
1798 target_ulong code_address;
1799 target_phys_addr_t addend;
1803 target_phys_addr_t iotlb;
1805 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1807 pd = IO_MEM_UNASSIGNED;
1809 pd = p->phys_offset;
1811 #if defined(DEBUG_TLB)
1812 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1813 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
1818 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1819 /* IO memory case (romd handled later) */
1820 address |= TLB_MMIO;
1822 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1823 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1825 iotlb = pd & TARGET_PAGE_MASK;
1826 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
1827 iotlb |= IO_MEM_NOTDIRTY;
1829 iotlb |= IO_MEM_ROM;
1831         /* IO handlers are currently passed a physical address.
1832 It would be nice to pass an offset from the base address
1833 of that region. This would avoid having to special case RAM,
1834 and avoid full address decoding in every device.
1835 We can't use the high bits of pd for this because
1836 IO_MEM_ROMD uses these as a ram address. */
1837 iotlb = (pd & ~TARGET_PAGE_MASK) + paddr;
1840 code_address = address;
1841 /* Make accesses to pages with watchpoints go via the
1842 watchpoint trap routines. */
1843 for (i = 0; i < env->nb_watchpoints; i++) {
1844 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
1845 iotlb = io_mem_watch + paddr;
1846 /* TODO: The memory case can be optimized by not trapping
1847 reads of pages with a write breakpoint. */
1848 address |= TLB_MMIO;
1852 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1853 env->iotlb[mmu_idx][index] = iotlb - vaddr;
1854 te = &env->tlb_table[mmu_idx][index];
1855 te->addend = addend - vaddr;
1856 if (prot & PAGE_READ) {
1857 te->addr_read = address;
1862 if (prot & PAGE_EXEC) {
1863 te->addr_code = code_address;
1867 if (prot & PAGE_WRITE) {
1868 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1869 (pd & IO_MEM_ROMD)) {
1870 /* Write access calls the I/O callback. */
1871 te->addr_write = address | TLB_MMIO;
1872 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1873 !cpu_physical_memory_is_dirty(pd)) {
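            /* RAM pages not yet marked dirty get TLB_NOTDIRTY so that the
               first store goes through the notdirty_mem_write handlers
               defined later in this file, which update the dirty bits and
               catch self-modifying code. */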
1874 te->addr_write = address | TLB_NOTDIRTY;
1876 te->addr_write = address;
1879 te->addr_write = -1;
1886 void tlb_flush(CPUState *env, int flush_global)
1890 void tlb_flush_page(CPUState *env, target_ulong addr)
1894 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1895 target_phys_addr_t paddr, int prot,
1896 int mmu_idx, int is_softmmu)
1901 /* dump memory mappings */
1902 void page_dump(FILE *f)
1904 unsigned long start, end;
1905 int i, j, prot, prot1;
1908 fprintf(f, "%-8s %-8s %-8s %s\n",
1909 "start", "end", "size", "prot");
1913 for(i = 0; i <= L1_SIZE; i++) {
1918 for(j = 0;j < L2_SIZE; j++) {
1923 if (prot1 != prot) {
1924 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1926 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1927 start, end, end - start,
1928 prot & PAGE_READ ? 'r' : '-',
1929 prot & PAGE_WRITE ? 'w' : '-',
1930 prot & PAGE_EXEC ? 'x' : '-');
1944 int page_get_flags(target_ulong address)
1948 p = page_find(address >> TARGET_PAGE_BITS);
1954 /* modify the flags of a page and invalidate the code if
1955    necessary. The flag PAGE_WRITE_ORG is positioned automatically
1956 depending on PAGE_WRITE */
1957 void page_set_flags(target_ulong start, target_ulong end, int flags)
1962 /* mmap_lock should already be held. */
1963 start = start & TARGET_PAGE_MASK;
1964 end = TARGET_PAGE_ALIGN(end);
1965 if (flags & PAGE_WRITE)
1966 flags |= PAGE_WRITE_ORG;
1967 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1968 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1969 /* We may be called for host regions that are outside guest
1973 /* if the write protection is set, then we invalidate the code
1975 if (!(p->flags & PAGE_WRITE) &&
1976 (flags & PAGE_WRITE) &&
1978 tb_invalidate_phys_page(addr, 0, NULL);
1984 int page_check_range(target_ulong start, target_ulong len, int flags)
1990     end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
1991 start = start & TARGET_PAGE_MASK;
1994 /* we've wrapped around */
1996 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1997 p = page_find(addr >> TARGET_PAGE_BITS);
2000 if( !(p->flags & PAGE_VALID) )
2003 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2005 if (flags & PAGE_WRITE) {
2006 if (!(p->flags & PAGE_WRITE_ORG))
2008 /* unprotect the page if it was put read-only because it
2009 contains translated code */
2010 if (!(p->flags & PAGE_WRITE)) {
2011 if (!page_unprotect(addr, 0, NULL))
2020 /* called from signal handler: invalidate the code and unprotect the
2021    page. Return TRUE if the fault was successfully handled. */
2022 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2024 unsigned int page_index, prot, pindex;
2026 target_ulong host_start, host_end, addr;
2028 /* Technically this isn't safe inside a signal handler. However we
2029 know this only ever happens in a synchronous SEGV handler, so in
2030 practice it seems to be ok. */
2033 host_start = address & qemu_host_page_mask;
2034 page_index = host_start >> TARGET_PAGE_BITS;
2035 p1 = page_find(page_index);
2040 host_end = host_start + qemu_host_page_size;
2043 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2047 /* if the page was really writable, then we change its
2048 protection back to writable */
2049 if (prot & PAGE_WRITE_ORG) {
2050 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2051 if (!(p1[pindex].flags & PAGE_WRITE)) {
2052 mprotect((void *)g2h(host_start), qemu_host_page_size,
2053 (prot & PAGE_BITS) | PAGE_WRITE);
2054 p1[pindex].flags |= PAGE_WRITE;
2055 /* and since the content will be modified, we must invalidate
2056 the corresponding translated code. */
2057 tb_invalidate_phys_page(address, pc, puc);
2058 #ifdef DEBUG_TB_CHECK
2059 tb_invalidate_check(address);
2069 static inline void tlb_set_dirty(CPUState *env,
2070 unsigned long addr, target_ulong vaddr)
2073 #endif /* defined(CONFIG_USER_ONLY) */
2075 #if !defined(CONFIG_USER_ONLY)
2076 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2078 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2079 ram_addr_t orig_memory);
2080 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2083 if (addr > start_addr) \
2086 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2087 if (start_addr2 > 0) \
2091 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2092 end_addr2 = TARGET_PAGE_SIZE - 1; \
2094 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2095 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2100 /* register physical memory. 'size' must be a multiple of the target
2101 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2103 void cpu_register_physical_memory(target_phys_addr_t start_addr,
2105 ram_addr_t phys_offset)
2107 target_phys_addr_t addr, end_addr;
2110 ram_addr_t orig_size = size;
2114 /* XXX: should not depend on cpu context */
2116 if (env->kqemu_enabled) {
2117 kqemu_set_phys_mem(start_addr, size, phys_offset);
2120 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2121 end_addr = start_addr + (target_phys_addr_t)size;
2122 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2123 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2124 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2125 ram_addr_t orig_memory = p->phys_offset;
2126 target_phys_addr_t start_addr2, end_addr2;
2127 int need_subpage = 0;
2129 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2131 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2132 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2133 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2134 &p->phys_offset, orig_memory);
2136 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2139 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2141 p->phys_offset = phys_offset;
2142 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2143 (phys_offset & IO_MEM_ROMD))
2144 phys_offset += TARGET_PAGE_SIZE;
2147 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2148 p->phys_offset = phys_offset;
2149 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2150 (phys_offset & IO_MEM_ROMD))
2151 phys_offset += TARGET_PAGE_SIZE;
2153 target_phys_addr_t start_addr2, end_addr2;
2154 int need_subpage = 0;
2156 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2157 end_addr2, need_subpage);
2159 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2160 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2161 &p->phys_offset, IO_MEM_UNASSIGNED);
2162 subpage_register(subpage, start_addr2, end_addr2,
2169 /* since each CPU stores ram addresses in its TLB cache, we must
2170 reset the modified entries */
2172 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2177 /* XXX: temporary until new memory mapping API */
2178 ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2182 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2184 return IO_MEM_UNASSIGNED;
2185 return p->phys_offset;
2188 /* XXX: better than nothing */
2189 ram_addr_t qemu_ram_alloc(ram_addr_t size)
2192 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2193         fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2194 (uint64_t)size, (uint64_t)phys_ram_size);
2197 addr = phys_ram_alloc_offset;
2198 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
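    /* Simple bump allocator: RAM is carved sequentially out of the
       preallocated phys_ram_base area, with the allocation offset rounded up
       to the target page size. */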
2202 void qemu_ram_free(ram_addr_t addr)
2206 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2208 #ifdef DEBUG_UNASSIGNED
2209 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2212 do_unassigned_access(addr, 0, 0, 0);
2214 do_unassigned_access(addr, 0, 0, 0);
2219 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2221 #ifdef DEBUG_UNASSIGNED
2222 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2225 do_unassigned_access(addr, 1, 0, 0);
2227 do_unassigned_access(addr, 1, 0, 0);
2231 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2232 unassigned_mem_readb,
2233 unassigned_mem_readb,
2234 unassigned_mem_readb,
2237 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2238 unassigned_mem_writeb,
2239 unassigned_mem_writeb,
2240 unassigned_mem_writeb,
2243 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2247 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2248 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2249 #if !defined(CONFIG_USER_ONLY)
2250 tb_invalidate_phys_page_fast(ram_addr, 1);
2251 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2254 stb_p(phys_ram_base + ram_addr, val);
2256 if (cpu_single_env->kqemu_enabled &&
2257 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2258 kqemu_modify_page(cpu_single_env, ram_addr);
2260 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2261 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2262 /* we remove the notdirty callback only if the code has been
2264 if (dirty_flags == 0xff)
2265 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2268 static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2272 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2273 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2274 #if !defined(CONFIG_USER_ONLY)
2275 tb_invalidate_phys_page_fast(ram_addr, 2);
2276 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2279 stw_p(phys_ram_base + ram_addr, val);
2281 if (cpu_single_env->kqemu_enabled &&
2282 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2283 kqemu_modify_page(cpu_single_env, ram_addr);
2285 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2286 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2287 /* we remove the notdirty callback only if the code has been
2289 if (dirty_flags == 0xff)
2290 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2293 static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2297 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2298 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2299 #if !defined(CONFIG_USER_ONLY)
2300 tb_invalidate_phys_page_fast(ram_addr, 4);
2301 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2304 stl_p(phys_ram_base + ram_addr, val);
2306 if (cpu_single_env->kqemu_enabled &&
2307 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2308 kqemu_modify_page(cpu_single_env, ram_addr);
2310 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2311 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2312 /* we remove the notdirty callback only if the code has been
2314 if (dirty_flags == 0xff)
2315 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
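/* The three notdirty handlers above differ only in access size: 1, 2 or
   4 bytes, stored with stb_p, stw_p and stl_p respectively. */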
2318 static CPUReadMemoryFunc *error_mem_read[3] = {
2319 NULL, /* never used */
2320 NULL, /* never used */
2321 NULL, /* never used */
2324 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2325 notdirty_mem_writeb,
2326 notdirty_mem_writew,
2327 notdirty_mem_writel,
2330 /* Generate a debug exception if a watchpoint has been hit. */
2331 static void check_watchpoint(int offset, int flags)
2333 CPUState *env = cpu_single_env;
2337 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2338 for (i = 0; i < env->nb_watchpoints; i++) {
2339 if (vaddr == env->watchpoint[i].vaddr
2340 && (env->watchpoint[i].type & flags)) {
2341 env->watchpoint_hit = i + 1;
2342 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
    return ldl_phys(addr);
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
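
/* Illustrative sketch (not part of the build): a page containing a
   watchpoint has its TLB entries redirected to io_mem_watch, so an ordinary
   guest load on that page lands in watch_mem_readl() above, which raises
   CPU_INTERRUPT_DEBUG on a hit and otherwise falls through to ldl_phys().
   The direct call below exists only for illustration. */
#if 0
static uint32_t example_watched_load(target_phys_addr_t addr)
{
    return watch_mem_readl(NULL, addr);
}
#endif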

static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                 unsigned int len)
{
    uint32_t ret;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);

    return ret;
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                              uint32_t value, unsigned int len)
{
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
}

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                            target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc *subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc *subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
            }
        }
    }

    return 0;
}

static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));
    if (mmio != NULL) {
        mmio->base = base;
        subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
        printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
               mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
        *phys = subpage_memory | IO_MEM_SUBPAGE;
        subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
    }

    return mmio;
}
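
/* Illustrative sketch (not part of the build): the subpage machinery is used
   when a registered region does not cover a whole target page, e.g. a small
   MMIO block; each byte/word/long access on that page is then dispatched to
   the handler owning the touched range while the rest of the page keeps its
   original handlers.  The address, size and io index below are hypothetical. */
#if 0
static void example_map_small_mmio(int demo_io_index)
{
    /* 0x100 bytes of MMIO inside a page: cpu_register_physical_memory()
       splits the surrounding page via subpage_init()/subpage_register(). */
    cpu_register_physical_memory(0x10001000, 0x100, demo_io_index);
}
#endif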

static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

    io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
                                          watch_mem_write, NULL);
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer. The
   registered functions may be modified dynamically later.
   If io_index is non zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned if error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return (io_index << IO_MEM_SHIFT) | subwidth;
}
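
/* Illustrative sketch (not part of the build): how a device model typically
   combines cpu_register_io_memory() with cpu_register_physical_memory().
   The callbacks, names and the guest-physical address are hypothetical;
   the missing byte/word handlers make the region take the subwidth path. */
#if 0
static uint32_t demo_mmio_readl(void *opaque, target_phys_addr_t addr)
{
    return 0x12345678;           /* dummy register value */
}

static void demo_mmio_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    /* writes ignored in this sketch */
}

static CPUReadMemoryFunc *demo_mmio_read[3] = {
    NULL, NULL, demo_mmio_readl,
};

static CPUWriteMemoryFunc *demo_mmio_write[3] = {
    NULL, NULL, demo_mmio_writel,
};

static void demo_mmio_map(void)
{
    int io;

    io = cpu_register_io_memory(0, demo_mmio_read, demo_mmio_write, NULL);
    cpu_register_physical_memory(0x20000000, TARGET_PAGE_SIZE, io);
}
#endif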

CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
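
/* Illustrative sketch (not part of the build): device models normally use
   the cpu_physical_memory_read()/cpu_physical_memory_write() wrappers around
   the function above, for example to fetch a guest-resident DMA descriptor.
   The helper and its parameters are hypothetical. */
#if 0
static void example_fetch_descriptor(target_phys_addr_t desc_addr,
                                     uint8_t *desc, int desc_len)
{
    cpu_physical_memory_read(desc_addr, desc, desc_len);
}
#endif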

/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
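
/* Illustrative sketch (not part of the build): a firmware loader can copy an
   image into a region registered as ROM, which an ordinary
   cpu_physical_memory_write() would not modify.  Plain C stdio is used here;
   the helper and its file handling are hypothetical. */
#if 0
static int example_load_firmware(const char *filename, target_phys_addr_t dst)
{
    uint8_t buf[4096];
    size_t n;
    FILE *f = fopen(filename, "rb");

    if (!f)
        return -1;
    while ((n = fread(buf, 1, sizeof(buf), f)) > 0) {
        cpu_physical_memory_write_rom(dst, buf, n);
        dst += n;
    }
    fclose(f);
    return 0;
}
#endif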

/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case: only 32 bit handlers are available, so split the read */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}
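
/* Illustrative sketch (not part of the build): target MMU code uses the
   notdirty store when it updates accessed/dirty bits in a guest page table
   entry, so that this bookkeeping write neither marks the RAM page dirty nor
   invalidates translated code.  The helper and the bit value are
   hypothetical. */
#if 0
static void example_set_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    stl_phys_notdirty(pte_addr, pte | 0x20 /* hypothetical "accessed" bit */);
}
#endif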

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif
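
/* Illustrative sketch (not part of the build): the fixed-width helpers above
   store values in guest byte order, so a value written with stl_phys() reads
   back unchanged through ldl_phys().  The address is assumed to point at
   plain RAM and to be 4-byte aligned. */
#if 0
static int example_phys_roundtrip(target_phys_addr_t addr)
{
    stl_phys(addr, 0xcafebabe);
    return ldl_phys(addr) == 0xcafebabe;
}
#endif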

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
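
/* Illustrative sketch (not part of the build): the gdb stub reads guest
   virtual memory through cpu_memory_rw_debug(), which translates one page at
   a time via cpu_get_phys_page_debug().  The wrapper below is hypothetical. */
#if 0
static int example_debug_peek(CPUState *env, target_ulong vaddr,
                              uint8_t *out, int len)
{
    return cpu_memory_rw_debug(env, vaddr, out, len, 0);
}
#endif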

/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
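
/* Illustrative sketch (not part of the build): dump_exec_info() takes any
   fprintf-like callback, so the JIT statistics can be sent to an arbitrary
   stream; the monitor's "info jit" command uses it with its own printer.
   Plain stdio is used here. */
#if 0
static void example_dump_jit_stats(void)
{
    dump_exec_info(stderr, fprintf);
}
#endif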

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif