2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 #include <sys/types.h>
37 #if defined(CONFIG_USER_ONLY)
41 //#define DEBUG_TB_INVALIDATE
44 //#define DEBUG_UNASSIGNED
46 /* make various TB consistency checks */
47 //#define DEBUG_TB_CHECK
48 //#define DEBUG_TLB_CHECK
50 #if !defined(CONFIG_USER_ONLY)
51 /* TB consistency checks only implemented for usermode emulation. */
55 /* threshold to flush the translated code buffer */
56 #define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
58 #define SMC_BITMAP_USE_THRESHOLD 10
60 #define MMAP_AREA_START 0x00000000
61 #define MMAP_AREA_END 0xa8000000
63 #if defined(TARGET_SPARC64)
64 #define TARGET_PHYS_ADDR_SPACE_BITS 41
65 #elif defined(TARGET_PPC64)
66 #define TARGET_PHYS_ADDR_SPACE_BITS 42
68 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
69 #define TARGET_PHYS_ADDR_SPACE_BITS 32
72 TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
73 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
75 /* any access to the tbs or the page table must use this lock */
76 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
78 uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
79 uint8_t *code_gen_ptr;
83 uint8_t *phys_ram_base;
84 uint8_t *phys_ram_dirty;
85 static ram_addr_t phys_ram_alloc_offset = 0;
88 /* current CPU in the current thread. It is only valid inside cpu_exec() */
90 CPUState *cpu_single_env;
92 typedef struct PageDesc {
93 /* list of TBs intersecting this ram page */
94 TranslationBlock *first_tb;
95 /* to optimize self-modifying code handling, we count the code write
96 accesses to a given page so we can switch to a bitmap past a threshold */
97 unsigned int code_write_count;
99 #if defined(CONFIG_USER_ONLY)
104 typedef struct PhysPageDesc {
105 /* offset in host memory of the page + io_index in the low 12 bits */
106 uint32_t phys_offset;
110 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
112 #define L1_SIZE (1 << L1_BITS)
113 #define L2_SIZE (1 << L2_BITS)
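
/* Editor's illustrative sketch, not part of the original file: the page
   tables below are two-level radix trees.  For a linear page index, the L1
   slot is index >> L2_BITS and the L2 slot is index & (L2_SIZE - 1), which
   mirrors the indexing done in page_find() further down. */
static inline void page_index_split_example(unsigned int index,
                                            unsigned int *l1_slot,
                                            unsigned int *l2_slot)
{
    *l1_slot = index >> L2_BITS;        /* selects the L1 entry */
    *l2_slot = index & (L2_SIZE - 1);   /* selects the PageDesc inside it */
}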
115 static void io_mem_init(void);
117 unsigned long qemu_real_host_page_size;
118 unsigned long qemu_host_page_bits;
119 unsigned long qemu_host_page_size;
120 unsigned long qemu_host_page_mask;
122 /* XXX: for system emulation, it could just be an array */
123 static PageDesc *l1_map[L1_SIZE];
124 PhysPageDesc **l1_phys_map;
126 /* io memory support */
127 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
128 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
129 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
130 static int io_mem_nb;
133 char *logfilename = "/tmp/qemu.log";
138 static int tlb_flush_count;
139 static int tb_flush_count;
140 static int tb_phys_invalidate_count;
142 static void page_init(void)
144 /* NOTE: we can always suppose that qemu_host_page_size >= TARGET_PAGE_SIZE */
148 SYSTEM_INFO system_info;
151 GetSystemInfo(&system_info);
152 qemu_real_host_page_size = system_info.dwPageSize;
154 VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
155 PAGE_EXECUTE_READWRITE, &old_protect);
158 qemu_real_host_page_size = getpagesize();
160 unsigned long start, end;
162 start = (unsigned long)code_gen_buffer;
163 start &= ~(qemu_real_host_page_size - 1);
165 end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
166 end += qemu_real_host_page_size - 1;
167 end &= ~(qemu_real_host_page_size - 1);
169 mprotect((void *)start, end - start,
170 PROT_READ | PROT_WRITE | PROT_EXEC);
174 if (qemu_host_page_size == 0)
175 qemu_host_page_size = qemu_real_host_page_size;
176 if (qemu_host_page_size < TARGET_PAGE_SIZE)
177 qemu_host_page_size = TARGET_PAGE_SIZE;
178 qemu_host_page_bits = 0;
179 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
180 qemu_host_page_bits++;
181 qemu_host_page_mask = ~(qemu_host_page_size - 1);
182 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
183 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
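
/* Editor's illustrative sketch, not in the original source: once page_init()
   has run, qemu_host_page_mask rounds any address down to the start of its
   host page, e.g. tb_alloc_page() below rounds page_addr this way before
   protecting the host page.  The 4096-byte page mentioned in the comment is
   only an example value. */
static inline unsigned long host_page_start_example(unsigned long addr)
{
    /* with a 4096-byte host page, qemu_host_page_bits == 12 and
       qemu_host_page_mask == ~0xfffUL */
    return addr & qemu_host_page_mask;
}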
186 static inline PageDesc *page_find_alloc(unsigned int index)
190 lp = &l1_map[index >> L2_BITS];
193 /* allocate if not found */
194 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
195 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
198 return p + (index & (L2_SIZE - 1));
201 static inline PageDesc *page_find(unsigned int index)
205 p = l1_map[index >> L2_BITS];
208 return p + (index & (L2_SIZE - 1));
211 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
216 p = (void **)l1_phys_map;
217 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
219 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
220 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
222 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
225 /* allocate if not found */
228 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
229 memset(p, 0, sizeof(void *) * L1_SIZE);
233 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
237 /* allocate if not found */
240 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
242 for (i = 0; i < L2_SIZE; i++)
243 pd[i].phys_offset = IO_MEM_UNASSIGNED;
245 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
248 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
250 return phys_page_find_alloc(index, 0);
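
/* Illustrative sketch added for exposition (not in the original file): a
   PhysPageDesc.phys_offset packs two things, as the readers and writers
   later in this file assume: the target-page-aligned RAM offset in the high
   bits and an I/O descriptor (io_index << IO_MEM_SHIFT) in the low bits. */
static inline void phys_offset_unpack_example(uint32_t pd,
                                              uint32_t *ram_offset,
                                              int *io_index)
{
    *ram_offset = pd & TARGET_PAGE_MASK;                        /* RAM part */
    *io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); /* I/O part */
}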
253 #if !defined(CONFIG_USER_ONLY)
254 static void tlb_protect_code(ram_addr_t ram_addr);
255 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
259 void cpu_exec_init(CPUState *env)
265 code_gen_ptr = code_gen_buffer;
269 env->next_cpu = NULL;
272 while (*penv != NULL) {
273 penv = (CPUState **)&(*penv)->next_cpu;
276 env->cpu_index = cpu_index;
280 static inline void invalidate_page_bitmap(PageDesc *p)
282 if (p->code_bitmap) {
283 qemu_free(p->code_bitmap);
284 p->code_bitmap = NULL;
286 p->code_write_count = 0;
289 /* set to NULL all the 'first_tb' fields in all PageDescs */
290 static void page_flush_tb(void)
295 for(i = 0; i < L1_SIZE; i++) {
298 for(j = 0; j < L2_SIZE; j++) {
300 invalidate_page_bitmap(p);
307 /* flush all the translation blocks */
308 /* XXX: tb_flush is currently not thread safe */
309 void tb_flush(CPUState *env1)
312 #if defined(DEBUG_FLUSH)
313 printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
314 code_gen_ptr - code_gen_buffer,
316 nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
320 for(env = first_cpu; env != NULL; env = env->next_cpu) {
321 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
324 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
327 code_gen_ptr = code_gen_buffer;
328 /* XXX: flush processor icache at this point if cache flush is expensive */
333 #ifdef DEBUG_TB_CHECK
335 static void tb_invalidate_check(unsigned long address)
337 TranslationBlock *tb;
339 address &= TARGET_PAGE_MASK;
340 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
341 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
342 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
343 address >= tb->pc + tb->size)) {
344 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
345 address, (long)tb->pc, tb->size);
351 /* verify that all the pages have correct rights for code */
352 static void tb_page_check(void)
354 TranslationBlock *tb;
355 int i, flags1, flags2;
357 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
358 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
359 flags1 = page_get_flags(tb->pc);
360 flags2 = page_get_flags(tb->pc + tb->size - 1);
361 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
362 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
363 (long)tb->pc, tb->size, flags1, flags2);
369 void tb_jmp_check(TranslationBlock *tb)
371 TranslationBlock *tb1;
374 /* suppress any remaining jumps to this TB */
378 tb1 = (TranslationBlock *)((long)tb1 & ~3);
381 tb1 = tb1->jmp_next[n1];
383 /* check end of list */
385 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
391 /* invalidate one TB */
392 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
395 TranslationBlock *tb1;
399 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
402 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
406 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
408 TranslationBlock *tb1;
414 tb1 = (TranslationBlock *)((long)tb1 & ~3);
416 *ptb = tb1->page_next[n1];
419 ptb = &tb1->page_next[n1];
423 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
425 TranslationBlock *tb1, **ptb;
428 ptb = &tb->jmp_next[n];
431 /* find tb(n) in circular list */
435 tb1 = (TranslationBlock *)((long)tb1 & ~3);
436 if (n1 == n && tb1 == tb)
439 ptb = &tb1->jmp_first;
441 ptb = &tb1->jmp_next[n1];
444 /* now we can suppress tb(n) from the list */
445 *ptb = tb->jmp_next[n];
447 tb->jmp_next[n] = NULL;
451 /* reset the jump entry 'n' of a TB so that it is not chained to another TB */
453 static inline void tb_reset_jump(TranslationBlock *tb, int n)
455 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
458 static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
463 target_ulong phys_pc;
464 TranslationBlock *tb1, *tb2;
466 /* remove the TB from the hash list */
467 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
468 h = tb_phys_hash_func(phys_pc);
469 tb_remove(&tb_phys_hash[h], tb,
470 offsetof(TranslationBlock, phys_hash_next));
472 /* remove the TB from the page list */
473 if (tb->page_addr[0] != page_addr) {
474 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
475 tb_page_remove(&p->first_tb, tb);
476 invalidate_page_bitmap(p);
478 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
479 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
480 tb_page_remove(&p->first_tb, tb);
481 invalidate_page_bitmap(p);
484 tb_invalidated_flag = 1;
486 /* remove the TB from the hash list */
487 h = tb_jmp_cache_hash_func(tb->pc);
488 for(env = first_cpu; env != NULL; env = env->next_cpu) {
489 if (env->tb_jmp_cache[h] == tb)
490 env->tb_jmp_cache[h] = NULL;
493 /* suppress this TB from the two jump lists */
494 tb_jmp_remove(tb, 0);
495 tb_jmp_remove(tb, 1);
497 /* suppress any remaining jumps to this TB */
503 tb1 = (TranslationBlock *)((long)tb1 & ~3);
504 tb2 = tb1->jmp_next[n1];
505 tb_reset_jump(tb1, n1);
506 tb1->jmp_next[n1] = NULL;
509 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
511 tb_phys_invalidate_count++;
514 static inline void set_bits(uint8_t *tab, int start, int len)
520 mask = 0xff << (start & 7);
521 if ((start & ~7) == (end & ~7)) {
523 mask &= ~(0xff << (end & 7));
528 start = (start + 8) & ~7;
530 while (start < end1) {
535 mask = ~(0xff << (end & 7));
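
/* Editor's note, illustrative only and not part of the original file: the
   bit-twiddling in set_bits() above is equivalent to this naive loop, which
   marks bits [start, start + len) in the byte array 'tab', LSB first within
   each byte. */
static inline void set_bits_naive_example(uint8_t *tab, int start, int len)
{
    int i;
    for (i = start; i < start + len; i++)
        tab[i >> 3] |= 1 << (i & 7);
}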
541 static void build_page_bitmap(PageDesc *p)
543 int n, tb_start, tb_end;
544 TranslationBlock *tb;
546 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
549 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
554 tb = (TranslationBlock *)((long)tb & ~3);
555 /* NOTE: this is subtle as a TB may span two physical pages */
557 /* NOTE: tb_end may be after the end of the page, but
558 it is not a problem */
559 tb_start = tb->pc & ~TARGET_PAGE_MASK;
560 tb_end = tb_start + tb->size;
561 if (tb_end > TARGET_PAGE_SIZE)
562 tb_end = TARGET_PAGE_SIZE;
565 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
567 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
568 tb = tb->page_next[n];
572 #ifdef TARGET_HAS_PRECISE_SMC
574 static void tb_gen_code(CPUState *env,
575 target_ulong pc, target_ulong cs_base, int flags,
578 TranslationBlock *tb;
580 target_ulong phys_pc, phys_page2, virt_page2;
583 phys_pc = get_phys_addr_code(env, pc);
586 /* flush must be done */
588 /* cannot fail at this point */
591 tc_ptr = code_gen_ptr;
593 tb->cs_base = cs_base;
596 cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
597 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
599 /* check next page if needed */
600 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
602 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
603 phys_page2 = get_phys_addr_code(env, virt_page2);
605 tb_link_phys(tb, phys_pc, phys_page2);
609 /* invalidate all TBs which intersect with the target physical page
610 starting in range [start, end[. NOTE: start and end must refer to
611 the same physical page. 'is_cpu_write_access' should be true if called
612 from a real cpu write access: the virtual CPU will exit the current
613 TB if code is modified inside this TB. */
614 void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
615 int is_cpu_write_access)
617 int n, current_tb_modified, current_tb_not_found, current_flags;
618 CPUState *env = cpu_single_env;
620 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
621 target_ulong tb_start, tb_end;
622 target_ulong current_pc, current_cs_base;
624 p = page_find(start >> TARGET_PAGE_BITS);
627 if (!p->code_bitmap &&
628 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
629 is_cpu_write_access) {
630 /* build code bitmap */
631 build_page_bitmap(p);
634 /* we remove all the TBs in the range [start, end[ */
635 /* XXX: see if in some cases it could be faster to invalidate all the code */
636 current_tb_not_found = is_cpu_write_access;
637 current_tb_modified = 0;
638 current_tb = NULL; /* avoid warning */
639 current_pc = 0; /* avoid warning */
640 current_cs_base = 0; /* avoid warning */
641 current_flags = 0; /* avoid warning */
645 tb = (TranslationBlock *)((long)tb & ~3);
646 tb_next = tb->page_next[n];
647 /* NOTE: this is subtle as a TB may span two physical pages */
649 /* NOTE: tb_end may be after the end of the page, but
650 it is not a problem */
651 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
652 tb_end = tb_start + tb->size;
654 tb_start = tb->page_addr[1];
655 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
657 if (!(tb_end <= start || tb_start >= end)) {
658 #ifdef TARGET_HAS_PRECISE_SMC
659 if (current_tb_not_found) {
660 current_tb_not_found = 0;
662 if (env->mem_write_pc) {
663 /* now we have a real cpu fault */
664 current_tb = tb_find_pc(env->mem_write_pc);
667 if (current_tb == tb &&
668 !(current_tb->cflags & CF_SINGLE_INSN)) {
669 /* If we are modifying the current TB, we must stop
670 its execution. We could be more precise by checking
671 that the modification is after the current PC, but it
672 would require a specialized function to partially
673 restore the CPU state */
675 current_tb_modified = 1;
676 cpu_restore_state(current_tb, env,
677 env->mem_write_pc, NULL);
678 #if defined(TARGET_I386)
679 current_flags = env->hflags;
680 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
681 current_cs_base = (target_ulong)env->segs[R_CS].base;
682 current_pc = current_cs_base + env->eip;
684 #error unsupported CPU
687 #endif /* TARGET_HAS_PRECISE_SMC */
688 /* we need to do that to handle the case where a signal
689 occurs while doing tb_phys_invalidate() */
692 saved_tb = env->current_tb;
693 env->current_tb = NULL;
695 tb_phys_invalidate(tb, -1);
697 env->current_tb = saved_tb;
698 if (env->interrupt_request && env->current_tb)
699 cpu_interrupt(env, env->interrupt_request);
704 #if !defined(CONFIG_USER_ONLY)
705 /* if no code remaining, no need to continue to use slow writes */
707 invalidate_page_bitmap(p);
708 if (is_cpu_write_access) {
709 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
713 #ifdef TARGET_HAS_PRECISE_SMC
714 if (current_tb_modified) {
715 /* we generate a block containing just the instruction
716 modifying the memory. It will ensure that it cannot modify itself */
718 env->current_tb = NULL;
719 tb_gen_code(env, current_pc, current_cs_base, current_flags,
721 cpu_resume_from_signal(env, NULL);
726 /* len must be <= 8 and start must be a multiple of len */
727 static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
734 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
735 cpu_single_env->mem_write_vaddr, len,
737 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
741 p = page_find(start >> TARGET_PAGE_BITS);
744 if (p->code_bitmap) {
745 offset = start & ~TARGET_PAGE_MASK;
746 b = p->code_bitmap[offset >> 3] >> (offset & 7);
747 if (b & ((1 << len) - 1))
751 tb_invalidate_phys_page_range(start, start + len, 1);
755 #if !defined(CONFIG_SOFTMMU)
756 static void tb_invalidate_phys_page(target_ulong addr,
757 unsigned long pc, void *puc)
759 int n, current_flags, current_tb_modified;
760 target_ulong current_pc, current_cs_base;
762 TranslationBlock *tb, *current_tb;
763 #ifdef TARGET_HAS_PRECISE_SMC
764 CPUState *env = cpu_single_env;
767 addr &= TARGET_PAGE_MASK;
768 p = page_find(addr >> TARGET_PAGE_BITS);
772 current_tb_modified = 0;
774 current_pc = 0; /* avoid warning */
775 current_cs_base = 0; /* avoid warning */
776 current_flags = 0; /* avoid warning */
777 #ifdef TARGET_HAS_PRECISE_SMC
779 current_tb = tb_find_pc(pc);
784 tb = (TranslationBlock *)((long)tb & ~3);
785 #ifdef TARGET_HAS_PRECISE_SMC
786 if (current_tb == tb &&
787 !(current_tb->cflags & CF_SINGLE_INSN)) {
788 /* If we are modifying the current TB, we must stop
789 its execution. We could be more precise by checking
790 that the modification is after the current PC, but it
791 would require a specialized function to partially
792 restore the CPU state */
794 current_tb_modified = 1;
795 cpu_restore_state(current_tb, env, pc, puc);
796 #if defined(TARGET_I386)
797 current_flags = env->hflags;
798 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
799 current_cs_base = (target_ulong)env->segs[R_CS].base;
800 current_pc = current_cs_base + env->eip;
802 #error unsupported CPU
805 #endif /* TARGET_HAS_PRECISE_SMC */
806 tb_phys_invalidate(tb, addr);
807 tb = tb->page_next[n];
810 #ifdef TARGET_HAS_PRECISE_SMC
811 if (current_tb_modified) {
812 /* we generate a block containing just the instruction
813 modifying the memory. It will ensure that it cannot modify itself */
815 env->current_tb = NULL;
816 tb_gen_code(env, current_pc, current_cs_base, current_flags,
818 cpu_resume_from_signal(env, puc);
824 /* add the tb in the target page and protect it if necessary */
825 static inline void tb_alloc_page(TranslationBlock *tb,
826 unsigned int n, target_ulong page_addr)
829 TranslationBlock *last_first_tb;
831 tb->page_addr[n] = page_addr;
832 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
833 tb->page_next[n] = p->first_tb;
834 last_first_tb = p->first_tb;
835 p->first_tb = (TranslationBlock *)((long)tb | n);
836 invalidate_page_bitmap(p);
838 #if defined(TARGET_HAS_SMC) || 1
840 #if defined(CONFIG_USER_ONLY)
841 if (p->flags & PAGE_WRITE) {
846 /* force the host page to be non-writable (writes will take a
847 page fault + mprotect overhead) */
848 page_addr &= qemu_host_page_mask;
850 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
851 addr += TARGET_PAGE_SIZE) {
853 p2 = page_find (addr >> TARGET_PAGE_BITS);
857 p2->flags &= ~PAGE_WRITE;
858 page_get_flags(addr);
860 mprotect(g2h(page_addr), qemu_host_page_size,
861 (prot & PAGE_BITS) & ~PAGE_WRITE);
862 #ifdef DEBUG_TB_INVALIDATE
863 printf("protecting code page: 0x%08lx\n",
868 /* if some code is already present, then the pages are already
869 protected. So we handle the case where only the first TB is
870 allocated in a physical page */
871 if (!last_first_tb) {
872 tlb_protect_code(page_addr);
876 #endif /* TARGET_HAS_SMC */
879 /* Allocate a new translation block. Flush the translation buffer if
880 too many translation blocks or too much generated code. */
881 TranslationBlock *tb_alloc(target_ulong pc)
883 TranslationBlock *tb;
885 if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
886 (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
894 /* add a new TB and link it to the physical page tables. phys_page2 is
895 (-1) to indicate that only one page contains the TB. */
896 void tb_link_phys(TranslationBlock *tb,
897 target_ulong phys_pc, target_ulong phys_page2)
900 TranslationBlock **ptb;
902 /* add in the physical hash table */
903 h = tb_phys_hash_func(phys_pc);
904 ptb = &tb_phys_hash[h];
905 tb->phys_hash_next = *ptb;
908 /* add in the page list */
909 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
910 if (phys_page2 != -1)
911 tb_alloc_page(tb, 1, phys_page2);
913 tb->page_addr[1] = -1;
915 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
916 tb->jmp_next[0] = NULL;
917 tb->jmp_next[1] = NULL;
919 tb->cflags &= ~CF_FP_USED;
920 if (tb->cflags & CF_TB_FP_USED)
921 tb->cflags |= CF_FP_USED;
924 /* init original jump addresses */
925 if (tb->tb_next_offset[0] != 0xffff)
926 tb_reset_jump(tb, 0);
927 if (tb->tb_next_offset[1] != 0xffff)
928 tb_reset_jump(tb, 1);
930 #ifdef DEBUG_TB_CHECK
935 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
936 tb[1].tc_ptr. Return NULL if not found */
937 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
941 TranslationBlock *tb;
945 if (tc_ptr < (unsigned long)code_gen_buffer ||
946 tc_ptr >= (unsigned long)code_gen_ptr)
948 /* binary search (cf Knuth) */
951 while (m_min <= m_max) {
952 m = (m_min + m_max) >> 1;
954 v = (unsigned long)tb->tc_ptr;
957 else if (tc_ptr < v) {
966 static void tb_reset_jump_recursive(TranslationBlock *tb);
968 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
970 TranslationBlock *tb1, *tb_next, **ptb;
973 tb1 = tb->jmp_next[n];
975 /* find head of list */
978 tb1 = (TranslationBlock *)((long)tb1 & ~3);
981 tb1 = tb1->jmp_next[n1];
983 /* we are now sure that tb jumps to tb1 */
986 /* remove tb from the jmp_first list */
987 ptb = &tb_next->jmp_first;
991 tb1 = (TranslationBlock *)((long)tb1 & ~3);
992 if (n1 == n && tb1 == tb)
994 ptb = &tb1->jmp_next[n1];
996 *ptb = tb->jmp_next[n];
997 tb->jmp_next[n] = NULL;
999 /* suppress the jump to next tb in generated code */
1000 tb_reset_jump(tb, n);
1003 /* suppress jumps in the tb we could have jumped to */
1003 tb_reset_jump_recursive(tb_next);
1007 static void tb_reset_jump_recursive(TranslationBlock *tb)
1009 tb_reset_jump_recursive2(tb, 0);
1010 tb_reset_jump_recursive2(tb, 1);
1013 #if defined(TARGET_HAS_ICE)
1014 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1016 target_ulong addr, pd;
1017 ram_addr_t ram_addr;
1020 addr = cpu_get_phys_page_debug(env, pc);
1021 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1023 pd = IO_MEM_UNASSIGNED;
1025 pd = p->phys_offset;
1027 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1028 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1032 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1033 breakpoint is reached */
1034 int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1036 #if defined(TARGET_HAS_ICE)
1039 for(i = 0; i < env->nb_breakpoints; i++) {
1040 if (env->breakpoints[i] == pc)
1044 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1046 env->breakpoints[env->nb_breakpoints++] = pc;
1048 breakpoint_invalidate(env, pc);
1055 /* remove a breakpoint */
1056 int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1058 #if defined(TARGET_HAS_ICE)
1060 for(i = 0; i < env->nb_breakpoints; i++) {
1061 if (env->breakpoints[i] == pc)
1066 env->nb_breakpoints--;
1067 if (i < env->nb_breakpoints)
1068 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1070 breakpoint_invalidate(env, pc);
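
/* Illustrative usage sketch (editor's addition, not original code): a
   debugger front end typically pairs these calls; 'env' and 'pc' are assumed
   to be supplied by the caller. */
static inline void breakpoint_usage_example(CPUState *env, target_ulong pc)
{
    if (cpu_breakpoint_insert(env, pc) < 0)
        return;                 /* table full or breakpoints unsupported */
    /* ... run until the CPU loop returns EXCP_DEBUG ... */
    cpu_breakpoint_remove(env, pc);
}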
1077 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1078 CPU loop after each instruction */
1079 void cpu_single_step(CPUState *env, int enabled)
1081 #if defined(TARGET_HAS_ICE)
1082 if (env->singlestep_enabled != enabled) {
1083 env->singlestep_enabled = enabled;
1084 /* must flush all the translated code to avoid inconsistencies */
1085 /* XXX: only flush what is necessary */
1091 /* enable or disable low-level logging */
1092 void cpu_set_log(int log_flags)
1094 loglevel = log_flags;
1095 if (loglevel && !logfile) {
1096 logfile = fopen(logfilename, "w");
1098 perror(logfilename);
1101 #if !defined(CONFIG_SOFTMMU)
1102 /* we must avoid glibc's use of mmap() by setting the buffer "by hand" */
1104 static uint8_t logfile_buf[4096];
1105 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1108 setvbuf(logfile, NULL, _IOLBF, 0);
1113 void cpu_set_log_filename(const char *filename)
1115 logfilename = strdup(filename);
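
/* Illustrative usage sketch (editor's addition): the logging helpers in this
   file are typically driven from the command line roughly like this; the
   mask string "in_asm,exec" is only an example. */
static inline void logging_usage_example(void)
{
    int mask;
    cpu_set_log_filename("/tmp/qemu.log");
    mask = cpu_str_to_log_mask("in_asm,exec");
    if (mask != 0)
        cpu_set_log(mask);
}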
1118 /* mask must never be zero, except for A20 change call */
1119 void cpu_interrupt(CPUState *env, int mask)
1121 TranslationBlock *tb;
1122 static int interrupt_lock;
1124 env->interrupt_request |= mask;
1125 /* if the cpu is currently executing code, we must unlink it and
1126 all the potentially executing TBs */
1127 tb = env->current_tb;
1128 if (tb && !testandset(&interrupt_lock)) {
1129 env->current_tb = NULL;
1130 tb_reset_jump_recursive(tb);
1135 void cpu_reset_interrupt(CPUState *env, int mask)
1137 env->interrupt_request &= ~mask;
1140 CPULogItem cpu_log_items[] = {
1141 { CPU_LOG_TB_OUT_ASM, "out_asm",
1142 "show generated host assembly code for each compiled TB" },
1143 { CPU_LOG_TB_IN_ASM, "in_asm",
1144 "show target assembly code for each compiled TB" },
1145 { CPU_LOG_TB_OP, "op",
1146 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1148 { CPU_LOG_TB_OP_OPT, "op_opt",
1149 "show micro ops after optimization for each compiled TB" },
1151 { CPU_LOG_INT, "int",
1152 "show interrupts/exceptions in short format" },
1153 { CPU_LOG_EXEC, "exec",
1154 "show trace before each executed TB (lots of logs)" },
1155 { CPU_LOG_TB_CPU, "cpu",
1156 "show CPU state before block translation" },
1158 { CPU_LOG_PCALL, "pcall",
1159 "show protected mode far calls/returns/exceptions" },
1162 { CPU_LOG_IOPORT, "ioport",
1163 "show all I/O port accesses" },
1168 static int cmp1(const char *s1, int n, const char *s2)
1170 if (strlen(s2) != n)
1172 return memcmp(s1, s2, n) == 0;
1175 /* takes a comma-separated list of log masks. Returns 0 on error. */
1176 int cpu_str_to_log_mask(const char *str)
1185 p1 = strchr(p, ',');
1188 if(cmp1(p,p1-p,"all")) {
1189 for(item = cpu_log_items; item->mask != 0; item++) {
1193 for(item = cpu_log_items; item->mask != 0; item++) {
1194 if (cmp1(p, p1 - p, item->name))
1208 void cpu_abort(CPUState *env, const char *fmt, ...)
1213 fprintf(stderr, "qemu: fatal: ");
1214 vfprintf(stderr, fmt, ap);
1215 fprintf(stderr, "\n");
1217 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1219 cpu_dump_state(env, stderr, fprintf, 0);
1225 #if !defined(CONFIG_USER_ONLY)
1227 /* NOTE: if flush_global is true, also flush global entries (not implemented yet) */
1229 void tlb_flush(CPUState *env, int flush_global)
1233 #if defined(DEBUG_TLB)
1234 printf("tlb_flush:\n");
1236 /* must reset current TB so that interrupts cannot modify the
1237 links while we are modifying them */
1238 env->current_tb = NULL;
1240 for(i = 0; i < CPU_TLB_SIZE; i++) {
1241 env->tlb_table[0][i].addr_read = -1;
1242 env->tlb_table[0][i].addr_write = -1;
1243 env->tlb_table[0][i].addr_code = -1;
1244 env->tlb_table[1][i].addr_read = -1;
1245 env->tlb_table[1][i].addr_write = -1;
1246 env->tlb_table[1][i].addr_code = -1;
1249 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1251 #if !defined(CONFIG_SOFTMMU)
1252 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1255 if (env->kqemu_enabled) {
1256 kqemu_flush(env, flush_global);
1262 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1264 if (addr == (tlb_entry->addr_read &
1265 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1266 addr == (tlb_entry->addr_write &
1267 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1268 addr == (tlb_entry->addr_code &
1269 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1270 tlb_entry->addr_read = -1;
1271 tlb_entry->addr_write = -1;
1272 tlb_entry->addr_code = -1;
1276 void tlb_flush_page(CPUState *env, target_ulong addr)
1279 TranslationBlock *tb;
1281 #if defined(DEBUG_TLB)
1282 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1284 /* must reset current TB so that interrupts cannot modify the
1285 links while we are modifying them */
1286 env->current_tb = NULL;
1288 addr &= TARGET_PAGE_MASK;
1289 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1290 tlb_flush_entry(&env->tlb_table[0][i], addr);
1291 tlb_flush_entry(&env->tlb_table[1][i], addr);
1293 /* Discard jump cache entries for any tb which might potentially
1294 overlap the flushed page. */
1295 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1296 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1298 i = tb_jmp_cache_hash_page(addr);
1299 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1301 #if !defined(CONFIG_SOFTMMU)
1302 if (addr < MMAP_AREA_END)
1303 munmap((void *)addr, TARGET_PAGE_SIZE);
1306 if (env->kqemu_enabled) {
1307 kqemu_flush_page(env, addr);
1312 /* update the TLBs so that writes to code in the virtual page 'addr' can be detected */
1314 static void tlb_protect_code(ram_addr_t ram_addr)
1316 cpu_physical_memory_reset_dirty(ram_addr,
1317 ram_addr + TARGET_PAGE_SIZE,
1321 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1322 tested for self modifying code */
1323 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1326 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1329 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1330 unsigned long start, unsigned long length)
1333 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1334 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1335 if ((addr - start) < length) {
1336 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1341 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1345 unsigned long length, start1;
1349 start &= TARGET_PAGE_MASK;
1350 end = TARGET_PAGE_ALIGN(end);
1352 length = end - start;
1355 len = length >> TARGET_PAGE_BITS;
1357 /* XXX: should not depend on cpu context */
1359 if (env->kqemu_enabled) {
1362 for(i = 0; i < len; i++) {
1363 kqemu_set_notdirty(env, addr);
1364 addr += TARGET_PAGE_SIZE;
1368 mask = ~dirty_flags;
1369 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1370 for(i = 0; i < len; i++)
1373 /* we modify the TLB cache so that the dirty bit will be set again
1374 when accessing the range */
1375 start1 = start + (unsigned long)phys_ram_base;
1376 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1377 for(i = 0; i < CPU_TLB_SIZE; i++)
1378 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1379 for(i = 0; i < CPU_TLB_SIZE; i++)
1380 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1383 #if !defined(CONFIG_SOFTMMU)
1384 /* XXX: this is expensive */
1390 for(i = 0; i < L1_SIZE; i++) {
1393 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1394 for(j = 0; j < L2_SIZE; j++) {
1395 if (p->valid_tag == virt_valid_tag &&
1396 p->phys_addr >= start && p->phys_addr < end &&
1397 (p->prot & PROT_WRITE)) {
1398 if (addr < MMAP_AREA_END) {
1399 mprotect((void *)addr, TARGET_PAGE_SIZE,
1400 p->prot & ~PROT_WRITE);
1403 addr += TARGET_PAGE_SIZE;
1412 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1414 ram_addr_t ram_addr;
1416 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1417 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1418 tlb_entry->addend - (unsigned long)phys_ram_base;
1419 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1420 tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
1425 /* update the TLB according to the current state of the dirty bits */
1426 void cpu_tlb_update_dirty(CPUState *env)
1429 for(i = 0; i < CPU_TLB_SIZE; i++)
1430 tlb_update_dirty(&env->tlb_table[0][i]);
1431 for(i = 0; i < CPU_TLB_SIZE; i++)
1432 tlb_update_dirty(&env->tlb_table[1][i]);
1435 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
1436 unsigned long start)
1439 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1440 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1441 if (addr == start) {
1442 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1447 /* update the TLB corresponding to virtual page vaddr and phys addr
1448 addr so that it is no longer dirty */
1449 static inline void tlb_set_dirty(CPUState *env,
1450 unsigned long addr, target_ulong vaddr)
1454 addr &= TARGET_PAGE_MASK;
1455 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1456 tlb_set_dirty1(&env->tlb_table[0][i], addr);
1457 tlb_set_dirty1(&env->tlb_table[1][i], addr);
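
/* Illustrative sketch (not original code): the direct-mapped software TLB
   used throughout this file indexes its CPU_TLB_SIZE entries with the low
   bits of the virtual page number, exactly as the expressions above do. */
static inline int tlb_index_example(target_ulong vaddr)
{
    return (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
}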
1460 /* add a new TLB entry. At most one entry for a given virtual address
1461 is permitted. Return 0 if OK or 2 if the page could not be mapped
1462 (can only happen in non SOFTMMU mode for I/O pages or pages
1463 conflicting with the host address space). */
1464 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1465 target_phys_addr_t paddr, int prot,
1466 int is_user, int is_softmmu)
1471 target_ulong address;
1472 target_phys_addr_t addend;
1476 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1478 pd = IO_MEM_UNASSIGNED;
1480 pd = p->phys_offset;
1482 #if defined(DEBUG_TLB)
1483 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
1484 vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
1488 #if !defined(CONFIG_SOFTMMU)
1492 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1493 /* IO memory case */
1494 address = vaddr | pd;
1497 /* standard memory */
1499 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1502 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1504 te = &env->tlb_table[is_user][index];
1505 te->addend = addend;
1506 if (prot & PAGE_READ) {
1507 te->addr_read = address;
1511 if (prot & PAGE_EXEC) {
1512 te->addr_code = address;
1516 if (prot & PAGE_WRITE) {
1517 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1518 (pd & IO_MEM_ROMD)) {
1519 /* write access calls the I/O callback */
1520 te->addr_write = vaddr |
1521 (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
1522 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1523 !cpu_physical_memory_is_dirty(pd)) {
1524 te->addr_write = vaddr | IO_MEM_NOTDIRTY;
1526 te->addr_write = address;
1529 te->addr_write = -1;
1532 #if !defined(CONFIG_SOFTMMU)
1534 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1535 /* IO access: no mapping is done as it will be handled by the soft MMU */
1537 if (!(env->hflags & HF_SOFTMMU_MASK))
1542 if (vaddr >= MMAP_AREA_END) {
1545 if (prot & PROT_WRITE) {
1546 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1547 #if defined(TARGET_HAS_SMC) || 1
1550 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1551 !cpu_physical_memory_is_dirty(pd))) {
1552 /* ROM: we act as if code was inside */
1553 /* if code is present, we only map it read-only and save the original mapping */
1557 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
1560 vp->valid_tag = virt_valid_tag;
1561 prot &= ~PAGE_WRITE;
1564 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
1565 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1566 if (map_addr == MAP_FAILED) {
1567 cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
1577 /* called from signal handler: invalidate the code and unprotect the
1578 page. Return TRUE if the fault was successfully handled. */
1579 int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
1581 #if !defined(CONFIG_SOFTMMU)
1584 #if defined(DEBUG_TLB)
1585 printf("page_unprotect: addr=0x%08x\n", addr);
1587 addr &= TARGET_PAGE_MASK;
1589 /* if it is not mapped, no need to worry here */
1590 if (addr >= MMAP_AREA_END)
1592 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1595 /* NOTE: in this case, validate_tag is _not_ tested as it
1596 validates only the code TLB */
1597 if (vp->valid_tag != virt_valid_tag)
1599 if (!(vp->prot & PAGE_WRITE))
1601 #if defined(DEBUG_TLB)
1602 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1603 addr, vp->phys_addr, vp->prot);
1605 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1606 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1607 (unsigned long)addr, vp->prot);
1608 /* set the dirty bit */
1609 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
1610 /* flush the code inside */
1611 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
1620 void tlb_flush(CPUState *env, int flush_global)
1624 void tlb_flush_page(CPUState *env, target_ulong addr)
1628 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1629 target_phys_addr_t paddr, int prot,
1630 int is_user, int is_softmmu)
1635 /* dump memory mappings */
1636 void page_dump(FILE *f)
1638 unsigned long start, end;
1639 int i, j, prot, prot1;
1642 fprintf(f, "%-8s %-8s %-8s %s\n",
1643 "start", "end", "size", "prot");
1647 for(i = 0; i <= L1_SIZE; i++) {
1652 for(j = 0;j < L2_SIZE; j++) {
1657 if (prot1 != prot) {
1658 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1660 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1661 start, end, end - start,
1662 prot & PAGE_READ ? 'r' : '-',
1663 prot & PAGE_WRITE ? 'w' : '-',
1664 prot & PAGE_EXEC ? 'x' : '-');
1678 int page_get_flags(target_ulong address)
1682 p = page_find(address >> TARGET_PAGE_BITS);
1688 /* modify the flags of a page and invalidate the code if
1689 necessary. The flag PAGE_WRITE_ORG is set automatically
1690 depending on PAGE_WRITE */
1691 void page_set_flags(target_ulong start, target_ulong end, int flags)
1696 start = start & TARGET_PAGE_MASK;
1697 end = TARGET_PAGE_ALIGN(end);
1698 if (flags & PAGE_WRITE)
1699 flags |= PAGE_WRITE_ORG;
1700 spin_lock(&tb_lock);
1701 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1702 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1703 /* if the write protection is set, then we invalidate the code inside */
1705 if (!(p->flags & PAGE_WRITE) &&
1706 (flags & PAGE_WRITE) &&
1708 tb_invalidate_phys_page(addr, 0, NULL);
1712 spin_unlock(&tb_lock);
1715 /* called from signal handler: invalidate the code and unprotect the
1716 page. Return TRUE if the fault was successfully handled. */
1717 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
1719 unsigned int page_index, prot, pindex;
1721 target_ulong host_start, host_end, addr;
1723 host_start = address & qemu_host_page_mask;
1724 page_index = host_start >> TARGET_PAGE_BITS;
1725 p1 = page_find(page_index);
1728 host_end = host_start + qemu_host_page_size;
1731 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1735 /* if the page was really writable, then we change its
1736 protection back to writable */
1737 if (prot & PAGE_WRITE_ORG) {
1738 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1739 if (!(p1[pindex].flags & PAGE_WRITE)) {
1740 mprotect((void *)g2h(host_start), qemu_host_page_size,
1741 (prot & PAGE_BITS) | PAGE_WRITE);
1742 p1[pindex].flags |= PAGE_WRITE;
1743 /* and since the content will be modified, we must invalidate
1744 the corresponding translated code. */
1745 tb_invalidate_phys_page(address, pc, puc);
1746 #ifdef DEBUG_TB_CHECK
1747 tb_invalidate_check(address);
1755 /* call this function when system calls directly modify a memory area */
1756 /* ??? This should be redundant now that we have lock_user. */
1757 void page_unprotect_range(target_ulong data, target_ulong data_size)
1759 target_ulong start, end, addr;
1762 end = start + data_size;
1763 start &= TARGET_PAGE_MASK;
1764 end = TARGET_PAGE_ALIGN(end);
1765 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1766 page_unprotect(addr, 0, NULL);
1770 static inline void tlb_set_dirty(CPUState *env,
1771 unsigned long addr, target_ulong vaddr)
1774 #endif /* defined(CONFIG_USER_ONLY) */
1776 /* register physical memory. 'size' must be a multiple of the target
1777 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an I/O memory page */
1779 void cpu_register_physical_memory(target_phys_addr_t start_addr,
1781 unsigned long phys_offset)
1783 target_phys_addr_t addr, end_addr;
1787 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
1788 end_addr = start_addr + size;
1789 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
1790 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
1791 p->phys_offset = phys_offset;
1792 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
1793 (phys_offset & IO_MEM_ROMD))
1794 phys_offset += TARGET_PAGE_SIZE;
1797 /* since each CPU stores ram addresses in its TLB cache, we must
1798 reset the modified entries */
1800 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1805 /* XXX: temporary until new memory mapping API */
1806 uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
1810 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1812 return IO_MEM_UNASSIGNED;
1813 return p->phys_offset;
1816 /* XXX: better than nothing */
1817 ram_addr_t qemu_ram_alloc(unsigned int size)
1820 if ((phys_ram_alloc_offset + size) >= phys_ram_size) {
1821 fprintf(stderr, "Not enough memory (requested_size = %u, max memory = %d)\n",
1822 size, phys_ram_size);
1825 addr = phys_ram_alloc_offset;
1826 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
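
/* Illustrative sketch (editor's addition; the base address and size are
   hypothetical): board code allocates RAM from the global pool and then maps
   it into the guest physical address space as plain RAM. */
static inline void ram_alloc_usage_example(void)
{
    ram_addr_t ram_offset;
    ram_offset = qemu_ram_alloc(0x100000);              /* 1 MB of RAM */
    cpu_register_physical_memory(0x00000000, 0x100000,
                                 ram_offset | IO_MEM_RAM);
}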
1830 void qemu_ram_free(ram_addr_t addr)
1834 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
1836 #ifdef DEBUG_UNASSIGNED
1837 printf("Unassigned mem read 0x%08x\n", (int)addr);
1842 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
1844 #ifdef DEBUG_UNASSIGNED
1845 printf("Unassigned mem write 0x%08x = 0x%x\n", (int)addr, val);
1849 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
1850 unassigned_mem_readb,
1851 unassigned_mem_readb,
1852 unassigned_mem_readb,
1855 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
1856 unassigned_mem_writeb,
1857 unassigned_mem_writeb,
1858 unassigned_mem_writeb,
1861 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
1863 unsigned long ram_addr;
1865 ram_addr = addr - (unsigned long)phys_ram_base;
1866 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1867 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1868 #if !defined(CONFIG_USER_ONLY)
1869 tb_invalidate_phys_page_fast(ram_addr, 1);
1870 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1873 stb_p((uint8_t *)(long)addr, val);
1875 if (cpu_single_env->kqemu_enabled &&
1876 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
1877 kqemu_modify_page(cpu_single_env, ram_addr);
1879 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1880 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
1881 /* we remove the notdirty callback only if the code has been flushed */
1883 if (dirty_flags == 0xff)
1884 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
1887 static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
1889 unsigned long ram_addr;
1891 ram_addr = addr - (unsigned long)phys_ram_base;
1892 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1893 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1894 #if !defined(CONFIG_USER_ONLY)
1895 tb_invalidate_phys_page_fast(ram_addr, 2);
1896 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1899 stw_p((uint8_t *)(long)addr, val);
1901 if (cpu_single_env->kqemu_enabled &&
1902 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
1903 kqemu_modify_page(cpu_single_env, ram_addr);
1905 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1906 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
1907 /* we remove the notdirty callback only if the code has been flushed */
1909 if (dirty_flags == 0xff)
1910 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
1913 static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
1915 unsigned long ram_addr;
1917 ram_addr = addr - (unsigned long)phys_ram_base;
1918 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1919 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1920 #if !defined(CONFIG_USER_ONLY)
1921 tb_invalidate_phys_page_fast(ram_addr, 4);
1922 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1925 stl_p((uint8_t *)(long)addr, val);
1927 if (cpu_single_env->kqemu_enabled &&
1928 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
1929 kqemu_modify_page(cpu_single_env, ram_addr);
1931 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1932 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
1933 /* we remove the notdirty callback only if the code has been flushed */
1935 if (dirty_flags == 0xff)
1936 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
1939 static CPUReadMemoryFunc *error_mem_read[3] = {
1940 NULL, /* never used */
1941 NULL, /* never used */
1942 NULL, /* never used */
1945 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
1946 notdirty_mem_writeb,
1947 notdirty_mem_writew,
1948 notdirty_mem_writel,
1951 static void io_mem_init(void)
1953 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
1954 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
1955 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
1958 /* alloc dirty bits array */
1959 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
1960 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
1963 /* mem_read and mem_write are arrays of functions containing the
1964 function to access byte (index 0), word (index 1) and dword (index
1965 2). All functions must be supplied. If io_index is non-zero, the
1966 corresponding io zone is modified. If it is zero, a new io zone is
1967 allocated. The return value can be used with
1968 cpu_register_physical_memory(). (-1) is returned on error. */
1969 int cpu_register_io_memory(int io_index,
1970 CPUReadMemoryFunc **mem_read,
1971 CPUWriteMemoryFunc **mem_write,
1976 if (io_index <= 0) {
1977 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
1979 io_index = io_mem_nb++;
1981 if (io_index >= IO_MEM_NB_ENTRIES)
1985 for(i = 0;i < 3; i++) {
1986 io_mem_read[io_index][i] = mem_read[i];
1987 io_mem_write[io_index][i] = mem_write[i];
1989 io_mem_opaque[io_index] = opaque;
1990 return io_index << IO_MEM_SHIFT;
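
/* Illustrative sketch added for exposition (the device callbacks, name and
   address below are hypothetical, not from the original file): a device
   model supplies byte/word/long handlers, registers them, then maps the
   returned token into the physical address space. */
static uint32_t example_io_readb(void *opaque, target_phys_addr_t addr)
{
    return 0;   /* a real device would decode 'addr' here */
}

static void example_io_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    /* a real device would latch 'val' here */
}

static CPUReadMemoryFunc *example_io_read[3] = {
    example_io_readb, example_io_readb, example_io_readb,
};

static CPUWriteMemoryFunc *example_io_write[3] = {
    example_io_writeb, example_io_writeb, example_io_writeb,
};

/* map one target page of the example device at a hypothetical address */
static inline void example_io_map(void)
{
    int iomemtype;
    iomemtype = cpu_register_io_memory(0, example_io_read, example_io_write,
                                       NULL);
    cpu_register_physical_memory(0xfe000000, TARGET_PAGE_SIZE, iomemtype);
}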
1993 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
1995 return io_mem_write[io_index >> IO_MEM_SHIFT];
1998 CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2000 return io_mem_read[io_index >> IO_MEM_SHIFT];
2003 /* physical memory access (slow version, mainly for debug) */
2004 #if defined(CONFIG_USER_ONLY)
2005 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2006 int len, int is_write)
2013 page = addr & TARGET_PAGE_MASK;
2014 l = (page + TARGET_PAGE_SIZE) - addr;
2017 flags = page_get_flags(page);
2018 if (!(flags & PAGE_VALID))
2021 if (!(flags & PAGE_WRITE))
2023 p = lock_user(addr, len, 0);
2024 memcpy(p, buf, len);
2025 unlock_user(p, addr, len);
2027 if (!(flags & PAGE_READ))
2029 p = lock_user(addr, len, 1);
2030 memcpy(buf, p, len);
2031 unlock_user(p, addr, 0);
2040 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2041 int len, int is_write)
2046 target_phys_addr_t page;
2051 page = addr & TARGET_PAGE_MASK;
2052 l = (page + TARGET_PAGE_SIZE) - addr;
2055 p = phys_page_find(page >> TARGET_PAGE_BITS);
2057 pd = IO_MEM_UNASSIGNED;
2059 pd = p->phys_offset;
2063 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2064 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2065 /* XXX: could force cpu_single_env to NULL to avoid potential bugs */
2067 if (l >= 4 && ((addr & 3) == 0)) {
2068 /* 32 bit write access */
2070 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2072 } else if (l >= 2 && ((addr & 1) == 0)) {
2073 /* 16 bit write access */
2075 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2078 /* 8 bit write access */
2080 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2084 unsigned long addr1;
2085 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2087 ptr = phys_ram_base + addr1;
2088 memcpy(ptr, buf, l);
2089 if (!cpu_physical_memory_is_dirty(addr1)) {
2090 /* invalidate code */
2091 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2093 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2094 (0xff & ~CODE_DIRTY_FLAG);
2098 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2099 !(pd & IO_MEM_ROMD)) {
2101 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2102 if (l >= 4 && ((addr & 3) == 0)) {
2103 /* 32 bit read access */
2104 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2107 } else if (l >= 2 && ((addr & 1) == 0)) {
2108 /* 16 bit read access */
2109 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2113 /* 8 bit read access */
2114 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2120 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2121 (addr & ~TARGET_PAGE_MASK);
2122 memcpy(buf, ptr, l);
2131 /* used for ROM loading: can write to RAM and ROM */
2132 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2133 const uint8_t *buf, int len)
2137 target_phys_addr_t page;
2142 page = addr & TARGET_PAGE_MASK;
2143 l = (page + TARGET_PAGE_SIZE) - addr;
2146 p = phys_page_find(page >> TARGET_PAGE_BITS);
2148 pd = IO_MEM_UNASSIGNED;
2150 pd = p->phys_offset;
2153 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2154 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2155 !(pd & IO_MEM_ROMD)) {
2158 unsigned long addr1;
2159 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2161 ptr = phys_ram_base + addr1;
2162 memcpy(ptr, buf, l);
2171 /* warning: addr must be aligned */
2172 uint32_t ldl_phys(target_phys_addr_t addr)
2180 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2182 pd = IO_MEM_UNASSIGNED;
2184 pd = p->phys_offset;
2187 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2188 !(pd & IO_MEM_ROMD)) {
2190 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2191 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2194 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2195 (addr & ~TARGET_PAGE_MASK);
2201 /* warning: addr must be aligned */
2202 uint64_t ldq_phys(target_phys_addr_t addr)
2210 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2212 pd = IO_MEM_UNASSIGNED;
2214 pd = p->phys_offset;
2217 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2218 !(pd & IO_MEM_ROMD)) {
2220 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2221 #ifdef TARGET_WORDS_BIGENDIAN
2222 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2223 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2225 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2226 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2230 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2231 (addr & ~TARGET_PAGE_MASK);
2238 uint32_t ldub_phys(target_phys_addr_t addr)
2241 cpu_physical_memory_read(addr, &val, 1);
2246 uint32_t lduw_phys(target_phys_addr_t addr)
2249 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2250 return tswap16(val);
2253 /* warning: addr must be aligned. The ram page is not marked as dirty
2254 and the code inside is not invalidated. It is useful if the dirty
2255 bits are used to track modified PTEs */
2256 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2263 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2265 pd = IO_MEM_UNASSIGNED;
2267 pd = p->phys_offset;
2270 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2271 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2272 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2274 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2275 (addr & ~TARGET_PAGE_MASK);
2280 /* warning: addr must be aligned */
2281 void stl_phys(target_phys_addr_t addr, uint32_t val)
2288 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2290 pd = IO_MEM_UNASSIGNED;
2292 pd = p->phys_offset;
2295 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2296 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2297 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2299 unsigned long addr1;
2300 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2302 ptr = phys_ram_base + addr1;
2304 if (!cpu_physical_memory_is_dirty(addr1)) {
2305 /* invalidate code */
2306 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2308 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2309 (0xff & ~CODE_DIRTY_FLAG);
2315 void stb_phys(target_phys_addr_t addr, uint32_t val)
2318 cpu_physical_memory_write(addr, &v, 1);
2322 void stw_phys(target_phys_addr_t addr, uint32_t val)
2324 uint16_t v = tswap16(val);
2325 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2329 void stq_phys(target_phys_addr_t addr, uint64_t val)
2332 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
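
/* Illustrative sketch (editor's addition; the address passed in is assumed
   to be a valid, aligned guest RAM address): the helpers above give
   debug-style access to guest physical memory, e.g. patching a 32-bit word
   and reading it back. */
static inline uint32_t phys_access_example(target_phys_addr_t addr,
                                           uint32_t val)
{
    stl_phys(addr, val);   /* write the word; translated code covering it
                              is invalidated via the dirty tracking above */
    return ldl_phys(addr); /* read it back */
}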
2337 /* virtual memory access for debug */
2338 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2339 uint8_t *buf, int len, int is_write)
2342 target_ulong page, phys_addr;
2345 page = addr & TARGET_PAGE_MASK;
2346 phys_addr = cpu_get_phys_page_debug(env, page);
2347 /* if no physical page mapped, return an error */
2348 if (phys_addr == -1)
2350 l = (page + TARGET_PAGE_SIZE) - addr;
2353 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
2362 void dump_exec_info(FILE *f,
2363 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2365 int i, target_code_size, max_target_code_size;
2366 int direct_jmp_count, direct_jmp2_count, cross_page;
2367 TranslationBlock *tb;
2369 target_code_size = 0;
2370 max_target_code_size = 0;
2372 direct_jmp_count = 0;
2373 direct_jmp2_count = 0;
2374 for(i = 0; i < nb_tbs; i++) {
2376 target_code_size += tb->size;
2377 if (tb->size > max_target_code_size)
2378 max_target_code_size = tb->size;
2379 if (tb->page_addr[1] != -1)
2381 if (tb->tb_next_offset[0] != 0xffff) {
2383 if (tb->tb_next_offset[1] != 0xffff) {
2384 direct_jmp2_count++;
2388 /* XXX: avoid using doubles ? */
2389 cpu_fprintf(f, "TB count %d\n", nb_tbs);
2390 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
2391 nb_tbs ? target_code_size / nb_tbs : 0,
2392 max_target_code_size);
2393 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
2394 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
2395 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
2396 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
2398 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
2399 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
2401 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
2403 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
2404 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
2405 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
2406 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
2409 #if !defined(CONFIG_USER_ONLY)
2411 #define MMUSUFFIX _cmmu
2412 #define GETPC() NULL
2413 #define env cpu_single_env
2414 #define SOFTMMU_CODE_ACCESS
2417 #include "softmmu_template.h"
2420 #include "softmmu_template.h"
2423 #include "softmmu_template.h"
2426 #include "softmmu_template.h"