2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
37 //#define DEBUG_TB_INVALIDATE
41 /* make various TB consistency checks */
42 //#define DEBUG_TB_CHECK
43 //#define DEBUG_TLB_CHECK
45 /* threshold to flush the translated code buffer */
46 #define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
48 #define SMC_BITMAP_USE_THRESHOLD 10
50 #define MMAP_AREA_START 0x00000000
51 #define MMAP_AREA_END 0xa8000000
53 TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
54 TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
55 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
57 /* any access to the tbs or the page table must use this lock */
58 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
60 uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
61 uint8_t *code_gen_ptr;
65 uint8_t *phys_ram_base;
66 uint8_t *phys_ram_dirty;
68 typedef struct PageDesc {
69 /* list of TBs intersecting this ram page */
70 TranslationBlock *first_tb;
71 /* in order to optimize self-modifying code handling, we count the number
72 of code write accesses to a given page and use a bitmap once SMC_BITMAP_USE_THRESHOLD is reached */
73 unsigned int code_write_count;
75 #if defined(CONFIG_USER_ONLY)
80 typedef struct PhysPageDesc {
81 /* offset in host memory of the page + io_index in the low 12 bits */
82 unsigned long phys_offset;
85 typedef struct VirtPageDesc {
86 /* physical address of code page. It is valid only if 'valid_tag'
87 matches 'virt_valid_tag' */
88 target_ulong phys_addr;
89 unsigned int valid_tag;
90 #if !defined(CONFIG_SOFTMMU)
91 /* original page access rights. It is valid only if 'valid_tag'
92 matches 'virt_valid_tag' */
98 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
100 #define L1_SIZE (1 << L1_BITS)
101 #define L2_SIZE (1 << L2_BITS)
103 static void io_mem_init(void);
105 unsigned long qemu_real_host_page_size;
106 unsigned long qemu_host_page_bits;
107 unsigned long qemu_host_page_size;
108 unsigned long qemu_host_page_mask;
110 /* XXX: for system emulation, it could just be an array */
111 static PageDesc *l1_map[L1_SIZE];
112 static PhysPageDesc *l1_phys_map[L1_SIZE];
114 #if !defined(CONFIG_USER_ONLY)
115 static VirtPageDesc *l1_virt_map[L1_SIZE];
116 static unsigned int virt_valid_tag;
119 /* io memory support */
120 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
121 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
122 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
123 static int io_mem_nb;
126 char *logfilename = "/tmp/qemu.log";
130 static void page_init(void)
132 /* NOTE: we can always suppose that qemu_host_page_size >=
136 SYSTEM_INFO system_info;
139 GetSystemInfo(&system_info);
140 qemu_real_host_page_size = system_info.dwPageSize;
142 VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
143 PAGE_EXECUTE_READWRITE, &old_protect);
146 qemu_real_host_page_size = getpagesize();
148 unsigned long start, end;
150 start = (unsigned long)code_gen_buffer;
151 start &= ~(qemu_real_host_page_size - 1);
153 end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
154 end += qemu_real_host_page_size - 1;
155 end &= ~(qemu_real_host_page_size - 1);
157 mprotect((void *)start, end - start,
158 PROT_READ | PROT_WRITE | PROT_EXEC);
162 if (qemu_host_page_size == 0)
163 qemu_host_page_size = qemu_real_host_page_size;
164 if (qemu_host_page_size < TARGET_PAGE_SIZE)
165 qemu_host_page_size = TARGET_PAGE_SIZE;
166 qemu_host_page_bits = 0;
167 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
168 qemu_host_page_bits++;
169 qemu_host_page_mask = ~(qemu_host_page_size - 1);
170 #if !defined(CONFIG_USER_ONLY)
175 static inline PageDesc *page_find_alloc(unsigned int index)
179 lp = &l1_map[index >> L2_BITS];
182 /* allocate if not found */
183 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
184 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
187 return p + (index & (L2_SIZE - 1));
190 static inline PageDesc *page_find(unsigned int index)
194 p = l1_map[index >> L2_BITS];
197 return p + (index & (L2_SIZE - 1));
200 static inline PhysPageDesc *phys_page_find_alloc(unsigned int index)
202 PhysPageDesc **lp, *p;
204 lp = &l1_phys_map[index >> L2_BITS];
207 /* allocate if not found */
208 p = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);
209 memset(p, 0, sizeof(PhysPageDesc) * L2_SIZE);
212 return p + (index & (L2_SIZE - 1));
215 static inline PhysPageDesc *phys_page_find(unsigned int index)
219 p = l1_phys_map[index >> L2_BITS];
222 return p + (index & (L2_SIZE - 1));
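/* Illustrative sketch (not part of the original file): how a target address
   reaches a PageDesc through the two-level tables above.  The page index is
   split into a level-1 slot (upper L1_BITS) and an offset into the level-2
   block (lower L2_BITS); the level-2 blocks are allocated lazily by the
   *_alloc variants. */
static inline PageDesc *example_lookup_page(target_ulong address)
{
    unsigned int index = address >> TARGET_PAGE_BITS;  /* target page number */
    /* page_find() indexes l1_map with (index >> L2_BITS) and then picks
       entry (index & (L2_SIZE - 1)) inside the level-2 block */
    return page_find(index);
}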
225 #if !defined(CONFIG_USER_ONLY)
226 static void tlb_protect_code(CPUState *env, target_ulong addr);
227 static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr);
229 static inline VirtPageDesc *virt_page_find_alloc(unsigned int index)
231 VirtPageDesc **lp, *p;
233 lp = &l1_virt_map[index >> L2_BITS];
236 /* allocate if not found */
237 p = qemu_malloc(sizeof(VirtPageDesc) * L2_SIZE);
238 memset(p, 0, sizeof(VirtPageDesc) * L2_SIZE);
241 return p + (index & (L2_SIZE - 1));
244 static inline VirtPageDesc *virt_page_find(unsigned int index)
248 p = l1_virt_map[index >> L2_BITS];
251 return p + (index & (L2_SIZE - 1));
254 static void virt_page_flush(void)
261 if (virt_valid_tag == 0) {
263 for(i = 0; i < L1_SIZE; i++) {
266 for(j = 0; j < L2_SIZE; j++)
273 static void virt_page_flush(void)
278 void cpu_exec_init(void)
281 code_gen_ptr = code_gen_buffer;
287 static inline void invalidate_page_bitmap(PageDesc *p)
289 if (p->code_bitmap) {
290 qemu_free(p->code_bitmap);
291 p->code_bitmap = NULL;
293 p->code_write_count = 0;
296 /* set to NULL all the 'first_tb' fields in all PageDescs */
297 static void page_flush_tb(void)
302 for(i = 0; i < L1_SIZE; i++) {
305 for(j = 0; j < L2_SIZE; j++) {
307 invalidate_page_bitmap(p);
314 /* flush all the translation blocks */
315 /* XXX: tb_flush is currently not thread safe */
316 void tb_flush(CPUState *env)
318 #if defined(DEBUG_FLUSH)
319 printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
320 code_gen_ptr - code_gen_buffer,
322 nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
325 memset (tb_hash, 0, CODE_GEN_HASH_SIZE * sizeof (void *));
328 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
331 code_gen_ptr = code_gen_buffer;
332 /* XXX: flush processor icache at this point if cache flush is
336 #ifdef DEBUG_TB_CHECK
338 static void tb_invalidate_check(unsigned long address)
340 TranslationBlock *tb;
342 address &= TARGET_PAGE_MASK;
343 for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
344 for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
345 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
346 address >= tb->pc + tb->size)) {
347 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
348 address, tb->pc, tb->size);
354 /* verify that all the pages have correct rights for code */
355 static void tb_page_check(void)
357 TranslationBlock *tb;
358 int i, flags1, flags2;
360 for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
361 for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
362 flags1 = page_get_flags(tb->pc);
363 flags2 = page_get_flags(tb->pc + tb->size - 1);
364 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
365 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
366 tb->pc, tb->size, flags1, flags2);
372 void tb_jmp_check(TranslationBlock *tb)
374 TranslationBlock *tb1;
377 /* suppress any remaining jumps to this TB */
381 tb1 = (TranslationBlock *)((long)tb1 & ~3);
384 tb1 = tb1->jmp_next[n1];
386 /* check end of list */
388 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
394 /* invalidate one TB */
395 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
398 TranslationBlock *tb1;
402 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
405 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
409 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
411 TranslationBlock *tb1;
417 tb1 = (TranslationBlock *)((long)tb1 & ~3);
419 *ptb = tb1->page_next[n1];
422 ptb = &tb1->page_next[n1];
426 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
428 TranslationBlock *tb1, **ptb;
431 ptb = &tb->jmp_next[n];
434 /* find tb(n) in circular list */
438 tb1 = (TranslationBlock *)((long)tb1 & ~3);
439 if (n1 == n && tb1 == tb)
442 ptb = &tb1->jmp_first;
444 ptb = &tb1->jmp_next[n1];
447 /* now we can suppress tb(n) from the list */
448 *ptb = tb->jmp_next[n];
450 tb->jmp_next[n] = NULL;
454 /* reset the jump entry 'n' of a TB so that it is not chained to
456 static inline void tb_reset_jump(TranslationBlock *tb, int n)
458 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
461 static inline void tb_invalidate(TranslationBlock *tb)
464 TranslationBlock *tb1, *tb2, **ptb;
466 tb_invalidated_flag = 1;
468 /* remove the TB from the hash list */
469 h = tb_hash_func(tb->pc);
473 /* NOTE: the TB is not necessarily linked in the hash; if it is not,
474 it simply means that it is not currently used */
478 *ptb = tb1->hash_next;
481 ptb = &tb1->hash_next;
484 /* suppress this TB from the two jump lists */
485 tb_jmp_remove(tb, 0);
486 tb_jmp_remove(tb, 1);
488 /* suppress any remaining jumps to this TB */
494 tb1 = (TranslationBlock *)((long)tb1 & ~3);
495 tb2 = tb1->jmp_next[n1];
496 tb_reset_jump(tb1, n1);
497 tb1->jmp_next[n1] = NULL;
500 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
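/* Illustrative note (not part of the original file): the TB lists above store
   a TranslationBlock pointer with a small tag in its two low bits - the page
   slot (0 or 1) for first_tb/page_next[], the jump slot for the jmp lists,
   and 2 marking the head of the circular jmp list - which is why every walker
   masks with ~3 before dereferencing.  A minimal decoding sketch: */
static inline TranslationBlock *example_tb_untag(TranslationBlock *tagged)
{
    return (TranslationBlock *)((long)tagged & ~3);   /* strip the tag bits */
}
static inline int example_tb_tag(TranslationBlock *tagged)
{
    return (int)((long)tagged & 3);                   /* 0, 1 or 2 */
}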
503 static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
507 target_ulong phys_pc;
509 /* remove the TB from the hash list */
510 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
511 h = tb_phys_hash_func(phys_pc);
512 tb_remove(&tb_phys_hash[h], tb,
513 offsetof(TranslationBlock, phys_hash_next));
515 /* remove the TB from the page list */
516 if (tb->page_addr[0] != page_addr) {
517 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
518 tb_page_remove(&p->first_tb, tb);
519 invalidate_page_bitmap(p);
521 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
522 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
523 tb_page_remove(&p->first_tb, tb);
524 invalidate_page_bitmap(p);
530 static inline void set_bits(uint8_t *tab, int start, int len)
536 mask = 0xff << (start & 7);
537 if ((start & ~7) == (end & ~7)) {
539 mask &= ~(0xff << (end & 7));
544 start = (start + 8) & ~7;
546 while (start < end1) {
551 mask = ~(0xff << (end & 7));
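/* Illustrative usage (not part of the original file): build_page_bitmap()
   below calls set_bits() with one bit per byte of the target page, clamping
   the TB's byte range to the page boundary.  A minimal sketch: */
static void example_mark_tb_bytes(uint8_t *bitmap, int tb_start, int tb_size)
{
    int tb_end = tb_start + tb_size;
    if (tb_end > TARGET_PAGE_SIZE)
        tb_end = TARGET_PAGE_SIZE;       /* a TB may spill into the next page */
    set_bits(bitmap, tb_start, tb_end - tb_start);
}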
557 static void build_page_bitmap(PageDesc *p)
559 int n, tb_start, tb_end;
560 TranslationBlock *tb;
562 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
565 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
570 tb = (TranslationBlock *)((long)tb & ~3);
571 /* NOTE: this is subtle as a TB may span two physical pages */
573 /* NOTE: tb_end may be after the end of the page, but
574 it is not a problem */
575 tb_start = tb->pc & ~TARGET_PAGE_MASK;
576 tb_end = tb_start + tb->size;
577 if (tb_end > TARGET_PAGE_SIZE)
578 tb_end = TARGET_PAGE_SIZE;
581 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
583 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
584 tb = tb->page_next[n];
588 #ifdef TARGET_HAS_PRECISE_SMC
590 static void tb_gen_code(CPUState *env,
591 target_ulong pc, target_ulong cs_base, int flags,
594 TranslationBlock *tb;
596 target_ulong phys_pc, phys_page2, virt_page2;
599 phys_pc = get_phys_addr_code(env, (unsigned long)pc);
600 tb = tb_alloc((unsigned long)pc);
602 /* flush must be done */
604 /* cannot fail at this point */
605 tb = tb_alloc((unsigned long)pc);
607 tc_ptr = code_gen_ptr;
609 tb->cs_base = cs_base;
612 cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
613 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
615 /* check next page if needed */
616 virt_page2 = ((unsigned long)pc + tb->size - 1) & TARGET_PAGE_MASK;
618 if (((unsigned long)pc & TARGET_PAGE_MASK) != virt_page2) {
619 phys_page2 = get_phys_addr_code(env, virt_page2);
621 tb_link_phys(tb, phys_pc, phys_page2);
625 /* invalidate all TBs which intersect with the target physical page
626 starting in range [start, end[. NOTE: start and end must refer to
627 the same physical page. 'is_cpu_write_access' should be true if called
628 from a real cpu write access: the virtual CPU will exit the current
629 TB if code is modified inside this TB. */
630 void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
631 int is_cpu_write_access)
633 int n, current_tb_modified, current_tb_not_found, current_flags;
634 CPUState *env = cpu_single_env;
636 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
637 target_ulong tb_start, tb_end;
638 target_ulong current_pc, current_cs_base;
640 p = page_find(start >> TARGET_PAGE_BITS);
643 if (!p->code_bitmap &&
644 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
645 is_cpu_write_access) {
646 /* build code bitmap */
647 build_page_bitmap(p);
650 /* we remove all the TBs in the range [start, end[ */
651 /* XXX: see if in some cases it could be faster to invalidate all the code */
652 current_tb_not_found = is_cpu_write_access;
653 current_tb_modified = 0;
654 current_tb = NULL; /* avoid warning */
655 current_pc = 0; /* avoid warning */
656 current_cs_base = 0; /* avoid warning */
657 current_flags = 0; /* avoid warning */
661 tb = (TranslationBlock *)((long)tb & ~3);
662 tb_next = tb->page_next[n];
663 /* NOTE: this is subtle as a TB may span two physical pages */
665 /* NOTE: tb_end may be after the end of the page, but
666 it is not a problem */
667 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
668 tb_end = tb_start + tb->size;
670 tb_start = tb->page_addr[1];
671 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
673 if (!(tb_end <= start || tb_start >= end)) {
674 #ifdef TARGET_HAS_PRECISE_SMC
675 if (current_tb_not_found) {
676 current_tb_not_found = 0;
678 if (env->mem_write_pc) {
679 /* now we have a real cpu fault */
680 current_tb = tb_find_pc(env->mem_write_pc);
683 if (current_tb == tb &&
684 !(current_tb->cflags & CF_SINGLE_INSN)) {
685 /* If we are modifying the current TB, we must stop
686 its execution. We could be more precise by checking
687 that the modification is after the current PC, but it
688 would require a specialized function to partially
689 restore the CPU state */
691 current_tb_modified = 1;
692 cpu_restore_state(current_tb, env,
693 env->mem_write_pc, NULL);
694 #if defined(TARGET_I386)
695 current_flags = env->hflags;
696 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
697 current_cs_base = (target_ulong)env->segs[R_CS].base;
698 current_pc = current_cs_base + env->eip;
700 #error unsupported CPU
703 #endif /* TARGET_HAS_PRECISE_SMC */
704 saved_tb = env->current_tb;
705 env->current_tb = NULL;
706 tb_phys_invalidate(tb, -1);
707 env->current_tb = saved_tb;
708 if (env->interrupt_request && env->current_tb)
709 cpu_interrupt(env, env->interrupt_request);
713 #if !defined(CONFIG_USER_ONLY)
714 /* if no code remaining, no need to continue to use slow writes */
716 invalidate_page_bitmap(p);
717 if (is_cpu_write_access) {
718 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
722 #ifdef TARGET_HAS_PRECISE_SMC
723 if (current_tb_modified) {
724 /* we generate a block containing just the instruction
725 modifying the memory. It will ensure that it cannot modify
727 env->current_tb = NULL;
728 tb_gen_code(env, current_pc, current_cs_base, current_flags,
730 cpu_resume_from_signal(env, NULL);
735 /* len must be <= 8 and start must be a multiple of len */
736 static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
743 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
744 cpu_single_env->mem_write_vaddr, len,
746 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
750 p = page_find(start >> TARGET_PAGE_BITS);
753 if (p->code_bitmap) {
754 offset = start & ~TARGET_PAGE_MASK;
755 b = p->code_bitmap[offset >> 3] >> (offset & 7);
756 if (b & ((1 << len) - 1))
760 tb_invalidate_phys_page_range(start, start + len, 1);
764 #if !defined(CONFIG_SOFTMMU)
765 static void tb_invalidate_phys_page(target_ulong addr,
766 unsigned long pc, void *puc)
768 int n, current_flags, current_tb_modified;
769 target_ulong current_pc, current_cs_base;
771 TranslationBlock *tb, *current_tb;
772 #ifdef TARGET_HAS_PRECISE_SMC
773 CPUState *env = cpu_single_env;
776 addr &= TARGET_PAGE_MASK;
777 p = page_find(addr >> TARGET_PAGE_BITS);
781 current_tb_modified = 0;
783 current_pc = 0; /* avoid warning */
784 current_cs_base = 0; /* avoid warning */
785 current_flags = 0; /* avoid warning */
786 #ifdef TARGET_HAS_PRECISE_SMC
788 current_tb = tb_find_pc(pc);
793 tb = (TranslationBlock *)((long)tb & ~3);
794 #ifdef TARGET_HAS_PRECISE_SMC
795 if (current_tb == tb &&
796 !(current_tb->cflags & CF_SINGLE_INSN)) {
797 /* If we are modifying the current TB, we must stop
798 its execution. We could be more precise by checking
799 that the modification is after the current PC, but it
800 would require a specialized function to partially
801 restore the CPU state */
803 current_tb_modified = 1;
804 cpu_restore_state(current_tb, env, pc, puc);
805 #if defined(TARGET_I386)
806 current_flags = env->hflags;
807 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
808 current_cs_base = (target_ulong)env->segs[R_CS].base;
809 current_pc = current_cs_base + env->eip;
811 #error unsupported CPU
814 #endif /* TARGET_HAS_PRECISE_SMC */
815 tb_phys_invalidate(tb, addr);
816 tb = tb->page_next[n];
819 #ifdef TARGET_HAS_PRECISE_SMC
820 if (current_tb_modified) {
821 /* we generate a block containing just the instruction
822 modifying the memory. It will ensure that it cannot modify
824 env->current_tb = NULL;
825 tb_gen_code(env, current_pc, current_cs_base, current_flags,
827 cpu_resume_from_signal(env, puc);
833 /* add the tb to the target page and protect it if necessary */
834 static inline void tb_alloc_page(TranslationBlock *tb,
835 unsigned int n, unsigned int page_addr)
838 TranslationBlock *last_first_tb;
840 tb->page_addr[n] = page_addr;
841 p = page_find(page_addr >> TARGET_PAGE_BITS);
842 tb->page_next[n] = p->first_tb;
843 last_first_tb = p->first_tb;
844 p->first_tb = (TranslationBlock *)((long)tb | n);
845 invalidate_page_bitmap(p);
847 #if defined(TARGET_HAS_SMC) || 1
849 #if defined(CONFIG_USER_ONLY)
850 if (p->flags & PAGE_WRITE) {
851 unsigned long host_start, host_end, addr;
854 /* force the host page as non writable (writes will have a
855 page fault + mprotect overhead) */
856 host_start = page_addr & qemu_host_page_mask;
857 host_end = host_start + qemu_host_page_size;
859 for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
860 prot |= page_get_flags(addr);
861 mprotect((void *)host_start, qemu_host_page_size,
862 (prot & PAGE_BITS) & ~PAGE_WRITE);
863 #ifdef DEBUG_TB_INVALIDATE
864 printf("protecting code page: 0x%08lx\n",
867 p->flags &= ~PAGE_WRITE;
870 /* if some code is already present, then the pages are already
871 protected. So we handle the case where only the first TB is
872 allocated in a physical page */
873 if (!last_first_tb) {
874 target_ulong virt_addr;
876 virt_addr = (tb->pc & TARGET_PAGE_MASK) + (n << TARGET_PAGE_BITS);
877 tlb_protect_code(cpu_single_env, virt_addr);
881 #endif /* TARGET_HAS_SMC */
884 /* Allocate a new translation block. Flush the translation buffer if
885 too many translation blocks or too much generated code. */
886 TranslationBlock *tb_alloc(unsigned long pc)
888 TranslationBlock *tb;
890 if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
891 (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
899 /* add a new TB and link it to the physical page tables. phys_page2 is
900 (-1) to indicate that only one page contains the TB. */
901 void tb_link_phys(TranslationBlock *tb,
902 target_ulong phys_pc, target_ulong phys_page2)
905 TranslationBlock **ptb;
907 /* add in the physical hash table */
908 h = tb_phys_hash_func(phys_pc);
909 ptb = &tb_phys_hash[h];
910 tb->phys_hash_next = *ptb;
913 /* add in the page list */
914 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
915 if (phys_page2 != -1)
916 tb_alloc_page(tb, 1, phys_page2);
918 tb->page_addr[1] = -1;
919 #ifdef DEBUG_TB_CHECK
924 /* link the tb with the other TBs */
925 void tb_link(TranslationBlock *tb)
927 #if !defined(CONFIG_USER_ONLY)
932 /* save the code memory mappings (needed to invalidate the code) */
933 addr = tb->pc & TARGET_PAGE_MASK;
934 vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
935 #ifdef DEBUG_TLB_CHECK
936 if (vp->valid_tag == virt_valid_tag &&
937 vp->phys_addr != tb->page_addr[0]) {
938 printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
939 addr, tb->page_addr[0], vp->phys_addr);
942 vp->phys_addr = tb->page_addr[0];
943 if (vp->valid_tag != virt_valid_tag) {
944 vp->valid_tag = virt_valid_tag;
945 #if !defined(CONFIG_SOFTMMU)
950 if (tb->page_addr[1] != -1) {
951 addr += TARGET_PAGE_SIZE;
952 vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
953 #ifdef DEBUG_TLB_CHECK
954 if (vp->valid_tag == virt_valid_tag &&
955 vp->phys_addr != tb->page_addr[1]) {
956 printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
957 addr, tb->page_addr[1], vp->phys_addr);
960 vp->phys_addr = tb->page_addr[1];
961 if (vp->valid_tag != virt_valid_tag) {
962 vp->valid_tag = virt_valid_tag;
963 #if !defined(CONFIG_SOFTMMU)
971 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
972 tb->jmp_next[0] = NULL;
973 tb->jmp_next[1] = NULL;
975 tb->cflags &= ~CF_FP_USED;
976 if (tb->cflags & CF_TB_FP_USED)
977 tb->cflags |= CF_FP_USED;
980 /* init original jump addresses */
981 if (tb->tb_next_offset[0] != 0xffff)
982 tb_reset_jump(tb, 0);
983 if (tb->tb_next_offset[1] != 0xffff)
984 tb_reset_jump(tb, 1);
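/* Illustrative note (not part of the original file): a tb_next_offset[] slot
   equal to 0xffff means the TB has no direct jump in that slot; otherwise
   tb_reset_jump() points the generated jump back into the TB's own code at
   offset tb_next_offset[n], i.e. the not-chained exit path that returns to
   the main loop until the destination TB is chained in. */
static inline int example_tb_has_direct_jump(TranslationBlock *tb, int n)
{
    return tb->tb_next_offset[n] != 0xffff;
}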
987 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
988 tb[1].tc_ptr. Return NULL if not found */
989 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
993 TranslationBlock *tb;
997 if (tc_ptr < (unsigned long)code_gen_buffer ||
998 tc_ptr >= (unsigned long)code_gen_ptr)
1000 /* binary search (cf Knuth) */
1003 while (m_min <= m_max) {
1004 m = (m_min + m_max) >> 1;
1006 v = (unsigned long)tb->tc_ptr;
1009 else if (tc_ptr < v) {
1018 static void tb_reset_jump_recursive(TranslationBlock *tb);
1020 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1022 TranslationBlock *tb1, *tb_next, **ptb;
1025 tb1 = tb->jmp_next[n];
1027 /* find head of list */
1030 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1033 tb1 = tb1->jmp_next[n1];
1035 /* we are now sure that tb jumps to tb1 */
1038 /* remove tb from the jmp_first list */
1039 ptb = &tb_next->jmp_first;
1043 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1044 if (n1 == n && tb1 == tb)
1046 ptb = &tb1->jmp_next[n1];
1048 *ptb = tb->jmp_next[n];
1049 tb->jmp_next[n] = NULL;
1051 /* suppress the jump to next tb in generated code */
1052 tb_reset_jump(tb, n);
1054 /* suppress jumps in the tb we could have jumped to */
1055 tb_reset_jump_recursive(tb_next);
1059 static void tb_reset_jump_recursive(TranslationBlock *tb)
1061 tb_reset_jump_recursive2(tb, 0);
1062 tb_reset_jump_recursive2(tb, 1);
1065 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1067 target_ulong phys_addr;
1069 phys_addr = cpu_get_phys_page_debug(env, pc);
1070 tb_invalidate_phys_page_range(phys_addr, phys_addr + 1, 0);
1073 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1074 breakpoint is reached */
1075 int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1077 #if defined(TARGET_I386) || defined(TARGET_PPC) || defined(TARGET_SPARC)
1080 for(i = 0; i < env->nb_breakpoints; i++) {
1081 if (env->breakpoints[i] == pc)
1085 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1087 env->breakpoints[env->nb_breakpoints++] = pc;
1089 breakpoint_invalidate(env, pc);
1096 /* remove a breakpoint */
1097 int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1099 #if defined(TARGET_I386) || defined(TARGET_PPC) || defined(TARGET_SPARC)
1101 for(i = 0; i < env->nb_breakpoints; i++) {
1102 if (env->breakpoints[i] == pc)
1107 memmove(&env->breakpoints[i], &env->breakpoints[i + 1],
1108 (env->nb_breakpoints - (i + 1)) * sizeof(env->breakpoints[0]));
1109 env->nb_breakpoints--;
1111 breakpoint_invalidate(env, pc);
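/* Hypothetical usage sketch (not part of the original file): a debugger
   front-end arming and later clearing a breakpoint.  The negative-on-failure
   convention is assumed from the capacity/duplicate checks above; EXCP_DEBUG
   is what the CPU loop returns when the breakpoint is reached. */
static void example_toggle_breakpoint(CPUState *env, target_ulong pc)
{
    if (cpu_breakpoint_insert(env, pc) < 0)
        return;               /* unsupported target or breakpoint table full */
    /* ... run until the CPU loop reports EXCP_DEBUG at 'pc' ... */
    cpu_breakpoint_remove(env, pc);
}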
1118 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1119 CPU loop after each instruction */
1120 void cpu_single_step(CPUState *env, int enabled)
1122 #if defined(TARGET_I386) || defined(TARGET_PPC) || defined(TARGET_SPARC)
1123 if (env->singlestep_enabled != enabled) {
1124 env->singlestep_enabled = enabled;
1125 /* must flush all the translated code to avoid inconsistencies */
1126 /* XXX: only flush what is necessary */
1132 /* enable or disable low-level logging */
1133 void cpu_set_log(int log_flags)
1135 loglevel = log_flags;
1136 if (loglevel && !logfile) {
1137 logfile = fopen(logfilename, "w");
1139 perror(logfilename);
1142 #if !defined(CONFIG_SOFTMMU)
1143 /* must avoid glibc's use of mmap() by setting the buffer "by hand" */
1145 static uint8_t logfile_buf[4096];
1146 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1149 setvbuf(logfile, NULL, _IOLBF, 0);
1154 void cpu_set_log_filename(const char *filename)
1156 logfilename = strdup(filename);
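/* Illustrative usage (not part of the original file): choose a log file and
   enable a couple of log items; the CPU_LOG_* masks are the ones listed in
   cpu_log_items below. */
static void example_enable_logging(void)
{
    cpu_set_log_filename("/tmp/qemu-trace.log");
    cpu_set_log(CPU_LOG_TB_IN_ASM | CPU_LOG_TB_CPU);
}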
1159 /* mask must never be zero, except for A20 change call */
1160 void cpu_interrupt(CPUState *env, int mask)
1162 TranslationBlock *tb;
1163 static int interrupt_lock;
1165 env->interrupt_request |= mask;
1166 /* if the cpu is currently executing code, we must unlink it and
1167 all the potentially executing TBs */
1168 tb = env->current_tb;
1169 if (tb && !testandset(&interrupt_lock)) {
1170 env->current_tb = NULL;
1171 tb_reset_jump_recursive(tb);
1176 void cpu_reset_interrupt(CPUState *env, int mask)
1178 env->interrupt_request &= ~mask;
1181 CPULogItem cpu_log_items[] = {
1182 { CPU_LOG_TB_OUT_ASM, "out_asm",
1183 "show generated host assembly code for each compiled TB" },
1184 { CPU_LOG_TB_IN_ASM, "in_asm",
1185 "show target assembly code for each compiled TB" },
1186 { CPU_LOG_TB_OP, "op",
1187 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1189 { CPU_LOG_TB_OP_OPT, "op_opt",
1190 "show micro ops after optimization for each compiled TB" },
1192 { CPU_LOG_INT, "int",
1193 "show interrupts/exceptions in short format" },
1194 { CPU_LOG_EXEC, "exec",
1195 "show trace before each executed TB (lots of logs)" },
1196 { CPU_LOG_TB_CPU, "cpu",
1197 "show CPU state before bloc translation" },
1199 { CPU_LOG_PCALL, "pcall",
1200 "show protected mode far calls/returns/exceptions" },
1203 { CPU_LOG_IOPORT, "ioport",
1204 "show all i/o ports accesses" },
1209 static int cmp1(const char *s1, int n, const char *s2)
1211 if (strlen(s2) != n)
1213 return memcmp(s1, s2, n) == 0;
1216 /* takes a comma-separated list of log masks. Returns 0 on error. */
1217 int cpu_str_to_log_mask(const char *str)
1226 p1 = strchr(p, ',');
1229 if(cmp1(p,p1-p,"all")) {
1230 for(item = cpu_log_items; item->mask != 0; item++) {
1234 for(item = cpu_log_items; item->mask != 0; item++) {
1235 if (cmp1(p, p1 - p, item->name))
1249 void cpu_abort(CPUState *env, const char *fmt, ...)
1254 fprintf(stderr, "qemu: fatal: ");
1255 vfprintf(stderr, fmt, ap);
1256 fprintf(stderr, "\n");
1258 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1260 cpu_dump_state(env, stderr, fprintf, 0);
1266 #if !defined(CONFIG_USER_ONLY)
1268 /* NOTE: if flush_global is true, also flush global entries (not
1270 void tlb_flush(CPUState *env, int flush_global)
1274 #if defined(DEBUG_TLB)
1275 printf("tlb_flush:\n");
1277 /* must reset current TB so that interrupts cannot modify the
1278 links while we are modifying them */
1279 env->current_tb = NULL;
1281 for(i = 0; i < CPU_TLB_SIZE; i++) {
1282 env->tlb_read[0][i].address = -1;
1283 env->tlb_write[0][i].address = -1;
1284 env->tlb_read[1][i].address = -1;
1285 env->tlb_write[1][i].address = -1;
1289 memset (tb_hash, 0, CODE_GEN_HASH_SIZE * sizeof (void *));
1291 #if !defined(CONFIG_SOFTMMU)
1292 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1296 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1298 if (addr == (tlb_entry->address &
1299 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
1300 tlb_entry->address = -1;
1303 void tlb_flush_page(CPUState *env, target_ulong addr)
1308 TranslationBlock *tb;
1310 #if defined(DEBUG_TLB)
1311 printf("tlb_flush_page: 0x%08x\n", addr);
1313 /* must reset current TB so that interrupts cannot modify the
1314 links while we are modifying them */
1315 env->current_tb = NULL;
1317 addr &= TARGET_PAGE_MASK;
1318 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1319 tlb_flush_entry(&env->tlb_read[0][i], addr);
1320 tlb_flush_entry(&env->tlb_write[0][i], addr);
1321 tlb_flush_entry(&env->tlb_read[1][i], addr);
1322 tlb_flush_entry(&env->tlb_write[1][i], addr);
1324 /* remove from the virtual pc hash table all the TB at this
1327 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1328 if (vp && vp->valid_tag == virt_valid_tag) {
1329 p = page_find(vp->phys_addr >> TARGET_PAGE_BITS);
1331 /* we remove all the links to the TBs in this virtual page */
1333 while (tb != NULL) {
1335 tb = (TranslationBlock *)((long)tb & ~3);
1336 if ((tb->pc & TARGET_PAGE_MASK) == addr ||
1337 ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr) {
1340 tb = tb->page_next[n];
1346 #if !defined(CONFIG_SOFTMMU)
1347 if (addr < MMAP_AREA_END)
1348 munmap((void *)addr, TARGET_PAGE_SIZE);
1352 static inline void tlb_protect_code1(CPUTLBEntry *tlb_entry, target_ulong addr)
1354 if (addr == (tlb_entry->address &
1355 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
1356 (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_CODE &&
1357 (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_ROM) {
1358 tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_CODE;
1362 /* update the TLBs so that writes to code in the virtual page 'addr'
1364 static void tlb_protect_code(CPUState *env, target_ulong addr)
1368 addr &= TARGET_PAGE_MASK;
1369 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1370 tlb_protect_code1(&env->tlb_write[0][i], addr);
1371 tlb_protect_code1(&env->tlb_write[1][i], addr);
1372 #if !defined(CONFIG_SOFTMMU)
1373 /* NOTE: as we generated the code for this page, it is already at
1375 if (addr < MMAP_AREA_END)
1376 mprotect((void *)addr, TARGET_PAGE_SIZE, PROT_READ);
1380 static inline void tlb_unprotect_code2(CPUTLBEntry *tlb_entry,
1381 unsigned long phys_addr)
1383 if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE &&
1384 ((tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend) == phys_addr) {
1385 tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1389 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1390 tested for self-modifying code */
1391 static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr)
1395 phys_addr &= TARGET_PAGE_MASK;
1396 phys_addr += (long)phys_ram_base;
1397 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1398 tlb_unprotect_code2(&env->tlb_write[0][i], phys_addr);
1399 tlb_unprotect_code2(&env->tlb_write[1][i], phys_addr);
1402 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1403 unsigned long start, unsigned long length)
1406 if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1407 addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
1408 if ((addr - start) < length) {
1409 tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1414 void cpu_physical_memory_reset_dirty(target_ulong start, target_ulong end)
1417 unsigned long length, start1;
1420 start &= TARGET_PAGE_MASK;
1421 end = TARGET_PAGE_ALIGN(end);
1423 length = end - start;
1426 memset(phys_ram_dirty + (start >> TARGET_PAGE_BITS), 0, length >> TARGET_PAGE_BITS);
1428 env = cpu_single_env;
1429 /* we modify the TLB cache so that the dirty bit will be set again
1430 when accessing the range */
1431 start1 = start + (unsigned long)phys_ram_base;
1432 for(i = 0; i < CPU_TLB_SIZE; i++)
1433 tlb_reset_dirty_range(&env->tlb_write[0][i], start1, length);
1434 for(i = 0; i < CPU_TLB_SIZE; i++)
1435 tlb_reset_dirty_range(&env->tlb_write[1][i], start1, length);
1437 #if !defined(CONFIG_SOFTMMU)
1438 /* XXX: this is expensive */
1444 for(i = 0; i < L1_SIZE; i++) {
1447 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1448 for(j = 0; j < L2_SIZE; j++) {
1449 if (p->valid_tag == virt_valid_tag &&
1450 p->phys_addr >= start && p->phys_addr < end &&
1451 (p->prot & PROT_WRITE)) {
1452 if (addr < MMAP_AREA_END) {
1453 mprotect((void *)addr, TARGET_PAGE_SIZE,
1454 p->prot & ~PROT_WRITE);
1457 addr += TARGET_PAGE_SIZE;
1466 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
1467 unsigned long start)
1470 if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1471 addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
1472 if (addr == start) {
1473 tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_RAM;
1478 /* update the TLB corresponding to virtual page vaddr and phys addr
1479 addr so that it is no longer dirty */
1480 static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
1482 CPUState *env = cpu_single_env;
1485 phys_ram_dirty[(addr - (unsigned long)phys_ram_base) >> TARGET_PAGE_BITS] = 1;
1487 addr &= TARGET_PAGE_MASK;
1488 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1489 tlb_set_dirty1(&env->tlb_write[0][i], addr);
1490 tlb_set_dirty1(&env->tlb_write[1][i], addr);
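/* Illustrative sketch (not part of the original file): a typical consumer of
   the dirty tracking above (e.g. a display adapter scanning guest RAM) reads
   phys_ram_dirty[], then resets the range so the IO_MEM_NOTDIRTY slow path
   marks the pages dirty again on the next guest write. */
static void example_scan_and_reset_dirty(target_ulong start, target_ulong end)
{
    target_ulong addr;
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        if (phys_ram_dirty[addr >> TARGET_PAGE_BITS]) {
            /* ... refresh this page on the consumer side ... */
        }
    }
    cpu_physical_memory_reset_dirty(start, end);
}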
1493 /* add a new TLB entry. At most one entry for a given virtual address
1494 is permitted. Return 0 if OK or 2 if the page could not be mapped
1495 (can only happen in non SOFTMMU mode for I/O pages or pages
1496 conflicting with the host address space). */
1497 int tlb_set_page(CPUState *env, target_ulong vaddr,
1498 target_phys_addr_t paddr, int prot,
1499 int is_user, int is_softmmu)
1503 TranslationBlock *first_tb;
1505 target_ulong address;
1506 unsigned long addend;
1509 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1512 pd = IO_MEM_UNASSIGNED;
1515 pd = p->phys_offset;
1516 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1517 /* NOTE: we also allocate the page at this stage */
1518 p1 = page_find_alloc(pd >> TARGET_PAGE_BITS);
1519 first_tb = p1->first_tb;
1522 #if defined(DEBUG_TLB)
1523 printf("tlb_set_page: vaddr=0x%08x paddr=0x%08x prot=%x u=%d c=%d smmu=%d pd=0x%08x\n",
1524 vaddr, paddr, prot, is_user, (first_tb != NULL), is_softmmu, pd);
1528 #if !defined(CONFIG_SOFTMMU)
1532 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1533 /* IO memory case */
1534 address = vaddr | pd;
1537 /* standard memory */
1539 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1542 index = (vaddr >> 12) & (CPU_TLB_SIZE - 1);
1544 if (prot & PAGE_READ) {
1545 env->tlb_read[is_user][index].address = address;
1546 env->tlb_read[is_user][index].addend = addend;
1548 env->tlb_read[is_user][index].address = -1;
1549 env->tlb_read[is_user][index].addend = -1;
1551 if (prot & PAGE_WRITE) {
1552 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
1553 /* ROM: access is ignored (same as unassigned) */
1554 env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
1555 env->tlb_write[is_user][index].addend = addend;
1557 /* XXX: the PowerPC code seems not ready to handle
1558 self modifying code with DCBI */
1559 #if defined(TARGET_HAS_SMC) || 1
1561 /* if code is present, we use a specific memory
1562 handler. It works only for physical memory access */
1563 env->tlb_write[is_user][index].address = vaddr | IO_MEM_CODE;
1564 env->tlb_write[is_user][index].addend = addend;
1567 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1568 !cpu_physical_memory_is_dirty(pd)) {
1569 env->tlb_write[is_user][index].address = vaddr | IO_MEM_NOTDIRTY;
1570 env->tlb_write[is_user][index].addend = addend;
1572 env->tlb_write[is_user][index].address = address;
1573 env->tlb_write[is_user][index].addend = addend;
1576 env->tlb_write[is_user][index].address = -1;
1577 env->tlb_write[is_user][index].addend = -1;
1580 #if !defined(CONFIG_SOFTMMU)
1582 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1583 /* IO access: no mapping is done as it will be handled by the
1585 if (!(env->hflags & HF_SOFTMMU_MASK))
1590 if (vaddr >= MMAP_AREA_END) {
1593 if (prot & PROT_WRITE) {
1594 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1595 #if defined(TARGET_HAS_SMC) || 1
1598 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1599 !cpu_physical_memory_is_dirty(pd))) {
1600 /* ROM: we behave as if code were inside */
1601 /* if code is present, we only map as read only and save the
1605 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS);
1608 vp->valid_tag = virt_valid_tag;
1609 prot &= ~PAGE_WRITE;
1612 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
1613 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1614 if (map_addr == MAP_FAILED) {
1615 cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
1625 /* called from signal handler: invalidate the code and unprotect the
1626 page. Return TRUE if the fault was successfully handled. */
1627 int page_unprotect(unsigned long addr, unsigned long pc, void *puc)
1629 #if !defined(CONFIG_SOFTMMU)
1632 #if defined(DEBUG_TLB)
1633 printf("page_unprotect: addr=0x%08x\n", addr);
1635 addr &= TARGET_PAGE_MASK;
1637 /* if it is not mapped, no need to worry here */
1638 if (addr >= MMAP_AREA_END)
1640 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1643 /* NOTE: in this case, validate_tag is _not_ tested as it
1644 validates only the code TLB */
1645 if (vp->valid_tag != virt_valid_tag)
1647 if (!(vp->prot & PAGE_WRITE))
1649 #if defined(DEBUG_TLB)
1650 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1651 addr, vp->phys_addr, vp->prot);
1653 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1654 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1655 (unsigned long)addr, vp->prot);
1656 /* set the dirty bit */
1657 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 1;
1658 /* flush the code inside */
1659 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
1668 void tlb_flush(CPUState *env, int flush_global)
1672 void tlb_flush_page(CPUState *env, target_ulong addr)
1676 int tlb_set_page(CPUState *env, target_ulong vaddr,
1677 target_phys_addr_t paddr, int prot,
1678 int is_user, int is_softmmu)
1683 /* dump memory mappings */
1684 void page_dump(FILE *f)
1686 unsigned long start, end;
1687 int i, j, prot, prot1;
1690 fprintf(f, "%-8s %-8s %-8s %s\n",
1691 "start", "end", "size", "prot");
1695 for(i = 0; i <= L1_SIZE; i++) {
1700 for(j = 0;j < L2_SIZE; j++) {
1705 if (prot1 != prot) {
1706 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1708 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1709 start, end, end - start,
1710 prot & PAGE_READ ? 'r' : '-',
1711 prot & PAGE_WRITE ? 'w' : '-',
1712 prot & PAGE_EXEC ? 'x' : '-');
1726 int page_get_flags(unsigned long address)
1730 p = page_find(address >> TARGET_PAGE_BITS);
1736 /* modify the flags of a page and invalidate the code if
1737 necessary. The flag PAGE_WRITE_ORG is set automatically
1738 depending on PAGE_WRITE */
1739 void page_set_flags(unsigned long start, unsigned long end, int flags)
1744 start = start & TARGET_PAGE_MASK;
1745 end = TARGET_PAGE_ALIGN(end);
1746 if (flags & PAGE_WRITE)
1747 flags |= PAGE_WRITE_ORG;
1748 spin_lock(&tb_lock);
1749 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1750 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1751 /* if the write protection is set, then we invalidate the code
1753 if (!(p->flags & PAGE_WRITE) &&
1754 (flags & PAGE_WRITE) &&
1756 tb_invalidate_phys_page(addr, 0, NULL);
1760 spin_unlock(&tb_lock);
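/* Hypothetical usage sketch (not part of the original file): a user-mode
   mmap() emulation recording a new writable mapping with the helpers above.
   PAGE_WRITE_ORG is added automatically by page_set_flags(). */
static void example_register_mapping(unsigned long start, unsigned long size)
{
    page_set_flags(start, start + size,
                   PAGE_VALID | PAGE_READ | PAGE_WRITE | PAGE_EXEC);
}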
1763 /* called from signal handler: invalidate the code and unprotect the
1764 page. Return TRUE if the fault was successfully handled. */
1765 int page_unprotect(unsigned long address, unsigned long pc, void *puc)
1767 unsigned int page_index, prot, pindex;
1769 unsigned long host_start, host_end, addr;
1771 host_start = address & qemu_host_page_mask;
1772 page_index = host_start >> TARGET_PAGE_BITS;
1773 p1 = page_find(page_index);
1776 host_end = host_start + qemu_host_page_size;
1779 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1783 /* if the page was really writable, then we change its
1784 protection back to writable */
1785 if (prot & PAGE_WRITE_ORG) {
1786 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1787 if (!(p1[pindex].flags & PAGE_WRITE)) {
1788 mprotect((void *)host_start, qemu_host_page_size,
1789 (prot & PAGE_BITS) | PAGE_WRITE);
1790 p1[pindex].flags |= PAGE_WRITE;
1791 /* and since the content will be modified, we must invalidate
1792 the corresponding translated code. */
1793 tb_invalidate_phys_page(address, pc, puc);
1794 #ifdef DEBUG_TB_CHECK
1795 tb_invalidate_check(address);
1803 /* call this function when system calls directly modify a memory area */
1804 void page_unprotect_range(uint8_t *data, unsigned long data_size)
1806 unsigned long start, end, addr;
1808 start = (unsigned long)data;
1809 end = start + data_size;
1810 start &= TARGET_PAGE_MASK;
1811 end = TARGET_PAGE_ALIGN(end);
1812 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1813 page_unprotect(addr, 0, NULL);
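/* Illustrative sketch (not part of the original file): a syscall emulation
   that writes directly into guest memory calls page_unprotect_range() first,
   so that translated code covering the buffer is invalidated and the pages
   become writable again before the copy. */
static void example_fill_guest_buffer(uint8_t *guest_buf, unsigned long size)
{
    page_unprotect_range(guest_buf, size);
    memset(guest_buf, 0, size);
}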
1817 static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
1820 #endif /* defined(CONFIG_USER_ONLY) */
1822 /* register physical memory. 'size' must be a multiple of the target
1823 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
1825 void cpu_register_physical_memory(target_phys_addr_t start_addr,
1827 unsigned long phys_offset)
1829 unsigned long addr, end_addr;
1832 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
1833 end_addr = start_addr + size;
1834 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
1835 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS);
1836 p->phys_offset = phys_offset;
1837 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
1838 phys_offset += TARGET_PAGE_SIZE;
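/* Illustrative usage (not part of the original file): a board init routine
   mapping 1 MB of RAM at guest physical address 0, followed by one page of
   ROM.  The last argument is the offset into phys_ram_base, optionally or'ed
   with an IO_MEM_* tag such as IO_MEM_ROM. */
static void example_register_board_memory(void)
{
    cpu_register_physical_memory(0x00000000, 0x00100000, 0x00000000);
    cpu_register_physical_memory(0x00100000, TARGET_PAGE_SIZE,
                                 0x00100000 | IO_MEM_ROM);
}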
1842 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
1847 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
1851 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
1852 unassigned_mem_readb,
1853 unassigned_mem_readb,
1854 unassigned_mem_readb,
1857 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
1858 unassigned_mem_writeb,
1859 unassigned_mem_writeb,
1860 unassigned_mem_writeb,
1863 /* self-modifying code support in soft MMU mode: writes to a page
1864 containing code are routed to these functions */
1866 static void code_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
1868 unsigned long phys_addr;
1870 phys_addr = addr - (unsigned long)phys_ram_base;
1871 #if !defined(CONFIG_USER_ONLY)
1872 tb_invalidate_phys_page_fast(phys_addr, 1);
1874 stb_raw((uint8_t *)addr, val);
1875 phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
1878 static void code_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
1880 unsigned long phys_addr;
1882 phys_addr = addr - (unsigned long)phys_ram_base;
1883 #if !defined(CONFIG_USER_ONLY)
1884 tb_invalidate_phys_page_fast(phys_addr, 2);
1886 stw_raw((uint8_t *)addr, val);
1887 phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
1890 static void code_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
1892 unsigned long phys_addr;
1894 phys_addr = addr - (unsigned long)phys_ram_base;
1895 #if !defined(CONFIG_USER_ONLY)
1896 tb_invalidate_phys_page_fast(phys_addr, 4);
1898 stl_raw((uint8_t *)addr, val);
1899 phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
1902 static CPUReadMemoryFunc *code_mem_read[3] = {
1903 NULL, /* never used */
1904 NULL, /* never used */
1905 NULL, /* never used */
1908 static CPUWriteMemoryFunc *code_mem_write[3] = {
1914 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
1916 stb_raw((uint8_t *)addr, val);
1917 tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
1920 static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
1922 stw_raw((uint8_t *)addr, val);
1923 tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
1926 static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
1928 stl_raw((uint8_t *)addr, val);
1929 tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
1932 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
1933 notdirty_mem_writeb,
1934 notdirty_mem_writew,
1935 notdirty_mem_writel,
1938 static void io_mem_init(void)
1940 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, code_mem_read, unassigned_mem_write, NULL);
1941 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
1942 cpu_register_io_memory(IO_MEM_CODE >> IO_MEM_SHIFT, code_mem_read, code_mem_write, NULL);
1943 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, code_mem_read, notdirty_mem_write, NULL);
1946 /* alloc dirty bits array */
1947 phys_ram_dirty = qemu_malloc(phys_ram_size >> TARGET_PAGE_BITS);
1950 /* mem_read and mem_write are arrays of functions containing the
1951 function to access byte (index 0), word (index 1) and dword (index
1952 2). All functions must be supplied. If io_index is non-zero, the
1953 corresponding I/O zone is modified. If it is zero, a new I/O zone is
1954 allocated. The return value can be used with
1955 cpu_register_physical_memory(). (-1) is returned if error. */
1956 int cpu_register_io_memory(int io_index,
1957 CPUReadMemoryFunc **mem_read,
1958 CPUWriteMemoryFunc **mem_write,
1963 if (io_index <= 0) {
1964 if (io_index >= IO_MEM_NB_ENTRIES)
1966 io_index = io_mem_nb++;
1968 if (io_index >= IO_MEM_NB_ENTRIES)
1972 for(i = 0;i < 3; i++) {
1973 io_mem_read[io_index][i] = mem_read[i];
1974 io_mem_write[io_index][i] = mem_write[i];
1976 io_mem_opaque[io_index] = opaque;
1977 return io_index << IO_MEM_SHIFT;
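/* Illustrative usage (not part of the original file): registering MMIO
   callbacks for a hypothetical device and mapping them at a guest physical
   address.  Passing io_index == 0 asks for a fresh slot; the returned value
   is fed to cpu_register_physical_memory() as the phys_offset tag. */
static uint32_t mydev_mem_read(void *opaque, target_phys_addr_t addr)
{
    return 0;                         /* hypothetical device: nothing to read */
}
static void mydev_mem_write(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    /* hypothetical device: writes are ignored */
}
static CPUReadMemoryFunc *mydev_mem_reads[3] = {
    mydev_mem_read, mydev_mem_read, mydev_mem_read,
};
static CPUWriteMemoryFunc *mydev_mem_writes[3] = {
    mydev_mem_write, mydev_mem_write, mydev_mem_write,
};
static void example_register_mydev(target_phys_addr_t base)
{
    int iomem = cpu_register_io_memory(0, mydev_mem_reads, mydev_mem_writes, NULL);
    cpu_register_physical_memory(base, TARGET_PAGE_SIZE, iomem);
}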
1980 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
1982 return io_mem_write[io_index >> IO_MEM_SHIFT];
1985 CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
1987 return io_mem_read[io_index >> IO_MEM_SHIFT];
1990 /* physical memory access (slow version, mainly for debug) */
1991 #if defined(CONFIG_USER_ONLY)
1992 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
1993 int len, int is_write)
1999 page = addr & TARGET_PAGE_MASK;
2000 l = (page + TARGET_PAGE_SIZE) - addr;
2003 flags = page_get_flags(page);
2004 if (!(flags & PAGE_VALID))
2007 if (!(flags & PAGE_WRITE))
2009 memcpy((uint8_t *)addr, buf, len);
2011 if (!(flags & PAGE_READ))
2013 memcpy(buf, (uint8_t *)addr, len);
2021 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2022 int len, int is_write)
2027 target_phys_addr_t page;
2032 page = addr & TARGET_PAGE_MASK;
2033 l = (page + TARGET_PAGE_SIZE) - addr;
2036 p = phys_page_find(page >> TARGET_PAGE_BITS);
2038 pd = IO_MEM_UNASSIGNED;
2040 pd = p->phys_offset;
2044 if ((pd & ~TARGET_PAGE_MASK) != 0) {
2045 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2046 if (l >= 4 && ((addr & 3) == 0)) {
2047 /* 32 bit write access */
2049 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2051 } else if (l >= 2 && ((addr & 1) == 0)) {
2052 /* 16 bit write access */
2053 val = lduw_raw(buf);
2054 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2058 val = ldub_raw(buf);
2059 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2063 unsigned long addr1;
2064 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2066 ptr = phys_ram_base + addr1;
2067 memcpy(ptr, buf, l);
2068 /* invalidate code */
2069 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2071 phys_ram_dirty[page >> TARGET_PAGE_BITS] = 1;
2074 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2075 (pd & ~TARGET_PAGE_MASK) != IO_MEM_CODE) {
2077 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2078 if (l >= 4 && ((addr & 3) == 0)) {
2079 /* 32 bit read access */
2080 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2083 } else if (l >= 2 && ((addr & 1) == 0)) {
2084 /* 16 bit read access */
2085 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2090 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2096 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2097 (addr & ~TARGET_PAGE_MASK);
2098 memcpy(buf, ptr, l);
2108 /* virtual memory access for debug */
2109 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2110 uint8_t *buf, int len, int is_write)
2113 target_ulong page, phys_addr;
2116 page = addr & TARGET_PAGE_MASK;
2117 phys_addr = cpu_get_phys_page_debug(env, page);
2118 /* if no physical page mapped, return an error */
2119 if (phys_addr == -1)
2121 l = (page + TARGET_PAGE_SIZE) - addr;
2124 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
2133 #if !defined(CONFIG_USER_ONLY)
2135 #define MMUSUFFIX _cmmu
2136 #define GETPC() NULL
2137 #define env cpu_single_env
2138 #define SOFTMMU_CODE_ACCESS
2141 #include "softmmu_template.h"
2144 #include "softmmu_template.h"
2147 #include "softmmu_template.h"
2150 #include "softmmu_template.h"