2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 #include <sys/types.h>
38 //#define DEBUG_TB_INVALIDATE
42 /* make various TB consistency checks */
43 //#define DEBUG_TB_CHECK
44 //#define DEBUG_TLB_CHECK
46 /* threshold to flush the translated code buffer */
47 #define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
49 #define SMC_BITMAP_USE_THRESHOLD 10
51 #define MMAP_AREA_START 0x00000000
52 #define MMAP_AREA_END 0xa8000000
54 TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
55 TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
56 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
58 /* any access to the tbs or the page table must use this lock */
59 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
61 uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
62 uint8_t *code_gen_ptr;
66 uint8_t *phys_ram_base;
67 uint8_t *phys_ram_dirty;
69 typedef struct PageDesc {
70 /* list of TBs intersecting this ram page */
71 TranslationBlock *first_tb;
72 /* in order to optimize self modifying code handling, we count the
73 number of code write accesses to a given page; past a threshold a bitmap is used */
74 unsigned int code_write_count;
76 #if defined(CONFIG_USER_ONLY)
81 typedef struct PhysPageDesc {
82 /* offset in host memory of the page + io_index in the low 12 bits */
83 unsigned long phys_offset;
86 typedef struct VirtPageDesc {
87 /* physical address of code page. It is valid only if 'valid_tag'
88 matches 'virt_valid_tag' */
89 target_ulong phys_addr;
90 unsigned int valid_tag;
91 #if !defined(CONFIG_SOFTMMU)
92 /* original page access rights. It is valid only if 'valid_tag'
93 matches 'virt_valid_tag' */
99 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
101 #define L1_SIZE (1 << L1_BITS)
102 #define L2_SIZE (1 << L2_BITS)
104 static void io_mem_init(void);
106 unsigned long qemu_real_host_page_size;
107 unsigned long qemu_host_page_bits;
108 unsigned long qemu_host_page_size;
109 unsigned long qemu_host_page_mask;
111 /* XXX: for system emulation, it could just be an array */
112 static PageDesc *l1_map[L1_SIZE];
113 static PhysPageDesc *l1_phys_map[L1_SIZE];
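/* Illustrative sketch, not part of the original file: how a target page index
   is split across the two-level tables above (e.g. with TARGET_PAGE_BITS = 12
   and L2_BITS = 10, a 32-bit address decomposes into 10 + 10 + 12 bits).
   page_find()/page_find_alloc() below perform exactly this decomposition. */
#if 0
static PageDesc *page_lookup_sketch(target_ulong addr)
{
    unsigned int index = addr >> TARGET_PAGE_BITS;  /* target page number */
    PageDesc *p2 = l1_map[index >> L2_BITS];        /* first level slot */
    if (!p2)
        return NULL;                                /* second level not allocated yet */
    return p2 + (index & (L2_SIZE - 1));            /* entry inside the second level */
}
#endif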
115 #if !defined(CONFIG_USER_ONLY)
116 static VirtPageDesc *l1_virt_map[L1_SIZE];
117 static unsigned int virt_valid_tag;
120 /* io memory support */
121 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
122 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
123 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
124 static int io_mem_nb;
127 char *logfilename = "/tmp/qemu.log";
131 static void page_init(void)
133 /* NOTE: we can always suppose that qemu_host_page_size >= TARGET_PAGE_SIZE */
137 SYSTEM_INFO system_info;
140 GetSystemInfo(&system_info);
141 qemu_real_host_page_size = system_info.dwPageSize;
143 VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
144 PAGE_EXECUTE_READWRITE, &old_protect);
147 qemu_real_host_page_size = getpagesize();
149 unsigned long start, end;
151 start = (unsigned long)code_gen_buffer;
152 start &= ~(qemu_real_host_page_size - 1);
154 end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
155 end += qemu_real_host_page_size - 1;
156 end &= ~(qemu_real_host_page_size - 1);
158 mprotect((void *)start, end - start,
159 PROT_READ | PROT_WRITE | PROT_EXEC);
163 if (qemu_host_page_size == 0)
164 qemu_host_page_size = qemu_real_host_page_size;
165 if (qemu_host_page_size < TARGET_PAGE_SIZE)
166 qemu_host_page_size = TARGET_PAGE_SIZE;
167 qemu_host_page_bits = 0;
168 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
169 qemu_host_page_bits++;
170 qemu_host_page_mask = ~(qemu_host_page_size - 1);
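    /* Worked example (not in the original source): with 4 KiB host and target
       pages, qemu_host_page_size = 0x1000, qemu_host_page_bits = 12 and, on a
       32 bit host, qemu_host_page_mask = 0xfffff000. */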
171 #if !defined(CONFIG_USER_ONLY)
176 static inline PageDesc *page_find_alloc(unsigned int index)
180 lp = &l1_map[index >> L2_BITS];
183 /* allocate if not found */
184 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
185 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
188 return p + (index & (L2_SIZE - 1));
191 static inline PageDesc *page_find(unsigned int index)
195 p = l1_map[index >> L2_BITS];
198 return p + (index & (L2_SIZE - 1));
201 static inline PhysPageDesc *phys_page_find_alloc(unsigned int index)
203 PhysPageDesc **lp, *p;
205 lp = &l1_phys_map[index >> L2_BITS];
208 /* allocate if not found */
209 p = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);
210 memset(p, 0, sizeof(PhysPageDesc) * L2_SIZE);
213 return p + (index & (L2_SIZE - 1));
216 static inline PhysPageDesc *phys_page_find(unsigned int index)
220 p = l1_phys_map[index >> L2_BITS];
223 return p + (index & (L2_SIZE - 1));
226 #if !defined(CONFIG_USER_ONLY)
227 static void tlb_protect_code(CPUState *env, target_ulong addr);
228 static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr);
230 static inline VirtPageDesc *virt_page_find_alloc(unsigned int index)
232 VirtPageDesc **lp, *p;
234 lp = &l1_virt_map[index >> L2_BITS];
237 /* allocate if not found */
238 p = qemu_malloc(sizeof(VirtPageDesc) * L2_SIZE);
239 memset(p, 0, sizeof(VirtPageDesc) * L2_SIZE);
242 return p + (index & (L2_SIZE - 1));
245 static inline VirtPageDesc *virt_page_find(unsigned int index)
249 p = l1_virt_map[index >> L2_BITS];
252 return p + (index & (L2_SIZE - 1));
255 static void virt_page_flush(void)
262 if (virt_valid_tag == 0) {
264 for(i = 0; i < L1_SIZE; i++) {
267 for(j = 0; j < L2_SIZE; j++)
274 static void virt_page_flush(void)
279 void cpu_exec_init(void)
282 code_gen_ptr = code_gen_buffer;
288 static inline void invalidate_page_bitmap(PageDesc *p)
290 if (p->code_bitmap) {
291 qemu_free(p->code_bitmap);
292 p->code_bitmap = NULL;
294 p->code_write_count = 0;
297 /* set to NULL all the 'first_tb' fields in all PageDescs */
298 static void page_flush_tb(void)
303 for(i = 0; i < L1_SIZE; i++) {
306 for(j = 0; j < L2_SIZE; j++) {
308 invalidate_page_bitmap(p);
315 /* flush all the translation blocks */
316 /* XXX: tb_flush is currently not thread safe */
317 void tb_flush(CPUState *env)
319 #if defined(DEBUG_FLUSH)
320 printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
321 code_gen_ptr - code_gen_buffer,
323 nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
326 memset (tb_hash, 0, CODE_GEN_HASH_SIZE * sizeof (void *));
329 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
332 code_gen_ptr = code_gen_buffer;
333 /* XXX: flush processor icache at this point if cache flush is expensive */
337 #ifdef DEBUG_TB_CHECK
339 static void tb_invalidate_check(unsigned long address)
341 TranslationBlock *tb;
343 address &= TARGET_PAGE_MASK;
344 for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
345 for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
346 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
347 address >= tb->pc + tb->size)) {
348 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
349 address, tb->pc, tb->size);
355 /* verify that all the pages have correct rights for code */
356 static void tb_page_check(void)
358 TranslationBlock *tb;
359 int i, flags1, flags2;
361 for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
362 for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
363 flags1 = page_get_flags(tb->pc);
364 flags2 = page_get_flags(tb->pc + tb->size - 1);
365 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
366 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
367 tb->pc, tb->size, flags1, flags2);
373 void tb_jmp_check(TranslationBlock *tb)
375 TranslationBlock *tb1;
378 /* suppress any remaining jumps to this TB */
382 tb1 = (TranslationBlock *)((long)tb1 & ~3);
385 tb1 = tb1->jmp_next[n1];
387 /* check end of list */
389 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
395 /* invalidate one TB */
396 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
399 TranslationBlock *tb1;
403 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
406 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
410 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
412 TranslationBlock *tb1;
418 tb1 = (TranslationBlock *)((long)tb1 & ~3);
420 *ptb = tb1->page_next[n1];
423 ptb = &tb1->page_next[n1];
427 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
429 TranslationBlock *tb1, **ptb;
432 ptb = &tb->jmp_next[n];
435 /* find tb(n) in circular list */
439 tb1 = (TranslationBlock *)((long)tb1 & ~3);
440 if (n1 == n && tb1 == tb)
443 ptb = &tb1->jmp_first;
445 ptb = &tb1->jmp_next[n1];
448 /* now we can suppress tb(n) from the list */
449 *ptb = tb->jmp_next[n];
451 tb->jmp_next[n] = NULL;
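/* Illustrative sketch, not part of the original file: jmp_first/jmp_next hold a
   TranslationBlock pointer with the jump slot number (0 or 1), or the marker 2
   for the list head, packed into the two low bits - hence the '& ~3' masking
   above. Hypothetical helpers showing the encoding: */
#if 0
static inline TranslationBlock *tb_tag_ptr(TranslationBlock *tb, int n)
{
    return (TranslationBlock *)((long)tb | n);      /* pack slot number into low bits */
}

static inline TranslationBlock *tb_untag_ptr(TranslationBlock *tagged, int *n)
{
    *n = (long)tagged & 3;                          /* recover slot number (2 = list head) */
    return (TranslationBlock *)((long)tagged & ~3); /* recover the real pointer */
}
#endif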
455 /* reset the jump entry 'n' of a TB so that it is not chained to another TB */
457 static inline void tb_reset_jump(TranslationBlock *tb, int n)
459 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
462 static inline void tb_invalidate(TranslationBlock *tb)
465 TranslationBlock *tb1, *tb2, **ptb;
467 tb_invalidated_flag = 1;
469 /* remove the TB from the hash list */
470 h = tb_hash_func(tb->pc);
474 /* NOTE: the TB is not necessarily linked in the hash; if it is not
475 found there, it simply is not currently in use */
479 *ptb = tb1->hash_next;
482 ptb = &tb1->hash_next;
485 /* suppress this TB from the two jump lists */
486 tb_jmp_remove(tb, 0);
487 tb_jmp_remove(tb, 1);
489 /* suppress any remaining jumps to this TB */
495 tb1 = (TranslationBlock *)((long)tb1 & ~3);
496 tb2 = tb1->jmp_next[n1];
497 tb_reset_jump(tb1, n1);
498 tb1->jmp_next[n1] = NULL;
501 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
504 static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
508 target_ulong phys_pc;
510 /* remove the TB from the hash list */
511 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
512 h = tb_phys_hash_func(phys_pc);
513 tb_remove(&tb_phys_hash[h], tb,
514 offsetof(TranslationBlock, phys_hash_next));
516 /* remove the TB from the page list */
517 if (tb->page_addr[0] != page_addr) {
518 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
519 tb_page_remove(&p->first_tb, tb);
520 invalidate_page_bitmap(p);
522 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
523 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
524 tb_page_remove(&p->first_tb, tb);
525 invalidate_page_bitmap(p);
531 static inline void set_bits(uint8_t *tab, int start, int len)
537 mask = 0xff << (start & 7);
538 if ((start & ~7) == (end & ~7)) {
540 mask &= ~(0xff << (end & 7));
545 start = (start + 8) & ~7;
547 while (start < end1) {
552 mask = ~(0xff << (end & 7));
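/* Example (not in the original source): set_bits(bitmap, 0x10, 4) sets the four
   bits covering byte offsets 0x10..0x13, i.e. bits 0..3 of bitmap[2].
   build_page_bitmap() below uses it to record which bytes of a page are covered
   by translated code. */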
558 static void build_page_bitmap(PageDesc *p)
560 int n, tb_start, tb_end;
561 TranslationBlock *tb;
563 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
566 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
571 tb = (TranslationBlock *)((long)tb & ~3);
572 /* NOTE: this is subtle as a TB may span two physical pages */
574 /* NOTE: tb_end may be after the end of the page, but
575 it is not a problem */
576 tb_start = tb->pc & ~TARGET_PAGE_MASK;
577 tb_end = tb_start + tb->size;
578 if (tb_end > TARGET_PAGE_SIZE)
579 tb_end = TARGET_PAGE_SIZE;
582 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
584 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
585 tb = tb->page_next[n];
589 #ifdef TARGET_HAS_PRECISE_SMC
591 static void tb_gen_code(CPUState *env,
592 target_ulong pc, target_ulong cs_base, int flags,
595 TranslationBlock *tb;
597 target_ulong phys_pc, phys_page2, virt_page2;
600 phys_pc = get_phys_addr_code(env, (unsigned long)pc);
601 tb = tb_alloc((unsigned long)pc);
603 /* flush must be done */
605 /* cannot fail at this point */
606 tb = tb_alloc((unsigned long)pc);
608 tc_ptr = code_gen_ptr;
610 tb->cs_base = cs_base;
613 cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
614 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
616 /* check next page if needed */
617 virt_page2 = ((unsigned long)pc + tb->size - 1) & TARGET_PAGE_MASK;
619 if (((unsigned long)pc & TARGET_PAGE_MASK) != virt_page2) {
620 phys_page2 = get_phys_addr_code(env, virt_page2);
622 tb_link_phys(tb, phys_pc, phys_page2);
626 /* invalidate all TBs which intersect with the target physical page
627 starting in range [start;end[. NOTE: start and end must refer to
628 the same physical page. 'is_cpu_write_access' should be true if called
629 from a real cpu write access: the virtual CPU will exit the current
630 TB if code is modified inside this TB. */
631 void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
632 int is_cpu_write_access)
634 int n, current_tb_modified, current_tb_not_found, current_flags;
635 CPUState *env = cpu_single_env;
637 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
638 target_ulong tb_start, tb_end;
639 target_ulong current_pc, current_cs_base;
641 p = page_find(start >> TARGET_PAGE_BITS);
644 if (!p->code_bitmap &&
645 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
646 is_cpu_write_access) {
647 /* build code bitmap */
648 build_page_bitmap(p);
651 /* we remove all the TBs in the range [start, end[ */
652 /* XXX: see if in some cases it could be faster to invalidate all the code */
653 current_tb_not_found = is_cpu_write_access;
654 current_tb_modified = 0;
655 current_tb = NULL; /* avoid warning */
656 current_pc = 0; /* avoid warning */
657 current_cs_base = 0; /* avoid warning */
658 current_flags = 0; /* avoid warning */
662 tb = (TranslationBlock *)((long)tb & ~3);
663 tb_next = tb->page_next[n];
664 /* NOTE: this is subtle as a TB may span two physical pages */
666 /* NOTE: tb_end may be after the end of the page, but
667 it is not a problem */
668 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
669 tb_end = tb_start + tb->size;
671 tb_start = tb->page_addr[1];
672 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
674 if (!(tb_end <= start || tb_start >= end)) {
675 #ifdef TARGET_HAS_PRECISE_SMC
676 if (current_tb_not_found) {
677 current_tb_not_found = 0;
679 if (env->mem_write_pc) {
680 /* now we have a real cpu fault */
681 current_tb = tb_find_pc(env->mem_write_pc);
684 if (current_tb == tb &&
685 !(current_tb->cflags & CF_SINGLE_INSN)) {
686 /* If we are modifying the current TB, we must stop
687 its execution. We could be more precise by checking
688 that the modification is after the current PC, but it
689 would require a specialized function to partially
690 restore the CPU state */
692 current_tb_modified = 1;
693 cpu_restore_state(current_tb, env,
694 env->mem_write_pc, NULL);
695 #if defined(TARGET_I386)
696 current_flags = env->hflags;
697 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
698 current_cs_base = (target_ulong)env->segs[R_CS].base;
699 current_pc = current_cs_base + env->eip;
701 #error unsupported CPU
704 #endif /* TARGET_HAS_PRECISE_SMC */
705 saved_tb = env->current_tb;
706 env->current_tb = NULL;
707 tb_phys_invalidate(tb, -1);
708 env->current_tb = saved_tb;
709 if (env->interrupt_request && env->current_tb)
710 cpu_interrupt(env, env->interrupt_request);
714 #if !defined(CONFIG_USER_ONLY)
715 /* if no code remains, there is no need to keep using slow writes */
717 invalidate_page_bitmap(p);
718 if (is_cpu_write_access) {
719 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
723 #ifdef TARGET_HAS_PRECISE_SMC
724 if (current_tb_modified) {
725 /* we generate a block containing just the instruction
726 modifying the memory. It will ensure that it cannot modify itself */
728 env->current_tb = NULL;
729 tb_gen_code(env, current_pc, current_cs_base, current_flags,
731 cpu_resume_from_signal(env, NULL);
736 /* len must be <= 8 and start must be a multiple of len */
737 static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
744 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
745 cpu_single_env->mem_write_vaddr, len,
747 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
751 p = page_find(start >> TARGET_PAGE_BITS);
754 if (p->code_bitmap) {
755 offset = start & ~TARGET_PAGE_MASK;
756 b = p->code_bitmap[offset >> 3] >> (offset & 7);
757 if (b & ((1 << len) - 1))
761 tb_invalidate_phys_page_range(start, start + len, 1);
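    /* Worked example (not in the original source): for a 4 byte write at page
       offset 0x104, offset >> 3 = 0x20 and offset & 7 = 4, so the bitmap test
       above checks bits 4..7 of code_bitmap[0x20]; only if one of them is set
       does the write overlap translated code and the slow invalidate path get
       taken. */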
765 #if !defined(CONFIG_SOFTMMU)
766 static void tb_invalidate_phys_page(target_ulong addr,
767 unsigned long pc, void *puc)
769 int n, current_flags, current_tb_modified;
770 target_ulong current_pc, current_cs_base;
772 TranslationBlock *tb, *current_tb;
773 #ifdef TARGET_HAS_PRECISE_SMC
774 CPUState *env = cpu_single_env;
777 addr &= TARGET_PAGE_MASK;
778 p = page_find(addr >> TARGET_PAGE_BITS);
782 current_tb_modified = 0;
784 current_pc = 0; /* avoid warning */
785 current_cs_base = 0; /* avoid warning */
786 current_flags = 0; /* avoid warning */
787 #ifdef TARGET_HAS_PRECISE_SMC
789 current_tb = tb_find_pc(pc);
794 tb = (TranslationBlock *)((long)tb & ~3);
795 #ifdef TARGET_HAS_PRECISE_SMC
796 if (current_tb == tb &&
797 !(current_tb->cflags & CF_SINGLE_INSN)) {
798 /* If we are modifying the current TB, we must stop
799 its execution. We could be more precise by checking
800 that the modification is after the current PC, but it
801 would require a specialized function to partially
802 restore the CPU state */
804 current_tb_modified = 1;
805 cpu_restore_state(current_tb, env, pc, puc);
806 #if defined(TARGET_I386)
807 current_flags = env->hflags;
808 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
809 current_cs_base = (target_ulong)env->segs[R_CS].base;
810 current_pc = current_cs_base + env->eip;
812 #error unsupported CPU
815 #endif /* TARGET_HAS_PRECISE_SMC */
816 tb_phys_invalidate(tb, addr);
817 tb = tb->page_next[n];
820 #ifdef TARGET_HAS_PRECISE_SMC
821 if (current_tb_modified) {
822 /* we generate a block containing just the instruction
823 modifying the memory. It will ensure that it cannot modify
825 env->current_tb = NULL;
826 tb_gen_code(env, current_pc, current_cs_base, current_flags,
828 cpu_resume_from_signal(env, puc);
834 /* add the tb in the target page and protect it if necessary */
835 static inline void tb_alloc_page(TranslationBlock *tb,
836 unsigned int n, unsigned int page_addr)
839 TranslationBlock *last_first_tb;
841 tb->page_addr[n] = page_addr;
842 p = page_find(page_addr >> TARGET_PAGE_BITS);
843 tb->page_next[n] = p->first_tb;
844 last_first_tb = p->first_tb;
845 p->first_tb = (TranslationBlock *)((long)tb | n);
846 invalidate_page_bitmap(p);
848 #if defined(TARGET_HAS_SMC) || 1
850 #if defined(CONFIG_USER_ONLY)
851 if (p->flags & PAGE_WRITE) {
852 unsigned long host_start, host_end, addr;
855 /* force the host page to be non writable (writes will take a
856 page fault + mprotect overhead) */
857 host_start = page_addr & qemu_host_page_mask;
858 host_end = host_start + qemu_host_page_size;
860 for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
861 prot |= page_get_flags(addr);
862 mprotect((void *)host_start, qemu_host_page_size,
863 (prot & PAGE_BITS) & ~PAGE_WRITE);
864 #ifdef DEBUG_TB_INVALIDATE
865 printf("protecting code page: 0x%08lx\n",
868 p->flags &= ~PAGE_WRITE;
871 /* if some code is already present, then the pages are already
872 protected. So we handle the case where only the first TB is
873 allocated in a physical page */
874 if (!last_first_tb) {
875 target_ulong virt_addr;
877 virt_addr = (tb->pc & TARGET_PAGE_MASK) + (n << TARGET_PAGE_BITS);
878 tlb_protect_code(cpu_single_env, virt_addr);
882 #endif /* TARGET_HAS_SMC */
885 /* Allocate a new translation block. Flush the translation buffer if
886 too many translation blocks or too much generated code. */
887 TranslationBlock *tb_alloc(unsigned long pc)
889 TranslationBlock *tb;
891 if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
892 (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
900 /* add a new TB and link it to the physical page tables. phys_page2 is
901 (-1) to indicate that only one page contains the TB. */
902 void tb_link_phys(TranslationBlock *tb,
903 target_ulong phys_pc, target_ulong phys_page2)
906 TranslationBlock **ptb;
908 /* add in the physical hash table */
909 h = tb_phys_hash_func(phys_pc);
910 ptb = &tb_phys_hash[h];
911 tb->phys_hash_next = *ptb;
914 /* add in the page list */
915 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
916 if (phys_page2 != -1)
917 tb_alloc_page(tb, 1, phys_page2);
919 tb->page_addr[1] = -1;
920 #ifdef DEBUG_TB_CHECK
925 /* link the tb with the other TBs */
926 void tb_link(TranslationBlock *tb)
928 #if !defined(CONFIG_USER_ONLY)
933 /* save the code memory mappings (needed to invalidate the code) */
934 addr = tb->pc & TARGET_PAGE_MASK;
935 vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
936 #ifdef DEBUG_TLB_CHECK
937 if (vp->valid_tag == virt_valid_tag &&
938 vp->phys_addr != tb->page_addr[0]) {
939 printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
940 addr, tb->page_addr[0], vp->phys_addr);
943 vp->phys_addr = tb->page_addr[0];
944 if (vp->valid_tag != virt_valid_tag) {
945 vp->valid_tag = virt_valid_tag;
946 #if !defined(CONFIG_SOFTMMU)
951 if (tb->page_addr[1] != -1) {
952 addr += TARGET_PAGE_SIZE;
953 vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
954 #ifdef DEBUG_TLB_CHECK
955 if (vp->valid_tag == virt_valid_tag &&
956 vp->phys_addr != tb->page_addr[1]) {
957 printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
958 addr, tb->page_addr[1], vp->phys_addr);
961 vp->phys_addr = tb->page_addr[1];
962 if (vp->valid_tag != virt_valid_tag) {
963 vp->valid_tag = virt_valid_tag;
964 #if !defined(CONFIG_SOFTMMU)
972 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
973 tb->jmp_next[0] = NULL;
974 tb->jmp_next[1] = NULL;
976 tb->cflags &= ~CF_FP_USED;
977 if (tb->cflags & CF_TB_FP_USED)
978 tb->cflags |= CF_FP_USED;
981 /* init original jump addresses */
982 if (tb->tb_next_offset[0] != 0xffff)
983 tb_reset_jump(tb, 0);
984 if (tb->tb_next_offset[1] != 0xffff)
985 tb_reset_jump(tb, 1);
988 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
989 tb[1].tc_ptr. Return NULL if not found */
990 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
994 TranslationBlock *tb;
998 if (tc_ptr < (unsigned long)code_gen_buffer ||
999 tc_ptr >= (unsigned long)code_gen_ptr)
1001 /* binary search (cf Knuth) */
1004 while (m_min <= m_max) {
1005 m = (m_min + m_max) >> 1;
1007 v = (unsigned long)tb->tc_ptr;
1010 else if (tc_ptr < v) {
1019 static void tb_reset_jump_recursive(TranslationBlock *tb);
1021 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1023 TranslationBlock *tb1, *tb_next, **ptb;
1026 tb1 = tb->jmp_next[n];
1028 /* find head of list */
1031 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1034 tb1 = tb1->jmp_next[n1];
1036 /* we are now sure that tb jumps to tb1 */
1039 /* remove tb from the jmp_first list */
1040 ptb = &tb_next->jmp_first;
1044 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1045 if (n1 == n && tb1 == tb)
1047 ptb = &tb1->jmp_next[n1];
1049 *ptb = tb->jmp_next[n];
1050 tb->jmp_next[n] = NULL;
1052 /* suppress the jump to next tb in generated code */
1053 tb_reset_jump(tb, n);
1055 /* suppress jumps in the tb on which we could have jumped */
1056 tb_reset_jump_recursive(tb_next);
1060 static void tb_reset_jump_recursive(TranslationBlock *tb)
1062 tb_reset_jump_recursive2(tb, 0);
1063 tb_reset_jump_recursive2(tb, 1);
1066 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1068 target_ulong phys_addr;
1070 phys_addr = cpu_get_phys_page_debug(env, pc);
1071 tb_invalidate_phys_page_range(phys_addr, phys_addr + 1, 0);
1074 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1075 breakpoint is reached */
1076 int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1078 #if defined(TARGET_I386) || defined(TARGET_PPC) || defined(TARGET_SPARC)
1081 for(i = 0; i < env->nb_breakpoints; i++) {
1082 if (env->breakpoints[i] == pc)
1086 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1088 env->breakpoints[env->nb_breakpoints++] = pc;
1090 breakpoint_invalidate(env, pc);
1097 /* remove a breakpoint */
1098 int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1100 #if defined(TARGET_I386) || defined(TARGET_PPC) || defined(TARGET_SPARC)
1102 for(i = 0; i < env->nb_breakpoints; i++) {
1103 if (env->breakpoints[i] == pc)
1108 memmove(&env->breakpoints[i], &env->breakpoints[i + 1],
1109 (env->nb_breakpoints - (i + 1)) * sizeof(env->breakpoints[0]));
1110 env->nb_breakpoints--;
1112 breakpoint_invalidate(env, pc);
1119 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1120 CPU loop after each instruction */
1121 void cpu_single_step(CPUState *env, int enabled)
1123 #if defined(TARGET_I386) || defined(TARGET_PPC) || defined(TARGET_SPARC)
1124 if (env->singlestep_enabled != enabled) {
1125 env->singlestep_enabled = enabled;
1126 /* must flush all the translated code to avoid inconsistencies */
1127 /* XXX: only flush what is necessary */
1133 /* enable or disable low level logging */
1134 void cpu_set_log(int log_flags)
1136 loglevel = log_flags;
1137 if (loglevel && !logfile) {
1138 logfile = fopen(logfilename, "w");
1140 perror(logfilename);
1143 #if !defined(CONFIG_SOFTMMU)
1144 /* avoid glibc's use of mmap() for the buffer by setting one "by hand" */
1146 static uint8_t logfile_buf[4096];
1147 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1150 setvbuf(logfile, NULL, _IOLBF, 0);
1155 void cpu_set_log_filename(const char *filename)
1157 logfilename = strdup(filename);
1160 /* mask must never be zero, except for A20 change call */
1161 void cpu_interrupt(CPUState *env, int mask)
1163 TranslationBlock *tb;
1164 static int interrupt_lock;
1166 env->interrupt_request |= mask;
1167 /* if the cpu is currently executing code, we must unlink it and
1168 all the potentially executing TBs */
1169 tb = env->current_tb;
1170 if (tb && !testandset(&interrupt_lock)) {
1171 env->current_tb = NULL;
1172 tb_reset_jump_recursive(tb);
1177 void cpu_reset_interrupt(CPUState *env, int mask)
1179 env->interrupt_request &= ~mask;
1182 CPULogItem cpu_log_items[] = {
1183 { CPU_LOG_TB_OUT_ASM, "out_asm",
1184 "show generated host assembly code for each compiled TB" },
1185 { CPU_LOG_TB_IN_ASM, "in_asm",
1186 "show target assembly code for each compiled TB" },
1187 { CPU_LOG_TB_OP, "op",
1188 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1190 { CPU_LOG_TB_OP_OPT, "op_opt",
1191 "show micro ops after optimization for each compiled TB" },
1193 { CPU_LOG_INT, "int",
1194 "show interrupts/exceptions in short format" },
1195 { CPU_LOG_EXEC, "exec",
1196 "show trace before each executed TB (lots of logs)" },
1197 { CPU_LOG_TB_CPU, "cpu",
1198 "show CPU state before bloc translation" },
1200 { CPU_LOG_PCALL, "pcall",
1201 "show protected mode far calls/returns/exceptions" },
1204 { CPU_LOG_IOPORT, "ioport",
1205 "show all i/o ports accesses" },
1210 static int cmp1(const char *s1, int n, const char *s2)
1212 if (strlen(s2) != n)
1214 return memcmp(s1, s2, n) == 0;
1217 /* takes a comma separated list of log masks. Returns 0 on error. */
1218 int cpu_str_to_log_mask(const char *str)
1227 p1 = strchr(p, ',');
1230 if(cmp1(p,p1-p,"all")) {
1231 for(item = cpu_log_items; item->mask != 0; item++) {
1235 for(item = cpu_log_items; item->mask != 0; item++) {
1236 if (cmp1(p, p1 - p, item->name))
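/* Usage sketch, not part of the original file: parse a user supplied item list
   (e.g. from a "-d in_asm,op" style option) and enable the corresponding
   logging. The wrapper function is hypothetical. */
#if 0
static void set_log_from_option_sketch(const char *arg)
{
    int mask = cpu_str_to_log_mask(arg);   /* 0 means an unknown item was given */
    if (mask == 0) {
        fprintf(stderr, "unknown log item in '%s'\n", arg);
        return;
    }
    cpu_set_log(mask);
}
#endif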
1250 void cpu_abort(CPUState *env, const char *fmt, ...)
1255 fprintf(stderr, "qemu: fatal: ");
1256 vfprintf(stderr, fmt, ap);
1257 fprintf(stderr, "\n");
1259 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1261 cpu_dump_state(env, stderr, fprintf, 0);
1267 #if !defined(CONFIG_USER_ONLY)
1269 /* NOTE: if flush_global is true, also flush global entries (not implemented yet) */
1271 void tlb_flush(CPUState *env, int flush_global)
1275 #if defined(DEBUG_TLB)
1276 printf("tlb_flush:\n");
1278 /* must reset current TB so that interrupts cannot modify the
1279 links while we are modifying them */
1280 env->current_tb = NULL;
1282 for(i = 0; i < CPU_TLB_SIZE; i++) {
1283 env->tlb_read[0][i].address = -1;
1284 env->tlb_write[0][i].address = -1;
1285 env->tlb_read[1][i].address = -1;
1286 env->tlb_write[1][i].address = -1;
1290 memset (tb_hash, 0, CODE_GEN_HASH_SIZE * sizeof (void *));
1292 #if !defined(CONFIG_SOFTMMU)
1293 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1297 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1299 if (addr == (tlb_entry->address &
1300 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
1301 tlb_entry->address = -1;
1304 void tlb_flush_page(CPUState *env, target_ulong addr)
1309 TranslationBlock *tb;
1311 #if defined(DEBUG_TLB)
1312 printf("tlb_flush_page: 0x%08x\n", addr);
1314 /* must reset current TB so that interrupts cannot modify the
1315 links while we are modifying them */
1316 env->current_tb = NULL;
1318 addr &= TARGET_PAGE_MASK;
1319 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1320 tlb_flush_entry(&env->tlb_read[0][i], addr);
1321 tlb_flush_entry(&env->tlb_write[0][i], addr);
1322 tlb_flush_entry(&env->tlb_read[1][i], addr);
1323 tlb_flush_entry(&env->tlb_write[1][i], addr);
1325 /* remove from the virtual pc hash table all the TBs at this virtual address */
1328 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1329 if (vp && vp->valid_tag == virt_valid_tag) {
1330 p = page_find(vp->phys_addr >> TARGET_PAGE_BITS);
1332 /* we remove all the links to the TBs in this virtual page */
1334 while (tb != NULL) {
1336 tb = (TranslationBlock *)((long)tb & ~3);
1337 if ((tb->pc & TARGET_PAGE_MASK) == addr ||
1338 ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr) {
1341 tb = tb->page_next[n];
1347 #if !defined(CONFIG_SOFTMMU)
1348 if (addr < MMAP_AREA_END)
1349 munmap((void *)addr, TARGET_PAGE_SIZE);
1353 static inline void tlb_protect_code1(CPUTLBEntry *tlb_entry, target_ulong addr)
1355 if (addr == (tlb_entry->address &
1356 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
1357 (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_CODE &&
1358 (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_ROM) {
1359 tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_CODE;
1363 /* update the TLBs so that writes to code in the virtual page 'addr' can be detected */
1365 static void tlb_protect_code(CPUState *env, target_ulong addr)
1369 addr &= TARGET_PAGE_MASK;
1370 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1371 tlb_protect_code1(&env->tlb_write[0][i], addr);
1372 tlb_protect_code1(&env->tlb_write[1][i], addr);
1373 #if !defined(CONFIG_SOFTMMU)
1374 /* NOTE: as we generated the code for this page, it is already at
1376 if (addr < MMAP_AREA_END)
1377 mprotect((void *)addr, TARGET_PAGE_SIZE, PROT_READ);
1381 static inline void tlb_unprotect_code2(CPUTLBEntry *tlb_entry,
1382 unsigned long phys_addr)
1384 if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE &&
1385 ((tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend) == phys_addr) {
1386 tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1390 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1391 tested for self modifying code */
1392 static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr)
1396 phys_addr &= TARGET_PAGE_MASK;
1397 phys_addr += (long)phys_ram_base;
1398 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1399 tlb_unprotect_code2(&env->tlb_write[0][i], phys_addr);
1400 tlb_unprotect_code2(&env->tlb_write[1][i], phys_addr);
1403 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1404 unsigned long start, unsigned long length)
1407 if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1408 addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
1409 if ((addr - start) < length) {
1410 tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1415 void cpu_physical_memory_reset_dirty(target_ulong start, target_ulong end)
1418 unsigned long length, start1;
1421 start &= TARGET_PAGE_MASK;
1422 end = TARGET_PAGE_ALIGN(end);
1424 length = end - start;
1427 memset(phys_ram_dirty + (start >> TARGET_PAGE_BITS), 0, length >> TARGET_PAGE_BITS);
1429 env = cpu_single_env;
1430 /* we modify the TLB cache so that the dirty bit will be set again
1431 when accessing the range */
1432 start1 = start + (unsigned long)phys_ram_base;
1433 for(i = 0; i < CPU_TLB_SIZE; i++)
1434 tlb_reset_dirty_range(&env->tlb_write[0][i], start1, length);
1435 for(i = 0; i < CPU_TLB_SIZE; i++)
1436 tlb_reset_dirty_range(&env->tlb_write[1][i], start1, length);
1438 #if !defined(CONFIG_SOFTMMU)
1439 /* XXX: this is expensive */
1445 for(i = 0; i < L1_SIZE; i++) {
1448 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1449 for(j = 0; j < L2_SIZE; j++) {
1450 if (p->valid_tag == virt_valid_tag &&
1451 p->phys_addr >= start && p->phys_addr < end &&
1452 (p->prot & PROT_WRITE)) {
1453 if (addr < MMAP_AREA_END) {
1454 mprotect((void *)addr, TARGET_PAGE_SIZE,
1455 p->prot & ~PROT_WRITE);
1458 addr += TARGET_PAGE_SIZE;
1467 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
1468 unsigned long start)
1471 if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1472 addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
1473 if (addr == start) {
1474 tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_RAM;
1479 /* update the TLB corresponding to virtual page vaddr and phys addr
1480 addr so that it is no longer dirty */
1481 static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
1483 CPUState *env = cpu_single_env;
1486 phys_ram_dirty[(addr - (unsigned long)phys_ram_base) >> TARGET_PAGE_BITS] = 1;
1488 addr &= TARGET_PAGE_MASK;
1489 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1490 tlb_set_dirty1(&env->tlb_write[0][i], addr);
1491 tlb_set_dirty1(&env->tlb_write[1][i], addr);
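/* Usage sketch, not part of the original file: a device model that scans guest
   RAM (e.g. a display refresh loop) can consult the dirty map maintained above
   to skip unchanged pages, then clear the dirty state so the next writes are
   trapped again through the IO_MEM_NOTDIRTY path. Addresses are hypothetical. */
#if 0
static void refresh_sketch(target_ulong fb_start, target_ulong fb_end)
{
    target_ulong addr;
    for (addr = fb_start; addr < fb_end; addr += TARGET_PAGE_SIZE) {
        if (phys_ram_dirty[addr >> TARGET_PAGE_BITS]) {
            /* ... redraw this page ... */
        }
    }
    cpu_physical_memory_reset_dirty(fb_start, fb_end);
}
#endif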
1494 /* add a new TLB entry. At most one entry for a given virtual address
1495 is permitted. Return 0 if OK or 2 if the page could not be mapped
1496 (can only happen in non SOFTMMU mode for I/O pages or pages
1497 conflicting with the host address space). */
1498 int tlb_set_page(CPUState *env, target_ulong vaddr,
1499 target_phys_addr_t paddr, int prot,
1500 int is_user, int is_softmmu)
1504 TranslationBlock *first_tb;
1506 target_ulong address;
1507 unsigned long addend;
1510 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1513 pd = IO_MEM_UNASSIGNED;
1516 pd = p->phys_offset;
1517 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1518 /* NOTE: we also allocate the page at this stage */
1519 p1 = page_find_alloc(pd >> TARGET_PAGE_BITS);
1520 first_tb = p1->first_tb;
1523 #if defined(DEBUG_TLB)
1524 printf("tlb_set_page: vaddr=0x%08x paddr=0x%08x prot=%x u=%d c=%d smmu=%d pd=0x%08x\n",
1525 vaddr, paddr, prot, is_user, (first_tb != NULL), is_softmmu, pd);
1529 #if !defined(CONFIG_SOFTMMU)
1533 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1534 /* IO memory case */
1535 address = vaddr | pd;
1538 /* standard memory */
1540 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1543 index = (vaddr >> 12) & (CPU_TLB_SIZE - 1);
1545 if (prot & PAGE_READ) {
1546 env->tlb_read[is_user][index].address = address;
1547 env->tlb_read[is_user][index].addend = addend;
1549 env->tlb_read[is_user][index].address = -1;
1550 env->tlb_read[is_user][index].addend = -1;
1552 if (prot & PAGE_WRITE) {
1553 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
1554 /* ROM: access is ignored (same as unassigned) */
1555 env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
1556 env->tlb_write[is_user][index].addend = addend;
1558 /* XXX: the PowerPC code does not seem ready to handle
1559 self modifying code with DCBI */
1560 #if defined(TARGET_HAS_SMC) || 1
1562 /* if code is present, we use a specific memory
1563 handler. It works only for physical memory access */
1564 env->tlb_write[is_user][index].address = vaddr | IO_MEM_CODE;
1565 env->tlb_write[is_user][index].addend = addend;
1568 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1569 !cpu_physical_memory_is_dirty(pd)) {
1570 env->tlb_write[is_user][index].address = vaddr | IO_MEM_NOTDIRTY;
1571 env->tlb_write[is_user][index].addend = addend;
1573 env->tlb_write[is_user][index].address = address;
1574 env->tlb_write[is_user][index].addend = addend;
1577 env->tlb_write[is_user][index].address = -1;
1578 env->tlb_write[is_user][index].addend = -1;
1581 #if !defined(CONFIG_SOFTMMU)
1583 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1584 /* IO access: no mapping is done as it will be handled by the soft MMU */
1586 if (!(env->hflags & HF_SOFTMMU_MASK))
1591 if (vaddr >= MMAP_AREA_END) {
1594 if (prot & PROT_WRITE) {
1595 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1596 #if defined(TARGET_HAS_SMC) || 1
1599 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1600 !cpu_physical_memory_is_dirty(pd))) {
1601 /* ROM: we behave as if code was inside */
1602 /* if code is present, we only map as read only and save the original mapping */
1606 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS);
1609 vp->valid_tag = virt_valid_tag;
1610 prot &= ~PAGE_WRITE;
1613 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
1614 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1615 if (map_addr == MAP_FAILED) {
1616 cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
1626 /* called from signal handler: invalidate the code and unprotect the
1627 page. Return TRUE if the fault was successfully handled. */
1628 int page_unprotect(unsigned long addr, unsigned long pc, void *puc)
1630 #if !defined(CONFIG_SOFTMMU)
1633 #if defined(DEBUG_TLB)
1634 printf("page_unprotect: addr=0x%08x\n", addr);
1636 addr &= TARGET_PAGE_MASK;
1638 /* if it is not mapped, no need to worry here */
1639 if (addr >= MMAP_AREA_END)
1641 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1644 /* NOTE: in this case, validate_tag is _not_ tested as it
1645 validates only the code TLB */
1646 if (vp->valid_tag != virt_valid_tag)
1648 if (!(vp->prot & PAGE_WRITE))
1650 #if defined(DEBUG_TLB)
1651 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1652 addr, vp->phys_addr, vp->prot);
1654 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1655 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1656 (unsigned long)addr, vp->prot);
1657 /* set the dirty bit */
1658 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 1;
1659 /* flush the code inside */
1660 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
1669 void tlb_flush(CPUState *env, int flush_global)
1673 void tlb_flush_page(CPUState *env, target_ulong addr)
1677 int tlb_set_page(CPUState *env, target_ulong vaddr,
1678 target_phys_addr_t paddr, int prot,
1679 int is_user, int is_softmmu)
1684 /* dump memory mappings */
1685 void page_dump(FILE *f)
1687 unsigned long start, end;
1688 int i, j, prot, prot1;
1691 fprintf(f, "%-8s %-8s %-8s %s\n",
1692 "start", "end", "size", "prot");
1696 for(i = 0; i <= L1_SIZE; i++) {
1701 for(j = 0;j < L2_SIZE; j++) {
1706 if (prot1 != prot) {
1707 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1709 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1710 start, end, end - start,
1711 prot & PAGE_READ ? 'r' : '-',
1712 prot & PAGE_WRITE ? 'w' : '-',
1713 prot & PAGE_EXEC ? 'x' : '-');
1727 int page_get_flags(unsigned long address)
1731 p = page_find(address >> TARGET_PAGE_BITS);
1737 /* modify the flags of a page and invalidate the code if
1738 necessary. The flag PAGE_WRITE_ORG is set automatically
1739 depending on PAGE_WRITE */
1740 void page_set_flags(unsigned long start, unsigned long end, int flags)
1745 start = start & TARGET_PAGE_MASK;
1746 end = TARGET_PAGE_ALIGN(end);
1747 if (flags & PAGE_WRITE)
1748 flags |= PAGE_WRITE_ORG;
1749 spin_lock(&tb_lock);
1750 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1751 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1752 /* if the write protection is set, then we invalidate the code inside */
1754 if (!(p->flags & PAGE_WRITE) &&
1755 (flags & PAGE_WRITE) &&
1757 tb_invalidate_phys_page(addr, 0, NULL);
1761 spin_unlock(&tb_lock);
1764 /* called from signal handler: invalidate the code and unprotect the
1765 page. Return TRUE if the fault was successfully handled. */
1766 int page_unprotect(unsigned long address, unsigned long pc, void *puc)
1768 unsigned int page_index, prot, pindex;
1770 unsigned long host_start, host_end, addr;
1772 host_start = address & qemu_host_page_mask;
1773 page_index = host_start >> TARGET_PAGE_BITS;
1774 p1 = page_find(page_index);
1777 host_end = host_start + qemu_host_page_size;
1780 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1784 /* if the page was really writable, then we change its
1785 protection back to writable */
1786 if (prot & PAGE_WRITE_ORG) {
1787 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1788 if (!(p1[pindex].flags & PAGE_WRITE)) {
1789 mprotect((void *)host_start, qemu_host_page_size,
1790 (prot & PAGE_BITS) | PAGE_WRITE);
1791 p1[pindex].flags |= PAGE_WRITE;
1792 /* and since the content will be modified, we must invalidate
1793 the corresponding translated code. */
1794 tb_invalidate_phys_page(address, pc, puc);
1795 #ifdef DEBUG_TB_CHECK
1796 tb_invalidate_check(address);
1804 /* call this function when system calls directly modify a memory area */
1805 void page_unprotect_range(uint8_t *data, unsigned long data_size)
1807 unsigned long start, end, addr;
1809 start = (unsigned long)data;
1810 end = start + data_size;
1811 start &= TARGET_PAGE_MASK;
1812 end = TARGET_PAGE_ALIGN(end);
1813 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1814 page_unprotect(addr, 0, NULL);
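/* Usage sketch, not part of the original file: user-mode syscall emulation is
   expected to call page_unprotect_range() before writing a result buffer
   straight into guest memory, so that translated code in those pages is
   invalidated first. The helper and buffer below are hypothetical. */
#if 0
static void fill_guest_buffer_sketch(uint8_t *guest_buf, unsigned long size)
{
    page_unprotect_range(guest_buf, size);  /* restore PROT_WRITE + flush TBs */
    memset(guest_buf, 0, size);             /* now safe to modify directly */
}
#endif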
1818 static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
1821 #endif /* defined(CONFIG_USER_ONLY) */
1823 /* register physical memory. 'size' must be a multiple of the target
1824 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an io memory page */
1826 void cpu_register_physical_memory(target_phys_addr_t start_addr,
1828 unsigned long phys_offset)
1830 unsigned long addr, end_addr;
1833 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
1834 end_addr = start_addr + size;
1835 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
1836 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS);
1837 p->phys_offset = phys_offset;
1838 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
1839 phys_offset += TARGET_PAGE_SIZE;
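/* Usage sketch, not part of the original file: how a machine model would
   typically register guest RAM and a ROM through this function. Sizes and
   addresses are hypothetical; IO_MEM_ROM marks the range read-only. */
#if 0
static void register_memory_sketch(unsigned long ram_size)
{
    /* RAM at guest physical address 0, backed by offset 0 of phys_ram_base */
    cpu_register_physical_memory(0, ram_size, 0 | IO_MEM_RAM);
    /* a 64 KiB ROM mapped below 4 GB, backed by RAM allocated after the guest RAM */
    cpu_register_physical_memory(0xffff0000, 0x10000, ram_size | IO_MEM_ROM);
}
#endif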
1843 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
1848 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
1852 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
1853 unassigned_mem_readb,
1854 unassigned_mem_readb,
1855 unassigned_mem_readb,
1858 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
1859 unassigned_mem_writeb,
1860 unassigned_mem_writeb,
1861 unassigned_mem_writeb,
1864 /* self modifying code support in soft mmu mode: writing to a page
1865 containing code comes to these functions */
1867 static void code_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
1869 unsigned long phys_addr;
1871 phys_addr = addr - (unsigned long)phys_ram_base;
1872 #if !defined(CONFIG_USER_ONLY)
1873 tb_invalidate_phys_page_fast(phys_addr, 1);
1875 stb_raw((uint8_t *)addr, val);
1876 phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
1879 static void code_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
1881 unsigned long phys_addr;
1883 phys_addr = addr - (unsigned long)phys_ram_base;
1884 #if !defined(CONFIG_USER_ONLY)
1885 tb_invalidate_phys_page_fast(phys_addr, 2);
1887 stw_raw((uint8_t *)addr, val);
1888 phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
1891 static void code_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
1893 unsigned long phys_addr;
1895 phys_addr = addr - (unsigned long)phys_ram_base;
1896 #if !defined(CONFIG_USER_ONLY)
1897 tb_invalidate_phys_page_fast(phys_addr, 4);
1899 stl_raw((uint8_t *)addr, val);
1900 phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
1903 static CPUReadMemoryFunc *code_mem_read[3] = {
1904 NULL, /* never used */
1905 NULL, /* never used */
1906 NULL, /* never used */
1909 static CPUWriteMemoryFunc *code_mem_write[3] = {
1915 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
1917 stb_raw((uint8_t *)addr, val);
1918 tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
1921 static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
1923 stw_raw((uint8_t *)addr, val);
1924 tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
1927 static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
1929 stl_raw((uint8_t *)addr, val);
1930 tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
1933 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
1934 notdirty_mem_writeb,
1935 notdirty_mem_writew,
1936 notdirty_mem_writel,
1939 static void io_mem_init(void)
1941 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, code_mem_read, unassigned_mem_write, NULL);
1942 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
1943 cpu_register_io_memory(IO_MEM_CODE >> IO_MEM_SHIFT, code_mem_read, code_mem_write, NULL);
1944 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, code_mem_read, notdirty_mem_write, NULL);
1947 /* alloc dirty bits array */
1948 phys_ram_dirty = qemu_malloc(phys_ram_size >> TARGET_PAGE_BITS);
1951 /* mem_read and mem_write are arrays of functions containing the
1952 function to access byte (index 0), word (index 1) and dword (index
1953 2). All functions must be supplied. If io_index is non zero, the
1954 corresponding io zone is modified. If it is zero, a new io zone is
1955 allocated. The return value can be used with
1956 cpu_register_physical_memory(). (-1) is returned on error. */
1957 int cpu_register_io_memory(int io_index,
1958 CPUReadMemoryFunc **mem_read,
1959 CPUWriteMemoryFunc **mem_write,
1964 if (io_index <= 0) {
1965 if (io_index >= IO_MEM_NB_ENTRIES)
1967 io_index = io_mem_nb++;
1969 if (io_index >= IO_MEM_NB_ENTRIES)
1973 for(i = 0;i < 3; i++) {
1974 io_mem_read[io_index][i] = mem_read[i];
1975 io_mem_write[io_index][i] = mem_write[i];
1977 io_mem_opaque[io_index] = opaque;
1978 return io_index << IO_MEM_SHIFT;
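/* Usage sketch, not part of the original file: registering an MMIO region for a
   hypothetical device. Each array supplies the byte/word/dword callback, and the
   returned value is then handed to cpu_register_physical_memory() for the guest
   physical range the device occupies. */
#if 0
static uint32_t mydev_readb(void *opaque, target_phys_addr_t addr) { return 0; }
static void mydev_writeb(void *opaque, target_phys_addr_t addr, uint32_t val) { }

static CPUReadMemoryFunc *mydev_read[3] = {
    mydev_readb, mydev_readb, mydev_readb,
};
static CPUWriteMemoryFunc *mydev_write[3] = {
    mydev_writeb, mydev_writeb, mydev_writeb,
};

static void register_mydev_sketch(void)
{
    int iomemtype = cpu_register_io_memory(0, mydev_read, mydev_write, NULL);
    cpu_register_physical_memory(0xfe000000, 0x1000, iomemtype);
}
#endif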
1981 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
1983 return io_mem_write[io_index >> IO_MEM_SHIFT];
1986 CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
1988 return io_mem_read[io_index >> IO_MEM_SHIFT];
1991 /* physical memory access (slow version, mainly for debug) */
1992 #if defined(CONFIG_USER_ONLY)
1993 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
1994 int len, int is_write)
2000 page = addr & TARGET_PAGE_MASK;
2001 l = (page + TARGET_PAGE_SIZE) - addr;
2004 flags = page_get_flags(page);
2005 if (!(flags & PAGE_VALID))
2008 if (!(flags & PAGE_WRITE))
2010 memcpy((uint8_t *)addr, buf, len);
2012 if (!(flags & PAGE_READ))
2014 memcpy(buf, (uint8_t *)addr, len);
2022 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2023 int len, int is_write)
2028 target_phys_addr_t page;
2033 page = addr & TARGET_PAGE_MASK;
2034 l = (page + TARGET_PAGE_SIZE) - addr;
2037 p = phys_page_find(page >> TARGET_PAGE_BITS);
2039 pd = IO_MEM_UNASSIGNED;
2041 pd = p->phys_offset;
2045 if ((pd & ~TARGET_PAGE_MASK) != 0) {
2046 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2047 if (l >= 4 && ((addr & 3) == 0)) {
2048 /* 32 bit write access */
2050 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2052 } else if (l >= 2 && ((addr & 1) == 0)) {
2053 /* 16 bit write access */
2054 val = lduw_raw(buf);
2055 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2059 val = ldub_raw(buf);
2060 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2064 unsigned long addr1;
2065 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2067 ptr = phys_ram_base + addr1;
2068 memcpy(ptr, buf, l);
2069 /* invalidate code */
2070 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2072 phys_ram_dirty[page >> TARGET_PAGE_BITS] = 1;
2075 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2076 (pd & ~TARGET_PAGE_MASK) != IO_MEM_CODE) {
2078 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2079 if (l >= 4 && ((addr & 3) == 0)) {
2080 /* 32 bit read access */
2081 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2084 } else if (l >= 2 && ((addr & 1) == 0)) {
2085 /* 16 bit read access */
2086 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2091 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2097 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2098 (addr & ~TARGET_PAGE_MASK);
2099 memcpy(buf, ptr, l);
2109 /* virtual memory access for debug */
2110 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2111 uint8_t *buf, int len, int is_write)
2114 target_ulong page, phys_addr;
2117 page = addr & TARGET_PAGE_MASK;
2118 phys_addr = cpu_get_phys_page_debug(env, page);
2119 /* if no physical page mapped, return an error */
2120 if (phys_addr == -1)
2122 l = (page + TARGET_PAGE_SIZE) - addr;
2125 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
2134 #if !defined(CONFIG_USER_ONLY)
2136 #define MMUSUFFIX _cmmu
2137 #define GETPC() NULL
2138 #define env cpu_single_env
2139 #define SOFTMMU_CODE_ACCESS
2142 #include "softmmu_template.h"
2145 #include "softmmu_template.h"
2148 #include "softmmu_template.h"
2151 #include "softmmu_template.h"