/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#if !defined(CONFIG_SOFTMMU)

//#define DEBUG_TB_INVALIDATE

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK
/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END 0xa8000000
TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;

/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
uint8_t *code_gen_ptr;

int phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    unsigned long phys_offset;
} PhysPageDesc;
typedef struct VirtPageDesc {
    /* physical address of code page. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    target_ulong phys_addr;
    unsigned int valid_tag;
#if !defined(CONFIG_SOFTMMU)
    /* original page access rights. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    unsigned int prot;
#endif
} VirtPageDesc;
#define L2_BITS 10
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
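
/* Example of the two-level lookup (assuming the usual TARGET_PAGE_BITS = 12):
   for a page index 'idx = addr >> TARGET_PAGE_BITS', the descriptor is
   l1_map[idx >> L2_BITS] + (idx & (L2_SIZE - 1)): the top L1_BITS of the
   index select an L2 table, the low L2_BITS select the entry inside it. */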
static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
static PhysPageDesc *l1_phys_map[L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
static VirtPageDesc *l1_virt_map[L1_SIZE];
static unsigned int virt_valid_tag;
#endif

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    qemu_real_host_page_size = 4096;
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
#if !defined(CONFIG_USER_ONLY)
    virt_valid_tag = 1;
#endif
}
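
/* Example: with a 4096-byte host page the loop above yields
   qemu_host_page_bits = 12 and qemu_host_page_mask = 0xfffff000. */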
static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find_alloc(unsigned int index)
{
    PhysPageDesc **lp, *p;

    lp = &l1_phys_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PhysPageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(unsigned int index)
{
    PhysPageDesc *p;

    p = l1_phys_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}
#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(CPUState *env, target_ulong addr);
static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr);

static inline VirtPageDesc *virt_page_find_alloc(unsigned int index)
{
    VirtPageDesc **lp, *p;

    lp = &l1_virt_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(VirtPageDesc) * L2_SIZE);
        memset(p, 0, sizeof(VirtPageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline VirtPageDesc *virt_page_find(unsigned int index)
{
    VirtPageDesc *p;

    p = l1_virt_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}
static void virt_page_flush(void)
{
    int i, j;
    VirtPageDesc *p;

    virt_valid_tag++;

    if (virt_valid_tag == 0) {
        virt_valid_tag = 1;
        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                for(j = 0; j < L2_SIZE; j++)
                    p[j].valid_tag = 0;
            }
        }
    }
}
#else
static void virt_page_flush(void)
{
}
#endif

void cpu_exec_init(void)
{
    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
}
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env)
{
    int i;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    nb_tbs = 0;
    for(i = 0; i < CODE_GEN_HASH_SIZE; i++)
        tb_hash[i] = NULL;
    virt_page_flush();

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++)
        tb_phys_hash[i] = NULL;
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
}
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(unsigned long address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}
void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

static inline void tb_invalidate(TranslationBlock *tb)
{
    unsigned int h, n1;
    TranslationBlock *tb1, *tb2, **ptb;

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_hash_func(tb->pc);
    ptb = &tb_hash[h];
    for(;;) {
        tb1 = *ptb;
        /* NOTE: the TB is not necessarily linked in the hash. It
           indicates that it is not currently used */
        if (tb1 == NULL)
            return;
        if (tb1 == tb) {
            *ptb = tb1->hash_next;
            break;
        }
        ptb = &tb1->hash_next;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
}
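
/* NOTE on the encoding used by the jmp lists: the two low bits of each
   pointer are a tag. 'ptr | n' (n = 0 or 1) means the referenced TB reaches
   us through its jump slot n; 'tb | 2' is the end-of-list marker, which is
   why the loops above mask with ~3 and stop when n1 == 2. */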
static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    PageDesc *p;
    unsigned int h;
    target_ulong phys_pc;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidate(tb);
}
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
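
/* Example: set_bits(tab, 5, 10) marks bits 5..14, i.e. it ORs 0xe0 into
   tab[0] (bits 5-7) and 0x7f into tab[1] (bits 8-14). */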
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, (unsigned long)pc);
    tb = tb_alloc((unsigned long)pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc((unsigned long)pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = ((unsigned long)pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if (((unsigned long)pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif
/* invalidate all TBs which intersect with the target physical page
   range [start;end[. NOTE: start and end must refer to the same
   physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            saved_tb = env->current_tb;
            env->current_tb = NULL;
            tb_phys_invalidate(tb, -1);
            env->current_tb = saved_tb;
            if (env->interrupt_request && env->current_tb)
                cpu_interrupt(env, env->interrupt_request);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
    PageDesc *p;
    int offset, b;

    if (loglevel)
        fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                cpu_single_env->mem_write_vaddr, len,
                cpu_single_env->eip,
                cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
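
/* The code bitmap is the fast path for self-modifying code detection: a
   1..8 byte store on a code page costs one shift and mask against the
   bitmap, and only a store that really overlaps translated code pays for
   the full range invalidation. */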
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, unsigned int page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        unsigned long host_start, host_end, addr;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        host_start = page_addr & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;
        prot = 0;
        for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
            prot |= page_get_flags(addr);
        mprotect((void *)host_start, qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               host_start);
#endif
        p->flags &= ~PAGE_WRITE;
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        target_ulong virt_addr;

        virt_addr = (tb->pc & TARGET_PAGE_MASK) + (n << TARGET_PAGE_BITS);
        tlb_protect_code(cpu_single_env, virt_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(unsigned long pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;
#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}
/* link the tb with the other TBs */
void tb_link(TranslationBlock *tb)
{
#if !defined(CONFIG_USER_ONLY)
    {
        VirtPageDesc *vp;
        target_ulong addr;

        /* save the code memory mappings (needed to invalidate the code) */
        addr = tb->pc & TARGET_PAGE_MASK;
        vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
#ifdef DEBUG_TLB_CHECK
        if (vp->valid_tag == virt_valid_tag &&
            vp->phys_addr != tb->page_addr[0]) {
            printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
                   addr, tb->page_addr[0], vp->phys_addr);
        }
#endif
        vp->phys_addr = tb->page_addr[0];
        if (vp->valid_tag != virt_valid_tag) {
            vp->valid_tag = virt_valid_tag;
#if !defined(CONFIG_SOFTMMU)
            vp->prot = 0;
#endif
        }

        if (tb->page_addr[1] != -1) {
            addr += TARGET_PAGE_SIZE;
            vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
#ifdef DEBUG_TLB_CHECK
            if (vp->valid_tag == virt_valid_tag &&
                vp->phys_addr != tb->page_addr[1]) {
                printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
                       addr, tb->page_addr[1], vp->phys_addr);
            }
#endif
            vp->phys_addr = tb->page_addr[1];
            if (vp->valid_tag != virt_valid_tag) {
                vp->valid_tag = virt_valid_tag;
#if !defined(CONFIG_SOFTMMU)
                vp->prot = 0;
#endif
            }
        }
    }
#endif

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
#ifdef USE_CODE_COPY
    tb->cflags &= ~CF_FP_USED;
    if (tb->cflags & CF_TB_FP_USED)
        tb->cflags |= CF_FP_USED;
#endif

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);
}
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
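
/* NOTE: the binary search works because tb_alloc() hands out TBs in array
   order and carves tc_ptr values sequentially from code_gen_buffer, so
   tbs[] is sorted by tc_ptr; when there is no exact match, tbs[m_max] is
   the last TB starting at or below tc_ptr. */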
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_ulong phys_addr;

    phys_addr = cpu_get_phys_page_debug(env, pc);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + 1, 0);
}

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_I386) || defined(TARGET_PPC)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_I386) || defined(TARGET_PPC)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    memmove(&env->breakpoints[i], &env->breakpoints[i + 1],
            (env->nb_breakpoints - (i + 1)) * sizeof(env->breakpoints[0]));
    env->nb_breakpoints--;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_I386) || defined(TARGET_PPC)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}
/* enable or disable low-level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TBs */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}
CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o port accesses" },
    { 0, NULL, NULL },
};
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        for(item = cpu_log_items; item->mask != 0; item++) {
            if (cmp1(p, p1 - p, item->name))
                goto found;
        }
        return 0;
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_x86_dump_state(env, stderr, X86_DUMP_FPU | X86_DUMP_CCOP);
#endif
    va_end(ap);
    abort();
}
#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_read[0][i].address = -1;
        env->tlb_write[0][i].address = -1;
        env->tlb_read[1][i].address = -1;
        env->tlb_write[1][i].address = -1;
    }

    virt_page_flush();
    for(i = 0; i < CODE_GEN_HASH_SIZE; i++)
        tb_hash[i] = NULL;

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
}
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
        tlb_entry->address = -1;
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i, n;
    VirtPageDesc *vp;
    PageDesc *p;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: 0x%08x\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_read[0][i], addr);
    tlb_flush_entry(&env->tlb_write[0][i], addr);
    tlb_flush_entry(&env->tlb_read[1][i], addr);
    tlb_flush_entry(&env->tlb_write[1][i], addr);

    /* remove from the virtual pc hash table all the TBs at this
       virtual address */
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (vp && vp->valid_tag == virt_valid_tag) {
        p = page_find(vp->phys_addr >> TARGET_PAGE_BITS);
        if (p) {
            /* we remove all the links to the TBs in this virtual page */
            tb = p->first_tb;
            while (tb != NULL) {
                n = (long)tb & 3;
                tb = (TranslationBlock *)((long)tb & ~3);
                if ((tb->pc & TARGET_PAGE_MASK) == addr ||
                    ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr) {
                    tb_invalidate(tb);
                }
                tb = tb->page_next[n];
            }
        }
    }

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
}
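
/* NOTE: the TLB is direct-mapped: the entry for a virtual address is
   (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1), i.e. the low bits of
   the virtual page number (e.g. the low 8 bits for a 256-entry TLB). */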
static inline void tlb_protect_code1(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_CODE &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_ROM) {
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_CODE;
    }
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(CPUState *env, target_ulong addr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_protect_code1(&env->tlb_write[0][i], addr);
    tlb_protect_code1(&env->tlb_write[1][i], addr);
#if !defined(CONFIG_SOFTMMU)
    /* NOTE: as we generated the code for this page, it is already at
       least read protected */
    if (addr < MMAP_AREA_END)
        mprotect((void *)addr, TARGET_PAGE_SIZE, PROT_READ);
#endif
}
static inline void tlb_unprotect_code2(CPUTLBEntry *tlb_entry,
                                       unsigned long phys_addr)
{
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE &&
        ((tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend) == phys_addr) {
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
    }
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr)
{
    int i;

    phys_addr &= TARGET_PAGE_MASK;
    phys_addr += (long)phys_ram_base;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_unprotect_code2(&env->tlb_write[0][i], phys_addr);
    tlb_unprotect_code2(&env->tlb_write[1][i], phys_addr);
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}
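
/* Summary of the write TLB tags manipulated above: IO_MEM_RAM is plain
   RAM (page already dirty, stores go straight to memory); IO_MEM_NOTDIRTY
   traps the first store so the dirty bit can be set; IO_MEM_CODE traps
   every store so translated code on the page can be invalidated. */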
void cpu_physical_memory_reset_dirty(target_ulong start, target_ulong end)
{
    CPUState *env;
    unsigned long length, start1;
    int i;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    memset(phys_ram_dirty + (start >> TARGET_PAGE_BITS), 0, length >> TARGET_PAGE_BITS);

    env = cpu_single_env;
    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_reset_dirty_range(&env->tlb_write[0][i], start1, length);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_reset_dirty_range(&env->tlb_write[1][i], start1, length);

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}

/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
{
    CPUState *env = cpu_single_env;
    int i;

    phys_ram_dirty[(addr - (unsigned long)phys_ram_base) >> TARGET_PAGE_BITS] = 1;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_write[0][i], addr);
    tlb_set_dirty1(&env->tlb_write[1][i], addr);
}
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    TranslationBlock *first_tb;
    unsigned int index;
    target_ulong address;
    unsigned long addend;
    int ret;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    first_tb = NULL;
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        PageDesc *p1;
        pd = p->phys_offset;
        if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
            /* NOTE: we also allocate the page at this stage */
            p1 = page_find_alloc(pd >> TARGET_PAGE_BITS);
            first_tb = p1->first_tb;
        }
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=0x%08x paddr=0x%08x prot=%x u=%d c=%d smmu=%d pd=0x%08x\n",
           vaddr, paddr, prot, is_user, (first_tb != NULL), is_softmmu, pd);
#endif
    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        if (prot & PAGE_READ) {
            env->tlb_read[is_user][index].address = address;
            env->tlb_read[is_user][index].addend = addend;
        } else {
            env->tlb_read[is_user][index].address = -1;
            env->tlb_read[is_user][index].addend = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
                /* ROM: access is ignored (same as unassigned) */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
                env->tlb_write[is_user][index].addend = addend;
            } else
                /* XXX: the PowerPC code seems not ready to handle
                   self modifying code with DCBI */
#if defined(TARGET_HAS_SMC) || 1
            if (first_tb) {
                /* if code is present, we use a specific memory
                   handler. It works only for physical memory access */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_CODE;
                env->tlb_write[is_user][index].addend = addend;
            } else
#endif
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                !cpu_physical_memory_is_dirty(pd)) {
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_NOTDIRTY;
                env->tlb_write[is_user][index].addend = addend;
            } else {
                env->tlb_write[is_user][index].address = address;
                env->tlb_write[is_user][index].addend = addend;
            }
        } else {
            env->tlb_write[is_user][index].address = -1;
            env->tlb_write[is_user][index].addend = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 1;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}
#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0; j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}
int page_get_flags(unsigned long address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(unsigned long start, unsigned long end, int flags)
{
    PageDesc *p;
    unsigned long addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    unsigned long host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)host_start, qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}
/* call this function when system calls directly modify a memory area */
void page_unprotect_range(uint8_t *data, unsigned long data_size)
{
    unsigned long start, end, addr;

    start = (unsigned long)data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr, 0, NULL);
    }
}

static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    unsigned long addr, end_addr;
    PhysPageDesc *p;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS);
        p->phys_offset = phys_offset;
        if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
            phys_offset += TARGET_PAGE_SIZE;
    }
}
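
/* Usage sketch (hypothetical addresses; 'bios_offset' is assumed to be a
   page-aligned offset previously allocated in phys_ram_base):
       cpu_register_physical_memory(0x00000000, 0x100000, 0);
       cpu_register_physical_memory(0x000f0000, 0x10000,
                                    bios_offset | IO_MEM_ROM);
   maps 1 MB of RAM at physical address 0, then overlays a ROM range. */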
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};
/* self modifying code support in soft mmu mode : writing to a page
   containing code comes to these functions */

static void code_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 1);
#endif
    stb_raw((uint8_t *)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}

static void code_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 2);
#endif
    stw_raw((uint8_t *)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}

static void code_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 4);
#endif
    stl_raw((uint8_t *)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}
static CPUReadMemoryFunc *code_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *code_mem_write[3] = {
    code_mem_writeb,
    code_mem_writew,
    code_mem_writel,
};

static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    stb_raw((uint8_t *)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    stw_raw((uint8_t *)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    stl_raw((uint8_t *)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, code_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_CODE >> IO_MEM_SHIFT, code_mem_read, code_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, code_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

    /* alloc dirty bits array */
    phys_ram_dirty = qemu_malloc(phys_ram_size >> TARGET_PAGE_BITS);
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non zero, the
   corresponding io zone is modified. If it is zero, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). (-1) is returned if error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
}
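
/* Usage sketch ('my_mem_read', 'my_mem_write' and 's' are hypothetical):
       int io = cpu_register_io_memory(0, my_mem_read, my_mem_write, s);
       cpu_register_physical_memory(0xfe000000, 0x1000, io);
   io_index 0 allocates a fresh zone; the returned value is a phys_offset
   tag directly usable with cpu_register_physical_memory(). */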
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            memcpy((uint8_t *)addr, buf, len);
        } else {
            if (!(flags & PAGE_READ))
                return;
            memcpy(buf, (uint8_t *)addr, len);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != 0) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_raw(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_raw(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_raw(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                /* set dirty bit */
                phys_ram_dirty[page >> TARGET_PAGE_BITS] = 1;
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                (pd & ~TARGET_PAGE_MASK) != IO_MEM_CODE) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_raw(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_raw(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_raw(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#endif
/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_ulong page, phys_addr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
2094 #define MMUSUFFIX _cmmu
2095 #define GETPC() NULL
2096 #define env cpu_single_env
2099 #include "softmmu_template.h"
2102 #include "softmmu_template.h"
2105 #include "softmmu_template.h"
2108 #include "softmmu_template.h"