2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 #include <sys/types.h>
37 #if defined(CONFIG_USER_ONLY)
41 //#define DEBUG_TB_INVALIDATE
44 //#define DEBUG_UNASSIGNED
46 /* make various TB consistency checks */
47 //#define DEBUG_TB_CHECK
48 //#define DEBUG_TLB_CHECK
50 //#define DEBUG_IOPORT
52 #if !defined(CONFIG_USER_ONLY)
53 /* TB consistency checks only implemented for usermode emulation. */
57 /* threshold to flush the translated code buffer */
58 #define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
60 #define SMC_BITMAP_USE_THRESHOLD 10
62 #define MMAP_AREA_START 0x00000000
63 #define MMAP_AREA_END 0xa8000000
65 #if defined(TARGET_SPARC64)
66 #define TARGET_PHYS_ADDR_SPACE_BITS 41
67 #elif defined(TARGET_ALPHA)
68 #define TARGET_PHYS_ADDR_SPACE_BITS 42
69 #define TARGET_VIRT_ADDR_SPACE_BITS 42
70 #elif defined(TARGET_PPC64)
71 #define TARGET_PHYS_ADDR_SPACE_BITS 42
73 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
74 #define TARGET_PHYS_ADDR_SPACE_BITS 32
77 TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
78 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
80 /* any access to the tbs or the page table must use this lock */
81 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
83 uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
84 uint8_t *code_gen_ptr;
88 uint8_t *phys_ram_base;
89 uint8_t *phys_ram_dirty;
90 static ram_addr_t phys_ram_alloc_offset = 0;
93 /* current CPU in the current thread. It is only valid inside cpu_exec() */
95 CPUState *cpu_single_env;
97 typedef struct PageDesc {
98 /* list of TBs intersecting this ram page */
99 TranslationBlock *first_tb;
100 /* in order to optimize self-modifying code handling, we count the number
101    of code write accesses to a given page so that we can switch to a bitmap */
102 unsigned int code_write_count;
103 uint8_t *code_bitmap;
104 #if defined(CONFIG_USER_ONLY)
109 typedef struct PhysPageDesc {
110 /* offset in host memory of the page + io_index in the low 12 bits */
111 uint32_t phys_offset;
115 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
116 /* XXX: this is a temporary hack for alpha target.
117 * In the future, this is to be replaced by a multi-level table
118  * to actually be able to handle the complete 64-bit address space.
120 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
122 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
125 #define L1_SIZE (1 << L1_BITS)
126 #define L2_SIZE (1 << L2_BITS)
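/* The page table is a two-level radix tree: the top L1_BITS of a page
   index select an l1_map entry, the next L2_BITS select a PageDesc inside
   the L2 array that entry points to. As a worked example (assuming the
   usual L2_BITS = 10 and TARGET_PAGE_BITS = 12 on a 32-bit target), a
   virtual address splits into 10 L1 bits, 10 L2 bits and a 12-bit page
   offset, so each L2 array covers 4 MB of guest address space. */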
128 static void io_mem_init(void);
130 unsigned long qemu_real_host_page_size;
131 unsigned long qemu_host_page_bits;
132 unsigned long qemu_host_page_size;
133 unsigned long qemu_host_page_mask;
135 /* XXX: for system emulation, it could just be an array */
136 static PageDesc *l1_map[L1_SIZE];
137 PhysPageDesc **l1_phys_map;
139 /* io memory support */
140 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
141 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
142 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
143 static int io_mem_nb;
144 #if defined(CONFIG_SOFTMMU)
145 static int io_mem_watch;
149 char *logfilename = "/tmp/qemu.log";
154 static int tlb_flush_count;
155 static int tb_flush_count;
156 static int tb_phys_invalidate_count;
158 static void page_init(void)
160 /* NOTE: we can always suppose that qemu_host_page_size >= TARGET_PAGE_SIZE */
164 SYSTEM_INFO system_info;
167 GetSystemInfo(&system_info);
168 qemu_real_host_page_size = system_info.dwPageSize;
170 VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
171 PAGE_EXECUTE_READWRITE, &old_protect);
174 qemu_real_host_page_size = getpagesize();
176 unsigned long start, end;
178 start = (unsigned long)code_gen_buffer;
179 start &= ~(qemu_real_host_page_size - 1);
181 end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
182 end += qemu_real_host_page_size - 1;
183 end &= ~(qemu_real_host_page_size - 1);
185 mprotect((void *)start, end - start,
186 PROT_READ | PROT_WRITE | PROT_EXEC);
190 if (qemu_host_page_size == 0)
191 qemu_host_page_size = qemu_real_host_page_size;
192 if (qemu_host_page_size < TARGET_PAGE_SIZE)
193 qemu_host_page_size = TARGET_PAGE_SIZE;
194 qemu_host_page_bits = 0;
195 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
196 qemu_host_page_bits++;
197 qemu_host_page_mask = ~(qemu_host_page_size - 1);
198 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
199 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
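/* look up the PageDesc for a virtual page index, allocating the second
   level array on demand; page_find() below is the read-only variant and
   returns NULL when no PageDesc has been allocated yet */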
202 static inline PageDesc *page_find_alloc(unsigned int index)
206 lp = &l1_map[index >> L2_BITS];
209 /* allocate if not found */
210 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
211 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
214 return p + (index & (L2_SIZE - 1));
217 static inline PageDesc *page_find(unsigned int index)
221 p = l1_map[index >> L2_BITS];
224 return p + (index & (L2_SIZE - 1));
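/* look up the PhysPageDesc for a physical page index; the physical map
   mirrors the two-level layout above, with an extra top level when
   TARGET_PHYS_ADDR_SPACE_BITS > 32. Each entry's phys_offset keeps the ram
   offset of the page in the bits above TARGET_PAGE_BITS and an I/O index
   (IO_MEM_RAM, IO_MEM_ROM, IO_MEM_UNASSIGNED, ...) in the low bits. */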
227 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
232 p = (void **)l1_phys_map;
233 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
235 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
236 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
238 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
241 /* allocate if not found */
244 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
245 memset(p, 0, sizeof(void *) * L1_SIZE);
249 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
253 /* allocate if not found */
256 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
258 for (i = 0; i < L2_SIZE; i++)
259 pd[i].phys_offset = IO_MEM_UNASSIGNED;
261 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
264 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
266 return phys_page_find_alloc(index, 0);
269 #if !defined(CONFIG_USER_ONLY)
270 static void tlb_protect_code(ram_addr_t ram_addr);
271 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
275 void cpu_exec_init(CPUState *env)
281 code_gen_ptr = code_gen_buffer;
285 env->next_cpu = NULL;
288 while (*penv != NULL) {
289 penv = (CPUState **)&(*penv)->next_cpu;
292 env->cpu_index = cpu_index;
293 env->nb_watchpoints = 0;
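/* free the SMC code bitmap of a page and reset its code write counter;
   the bitmap is rebuilt lazily once the page again sees enough code
   write accesses */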
297 static inline void invalidate_page_bitmap(PageDesc *p)
299 if (p->code_bitmap) {
300 qemu_free(p->code_bitmap);
301 p->code_bitmap = NULL;
303 p->code_write_count = 0;
306 /* set to NULL all the 'first_tb' fields in all PageDescs */
307 static void page_flush_tb(void)
312 for(i = 0; i < L1_SIZE; i++) {
315 for(j = 0; j < L2_SIZE; j++) {
317 invalidate_page_bitmap(p);
324 /* flush all the translation blocks */
325 /* XXX: tb_flush is currently not thread safe */
326 void tb_flush(CPUState *env1)
329 #if defined(DEBUG_FLUSH)
330 printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
331 code_gen_ptr - code_gen_buffer,
333 nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
337 for(env = first_cpu; env != NULL; env = env->next_cpu) {
338 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
341 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
344 code_gen_ptr = code_gen_buffer;
345     /* XXX: flush processor icache at this point if cache flush is expensive */
350 #ifdef DEBUG_TB_CHECK
352 static void tb_invalidate_check(target_ulong address)
354 TranslationBlock *tb;
356 address &= TARGET_PAGE_MASK;
357 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
358 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
359 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
360 address >= tb->pc + tb->size)) {
361 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
362 address, (long)tb->pc, tb->size);
368 /* verify that all the pages have correct rights for code */
369 static void tb_page_check(void)
371 TranslationBlock *tb;
372 int i, flags1, flags2;
374 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
375 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
376 flags1 = page_get_flags(tb->pc);
377 flags2 = page_get_flags(tb->pc + tb->size - 1);
378 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
379 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
380 (long)tb->pc, tb->size, flags1, flags2);
386 void tb_jmp_check(TranslationBlock *tb)
388 TranslationBlock *tb1;
391 /* suppress any remaining jumps to this TB */
395 tb1 = (TranslationBlock *)((long)tb1 & ~3);
398 tb1 = tb1->jmp_next[n1];
400 /* check end of list */
402 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
408 /* invalidate one TB */
409 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
412 TranslationBlock *tb1;
416 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
419 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
423 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
425 TranslationBlock *tb1;
431 tb1 = (TranslationBlock *)((long)tb1 & ~3);
433 *ptb = tb1->page_next[n1];
436 ptb = &tb1->page_next[n1];
440 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
442 TranslationBlock *tb1, **ptb;
445 ptb = &tb->jmp_next[n];
448 /* find tb(n) in circular list */
452 tb1 = (TranslationBlock *)((long)tb1 & ~3);
453 if (n1 == n && tb1 == tb)
456 ptb = &tb1->jmp_first;
458 ptb = &tb1->jmp_next[n1];
461 /* now we can suppress tb(n) from the list */
462 *ptb = tb->jmp_next[n];
464 tb->jmp_next[n] = NULL;
468 /* reset the jump entry 'n' of a TB so that it is not chained to another TB */
470 static inline void tb_reset_jump(TranslationBlock *tb, int n)
472 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
475 static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
480 target_ulong phys_pc;
481 TranslationBlock *tb1, *tb2;
483 /* remove the TB from the hash list */
484 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
485 h = tb_phys_hash_func(phys_pc);
486 tb_remove(&tb_phys_hash[h], tb,
487 offsetof(TranslationBlock, phys_hash_next));
489 /* remove the TB from the page list */
490 if (tb->page_addr[0] != page_addr) {
491 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
492 tb_page_remove(&p->first_tb, tb);
493 invalidate_page_bitmap(p);
495 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
496 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
497 tb_page_remove(&p->first_tb, tb);
498 invalidate_page_bitmap(p);
501 tb_invalidated_flag = 1;
503 /* remove the TB from the hash list */
504 h = tb_jmp_cache_hash_func(tb->pc);
505 for(env = first_cpu; env != NULL; env = env->next_cpu) {
506 if (env->tb_jmp_cache[h] == tb)
507 env->tb_jmp_cache[h] = NULL;
510 /* suppress this TB from the two jump lists */
511 tb_jmp_remove(tb, 0);
512 tb_jmp_remove(tb, 1);
514 /* suppress any remaining jumps to this TB */
520 tb1 = (TranslationBlock *)((long)tb1 & ~3);
521 tb2 = tb1->jmp_next[n1];
522 tb_reset_jump(tb1, n1);
523 tb1->jmp_next[n1] = NULL;
526 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
528 tb_phys_invalidate_count++;
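/* set 'len' consecutive bits starting at bit 'start' in the byte array
   'tab'; e.g. set_bits(tab, 5, 7) sets bits 5..7 of tab[0] and bits 0..3
   of tab[1]. Used below to build the per-page code bitmaps. */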
531 static inline void set_bits(uint8_t *tab, int start, int len)
537 mask = 0xff << (start & 7);
538 if ((start & ~7) == (end & ~7)) {
540 mask &= ~(0xff << (end & 7));
545 start = (start + 8) & ~7;
547 while (start < end1) {
552 mask = ~(0xff << (end & 7));
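/* build the bitmap of the bytes of a page that are covered by translated
   code, so that tb_invalidate_phys_page_fast() can let through guest
   writes which do not touch any TB */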
558 static void build_page_bitmap(PageDesc *p)
560 int n, tb_start, tb_end;
561 TranslationBlock *tb;
563 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
566 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
571 tb = (TranslationBlock *)((long)tb & ~3);
572 /* NOTE: this is subtle as a TB may span two physical pages */
574 /* NOTE: tb_end may be after the end of the page, but
575 it is not a problem */
576 tb_start = tb->pc & ~TARGET_PAGE_MASK;
577 tb_end = tb_start + tb->size;
578 if (tb_end > TARGET_PAGE_SIZE)
579 tb_end = TARGET_PAGE_SIZE;
582 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
584 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
585 tb = tb->page_next[n];
589 #ifdef TARGET_HAS_PRECISE_SMC
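/* re-generate a single TB for (pc, cs_base, flags); used by the precise
   self-modifying code path to rebuild a block that was invalidated while
   it was executing */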
591 static void tb_gen_code(CPUState *env,
592 target_ulong pc, target_ulong cs_base, int flags,
595 TranslationBlock *tb;
597 target_ulong phys_pc, phys_page2, virt_page2;
600 phys_pc = get_phys_addr_code(env, pc);
603 /* flush must be done */
605 /* cannot fail at this point */
608 tc_ptr = code_gen_ptr;
610 tb->cs_base = cs_base;
613 cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
614 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
616 /* check next page if needed */
617 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
619 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
620 phys_page2 = get_phys_addr_code(env, virt_page2);
622 tb_link_phys(tb, phys_pc, phys_page2);
626 /* invalidate all TBs which intersect with the target physical address
627    range [start;end[. NOTE: start and end must refer to
628 the same physical page. 'is_cpu_write_access' should be true if called
629 from a real cpu write access: the virtual CPU will exit the current
630 TB if code is modified inside this TB. */
631 void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
632 int is_cpu_write_access)
634 int n, current_tb_modified, current_tb_not_found, current_flags;
635 CPUState *env = cpu_single_env;
637 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
638 target_ulong tb_start, tb_end;
639 target_ulong current_pc, current_cs_base;
641 p = page_find(start >> TARGET_PAGE_BITS);
644 if (!p->code_bitmap &&
645 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
646 is_cpu_write_access) {
647 /* build code bitmap */
648 build_page_bitmap(p);
651 /* we remove all the TBs in the range [start, end[ */
652 /* XXX: see if in some cases it could be faster to invalidate all the code */
653 current_tb_not_found = is_cpu_write_access;
654 current_tb_modified = 0;
655 current_tb = NULL; /* avoid warning */
656 current_pc = 0; /* avoid warning */
657 current_cs_base = 0; /* avoid warning */
658 current_flags = 0; /* avoid warning */
662 tb = (TranslationBlock *)((long)tb & ~3);
663 tb_next = tb->page_next[n];
664 /* NOTE: this is subtle as a TB may span two physical pages */
666 /* NOTE: tb_end may be after the end of the page, but
667 it is not a problem */
668 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
669 tb_end = tb_start + tb->size;
671 tb_start = tb->page_addr[1];
672 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
674 if (!(tb_end <= start || tb_start >= end)) {
675 #ifdef TARGET_HAS_PRECISE_SMC
676 if (current_tb_not_found) {
677 current_tb_not_found = 0;
679 if (env->mem_write_pc) {
680 /* now we have a real cpu fault */
681 current_tb = tb_find_pc(env->mem_write_pc);
684 if (current_tb == tb &&
685 !(current_tb->cflags & CF_SINGLE_INSN)) {
686 /* If we are modifying the current TB, we must stop
687 its execution. We could be more precise by checking
688 that the modification is after the current PC, but it
689 would require a specialized function to partially
690 restore the CPU state */
692 current_tb_modified = 1;
693 cpu_restore_state(current_tb, env,
694 env->mem_write_pc, NULL);
695 #if defined(TARGET_I386)
696 current_flags = env->hflags;
697 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
698 current_cs_base = (target_ulong)env->segs[R_CS].base;
699 current_pc = current_cs_base + env->eip;
701 #error unsupported CPU
704 #endif /* TARGET_HAS_PRECISE_SMC */
705             /* we need to save and clear env->current_tb here to handle the case where a signal
706                occurs while doing tb_phys_invalidate() */
709 saved_tb = env->current_tb;
710 env->current_tb = NULL;
712 tb_phys_invalidate(tb, -1);
714 env->current_tb = saved_tb;
715 if (env->interrupt_request && env->current_tb)
716 cpu_interrupt(env, env->interrupt_request);
721 #if !defined(CONFIG_USER_ONLY)
722 /* if no code remaining, no need to continue to use slow writes */
724 invalidate_page_bitmap(p);
725 if (is_cpu_write_access) {
726 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
730 #ifdef TARGET_HAS_PRECISE_SMC
731 if (current_tb_modified) {
732         /* we generate a block containing just the instruction
733            modifying the memory. It will ensure that it cannot modify itself */
735 env->current_tb = NULL;
736 tb_gen_code(env, current_pc, current_cs_base, current_flags,
738 cpu_resume_from_signal(env, NULL);
743 /* len must be <= 8 and start must be a multiple of len */
744 static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
751 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
752 cpu_single_env->mem_write_vaddr, len,
754 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
758 p = page_find(start >> TARGET_PAGE_BITS);
761 if (p->code_bitmap) {
762 offset = start & ~TARGET_PAGE_MASK;
763 b = p->code_bitmap[offset >> 3] >> (offset & 7);
764 if (b & ((1 << len) - 1))
768 tb_invalidate_phys_page_range(start, start + len, 1);
772 #if !defined(CONFIG_SOFTMMU)
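/* non-softmmu (user mode) variant: invalidate every TB intersecting the
   single target page containing 'addr'; 'pc' and 'puc' describe the
   faulting host context so that the precise SMC path can restore the CPU
   state */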
773 static void tb_invalidate_phys_page(target_ulong addr,
774 unsigned long pc, void *puc)
776 int n, current_flags, current_tb_modified;
777 target_ulong current_pc, current_cs_base;
779 TranslationBlock *tb, *current_tb;
780 #ifdef TARGET_HAS_PRECISE_SMC
781 CPUState *env = cpu_single_env;
784 addr &= TARGET_PAGE_MASK;
785 p = page_find(addr >> TARGET_PAGE_BITS);
789 current_tb_modified = 0;
791 current_pc = 0; /* avoid warning */
792 current_cs_base = 0; /* avoid warning */
793 current_flags = 0; /* avoid warning */
794 #ifdef TARGET_HAS_PRECISE_SMC
796 current_tb = tb_find_pc(pc);
801 tb = (TranslationBlock *)((long)tb & ~3);
802 #ifdef TARGET_HAS_PRECISE_SMC
803 if (current_tb == tb &&
804 !(current_tb->cflags & CF_SINGLE_INSN)) {
805 /* If we are modifying the current TB, we must stop
806 its execution. We could be more precise by checking
807 that the modification is after the current PC, but it
808 would require a specialized function to partially
809 restore the CPU state */
811 current_tb_modified = 1;
812 cpu_restore_state(current_tb, env, pc, puc);
813 #if defined(TARGET_I386)
814 current_flags = env->hflags;
815 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
816 current_cs_base = (target_ulong)env->segs[R_CS].base;
817 current_pc = current_cs_base + env->eip;
819 #error unsupported CPU
822 #endif /* TARGET_HAS_PRECISE_SMC */
823 tb_phys_invalidate(tb, addr);
824 tb = tb->page_next[n];
827 #ifdef TARGET_HAS_PRECISE_SMC
828 if (current_tb_modified) {
829         /* we generate a block containing just the instruction
830            modifying the memory. It will ensure that it cannot modify itself */
832 env->current_tb = NULL;
833 tb_gen_code(env, current_pc, current_cs_base, current_flags,
835 cpu_resume_from_signal(env, puc);
841 /* add the tb in the target page and protect it if necessary */
842 static inline void tb_alloc_page(TranslationBlock *tb,
843 unsigned int n, target_ulong page_addr)
846 TranslationBlock *last_first_tb;
848 tb->page_addr[n] = page_addr;
849 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
850 tb->page_next[n] = p->first_tb;
851 last_first_tb = p->first_tb;
852 p->first_tb = (TranslationBlock *)((long)tb | n);
853 invalidate_page_bitmap(p);
855 #if defined(TARGET_HAS_SMC) || 1
857 #if defined(CONFIG_USER_ONLY)
858 if (p->flags & PAGE_WRITE) {
863         /* force the host page to be non-writable (writes will take a
864            page fault + mprotect overhead) */
865 page_addr &= qemu_host_page_mask;
867 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
868 addr += TARGET_PAGE_SIZE) {
870 p2 = page_find (addr >> TARGET_PAGE_BITS);
874 p2->flags &= ~PAGE_WRITE;
875 page_get_flags(addr);
877 mprotect(g2h(page_addr), qemu_host_page_size,
878 (prot & PAGE_BITS) & ~PAGE_WRITE);
879 #ifdef DEBUG_TB_INVALIDATE
880 printf("protecting code page: 0x%08lx\n",
885     /* if some code is already present, then the pages are already
886        protected, so we only need to protect the page when the first TB is
887        allocated in it */
888 if (!last_first_tb) {
889 tlb_protect_code(page_addr);
893 #endif /* TARGET_HAS_SMC */
896 /* Allocate a new translation block. Flush the translation buffer if
897 too many translation blocks or too much generated code. */
898 TranslationBlock *tb_alloc(target_ulong pc)
900 TranslationBlock *tb;
902 if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
903 (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
911 /* add a new TB and link it to the physical page tables. phys_page2 is
912 (-1) to indicate that only one page contains the TB. */
913 void tb_link_phys(TranslationBlock *tb,
914 target_ulong phys_pc, target_ulong phys_page2)
917 TranslationBlock **ptb;
919 /* add in the physical hash table */
920 h = tb_phys_hash_func(phys_pc);
921 ptb = &tb_phys_hash[h];
922 tb->phys_hash_next = *ptb;
925 /* add in the page list */
926 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
927 if (phys_page2 != -1)
928 tb_alloc_page(tb, 1, phys_page2);
930 tb->page_addr[1] = -1;
932 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
933 tb->jmp_next[0] = NULL;
934 tb->jmp_next[1] = NULL;
936 tb->cflags &= ~CF_FP_USED;
937 if (tb->cflags & CF_TB_FP_USED)
938 tb->cflags |= CF_FP_USED;
941 /* init original jump addresses */
942 if (tb->tb_next_offset[0] != 0xffff)
943 tb_reset_jump(tb, 0);
944 if (tb->tb_next_offset[1] != 0xffff)
945 tb_reset_jump(tb, 1);
947 #ifdef DEBUG_TB_CHECK
952 /* find the TB whose translated code contains tc_ptr, i.e. the tb such that
953    tb->tc_ptr <= tc_ptr < the next tb's tc_ptr. Return NULL if not found */
954 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
958 TranslationBlock *tb;
962 if (tc_ptr < (unsigned long)code_gen_buffer ||
963 tc_ptr >= (unsigned long)code_gen_ptr)
965 /* binary search (cf Knuth) */
968 while (m_min <= m_max) {
969 m = (m_min + m_max) >> 1;
971 v = (unsigned long)tb->tc_ptr;
974 else if (tc_ptr < v) {
983 static void tb_reset_jump_recursive(TranslationBlock *tb);
985 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
987 TranslationBlock *tb1, *tb_next, **ptb;
990 tb1 = tb->jmp_next[n];
992 /* find head of list */
995 tb1 = (TranslationBlock *)((long)tb1 & ~3);
998 tb1 = tb1->jmp_next[n1];
1000     /* we are now sure that tb jumps to tb1 */
1003 /* remove tb from the jmp_first list */
1004 ptb = &tb_next->jmp_first;
1008 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1009 if (n1 == n && tb1 == tb)
1011 ptb = &tb1->jmp_next[n1];
1013 *ptb = tb->jmp_next[n];
1014 tb->jmp_next[n] = NULL;
1016 /* suppress the jump to next tb in generated code */
1017 tb_reset_jump(tb, n);
1019     /* suppress jumps in the tb we could have jumped to */
1020 tb_reset_jump_recursive(tb_next);
1024 static void tb_reset_jump_recursive(TranslationBlock *tb)
1026 tb_reset_jump_recursive2(tb, 0);
1027 tb_reset_jump_recursive2(tb, 1);
1030 #if defined(TARGET_HAS_ICE)
1031 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1033 target_ulong addr, pd;
1034 ram_addr_t ram_addr;
1037 addr = cpu_get_phys_page_debug(env, pc);
1038 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1040 pd = IO_MEM_UNASSIGNED;
1042 pd = p->phys_offset;
1044 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1045 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1049 /* Add a watchpoint. */
1050 int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
1054 for (i = 0; i < env->nb_watchpoints; i++) {
1055 if (addr == env->watchpoint[i].vaddr)
1058 if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1061 i = env->nb_watchpoints++;
1062 env->watchpoint[i].vaddr = addr;
1063 tlb_flush_page(env, addr);
1064 /* FIXME: This flush is needed because of the hack to make memory ops
1065 terminate the TB. It can be removed once the proper IO trap and
1066 re-execute bits are in. */
1071 /* Remove a watchpoint. */
1072 int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1076 for (i = 0; i < env->nb_watchpoints; i++) {
1077 if (addr == env->watchpoint[i].vaddr) {
1078 env->nb_watchpoints--;
1079 env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1080 tlb_flush_page(env, addr);
1087 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1088 breakpoint is reached */
1089 int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1091 #if defined(TARGET_HAS_ICE)
1094 for(i = 0; i < env->nb_breakpoints; i++) {
1095 if (env->breakpoints[i] == pc)
1099 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1101 env->breakpoints[env->nb_breakpoints++] = pc;
1103 breakpoint_invalidate(env, pc);
1110 /* remove a breakpoint */
1111 int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1113 #if defined(TARGET_HAS_ICE)
1115 for(i = 0; i < env->nb_breakpoints; i++) {
1116 if (env->breakpoints[i] == pc)
1121 env->nb_breakpoints--;
1122 if (i < env->nb_breakpoints)
1123 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1125 breakpoint_invalidate(env, pc);
1132 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1133 CPU loop after each instruction */
1134 void cpu_single_step(CPUState *env, int enabled)
1136 #if defined(TARGET_HAS_ICE)
1137 if (env->singlestep_enabled != enabled) {
1138 env->singlestep_enabled = enabled;
1139         /* must flush all the translated code to avoid inconsistencies */
1140 /* XXX: only flush what is necessary */
1146 /* enable or disable low-level logging */
1147 void cpu_set_log(int log_flags)
1149 loglevel = log_flags;
1150 if (loglevel && !logfile) {
1151 logfile = fopen(logfilename, "w");
1153 perror(logfilename);
1156 #if !defined(CONFIG_SOFTMMU)
1157     /* we must avoid glibc's use of mmap() by setting the stdio buffer "by hand" */
1159 static uint8_t logfile_buf[4096];
1160 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1163 setvbuf(logfile, NULL, _IOLBF, 0);
1168 void cpu_set_log_filename(const char *filename)
1170 logfilename = strdup(filename);
1173 /* mask must never be zero, except for A20 change call */
1174 void cpu_interrupt(CPUState *env, int mask)
1176 TranslationBlock *tb;
1177 static int interrupt_lock;
1179 env->interrupt_request |= mask;
1180     /* if the cpu is currently executing code, we must unlink it and
1181        all the potentially executing TBs */
1182 tb = env->current_tb;
1183 if (tb && !testandset(&interrupt_lock)) {
1184 env->current_tb = NULL;
1185 tb_reset_jump_recursive(tb);
1190 void cpu_reset_interrupt(CPUState *env, int mask)
1192 env->interrupt_request &= ~mask;
1195 CPULogItem cpu_log_items[] = {
1196 { CPU_LOG_TB_OUT_ASM, "out_asm",
1197 "show generated host assembly code for each compiled TB" },
1198 { CPU_LOG_TB_IN_ASM, "in_asm",
1199 "show target assembly code for each compiled TB" },
1200 { CPU_LOG_TB_OP, "op",
1201 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1203 { CPU_LOG_TB_OP_OPT, "op_opt",
1204 "show micro ops after optimization for each compiled TB" },
1206 { CPU_LOG_INT, "int",
1207 "show interrupts/exceptions in short format" },
1208 { CPU_LOG_EXEC, "exec",
1209 "show trace before each executed TB (lots of logs)" },
1210 { CPU_LOG_TB_CPU, "cpu",
1211 "show CPU state before bloc translation" },
1213 { CPU_LOG_PCALL, "pcall",
1214 "show protected mode far calls/returns/exceptions" },
1217 { CPU_LOG_IOPORT, "ioport",
1218 "show all i/o ports accesses" },
1223 static int cmp1(const char *s1, int n, const char *s2)
1225 if (strlen(s2) != n)
1227 return memcmp(s1, s2, n) == 0;
1230 /* takes a comma-separated list of log masks. Returns 0 on error. */
1231 int cpu_str_to_log_mask(const char *str)
1240 p1 = strchr(p, ',');
1243 if(cmp1(p,p1-p,"all")) {
1244 for(item = cpu_log_items; item->mask != 0; item++) {
1248 for(item = cpu_log_items; item->mask != 0; item++) {
1249 if (cmp1(p, p1 - p, item->name))
1263 void cpu_abort(CPUState *env, const char *fmt, ...)
1268 fprintf(stderr, "qemu: fatal: ");
1269 vfprintf(stderr, fmt, ap);
1270 fprintf(stderr, "\n");
1272 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1274 cpu_dump_state(env, stderr, fprintf, 0);
1280 CPUState *cpu_copy(CPUState *env)
1282 CPUState *new_env = cpu_init();
1283 /* preserve chaining and index */
1284 CPUState *next_cpu = new_env->next_cpu;
1285 int cpu_index = new_env->cpu_index;
1286 memcpy(new_env, env, sizeof(CPUState));
1287 new_env->next_cpu = next_cpu;
1288 new_env->cpu_index = cpu_index;
1292 #if !defined(CONFIG_USER_ONLY)
1294 /* NOTE: if flush_global is true, also flush global entries (not implemented yet) */
1296 void tlb_flush(CPUState *env, int flush_global)
1300 #if defined(DEBUG_TLB)
1301 printf("tlb_flush:\n");
1303 /* must reset current TB so that interrupts cannot modify the
1304 links while we are modifying them */
1305 env->current_tb = NULL;
1307 for(i = 0; i < CPU_TLB_SIZE; i++) {
1308 env->tlb_table[0][i].addr_read = -1;
1309 env->tlb_table[0][i].addr_write = -1;
1310 env->tlb_table[0][i].addr_code = -1;
1311 env->tlb_table[1][i].addr_read = -1;
1312 env->tlb_table[1][i].addr_write = -1;
1313 env->tlb_table[1][i].addr_code = -1;
1314 #if (NB_MMU_MODES >= 3)
1315 env->tlb_table[2][i].addr_read = -1;
1316 env->tlb_table[2][i].addr_write = -1;
1317 env->tlb_table[2][i].addr_code = -1;
1318 #if (NB_MMU_MODES == 4)
1319 env->tlb_table[3][i].addr_read = -1;
1320 env->tlb_table[3][i].addr_write = -1;
1321 env->tlb_table[3][i].addr_code = -1;
1326 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1328 #if !defined(CONFIG_SOFTMMU)
1329 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1332 if (env->kqemu_enabled) {
1333 kqemu_flush(env, flush_global);
1339 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1341 if (addr == (tlb_entry->addr_read &
1342 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1343 addr == (tlb_entry->addr_write &
1344 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1345 addr == (tlb_entry->addr_code &
1346 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1347 tlb_entry->addr_read = -1;
1348 tlb_entry->addr_write = -1;
1349 tlb_entry->addr_code = -1;
1353 void tlb_flush_page(CPUState *env, target_ulong addr)
1356 TranslationBlock *tb;
1358 #if defined(DEBUG_TLB)
1359 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1361 /* must reset current TB so that interrupts cannot modify the
1362 links while we are modifying them */
1363 env->current_tb = NULL;
1365 addr &= TARGET_PAGE_MASK;
1366 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1367 tlb_flush_entry(&env->tlb_table[0][i], addr);
1368 tlb_flush_entry(&env->tlb_table[1][i], addr);
1369 #if (NB_MMU_MODES >= 3)
1370 tlb_flush_entry(&env->tlb_table[2][i], addr);
1371 #if (NB_MMU_MODES == 4)
1372 tlb_flush_entry(&env->tlb_table[3][i], addr);
1376 /* Discard jump cache entries for any tb which might potentially
1377 overlap the flushed page. */
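    /* tb_jmp_cache_hash_page() maps every pc inside one page to a run of
       TB_JMP_PAGE_SIZE consecutive cache slots, so clearing two runs (the
       previous page and this one) discards every cached TB that could
       start in, or spill into, the flushed page. */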
1378 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1379 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1381 i = tb_jmp_cache_hash_page(addr);
1382 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1384 #if !defined(CONFIG_SOFTMMU)
1385 if (addr < MMAP_AREA_END)
1386 munmap((void *)addr, TARGET_PAGE_SIZE);
1389 if (env->kqemu_enabled) {
1390 kqemu_flush_page(env, addr);
1395 /* update the TLBs so that writes to code in the virtual page 'addr' can be detected */
1397 static void tlb_protect_code(ram_addr_t ram_addr)
1399 cpu_physical_memory_reset_dirty(ram_addr,
1400 ram_addr + TARGET_PAGE_SIZE,
1404 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1405    tested for self-modifying code */
1406 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1409 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1412 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1413 unsigned long start, unsigned long length)
1416 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1417 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1418 if ((addr - start) < length) {
1419 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
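/* clear the given dirty flag bits for the physical range [start, end[ and
   re-arm dirty tracking: matching write TLB entries are downgraded to
   IO_MEM_NOTDIRTY so the next guest store takes the slow path and sets
   the dirty bits again */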
1424 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1428 unsigned long length, start1;
1432 start &= TARGET_PAGE_MASK;
1433 end = TARGET_PAGE_ALIGN(end);
1435 length = end - start;
1438 len = length >> TARGET_PAGE_BITS;
1440 /* XXX: should not depend on cpu context */
1442 if (env->kqemu_enabled) {
1445 for(i = 0; i < len; i++) {
1446 kqemu_set_notdirty(env, addr);
1447 addr += TARGET_PAGE_SIZE;
1451 mask = ~dirty_flags;
1452 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1453 for(i = 0; i < len; i++)
1456 /* we modify the TLB cache so that the dirty bit will be set again
1457 when accessing the range */
1458 start1 = start + (unsigned long)phys_ram_base;
1459 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1460 for(i = 0; i < CPU_TLB_SIZE; i++)
1461 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1462 for(i = 0; i < CPU_TLB_SIZE; i++)
1463 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1464 #if (NB_MMU_MODES >= 3)
1465 for(i = 0; i < CPU_TLB_SIZE; i++)
1466 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1467 #if (NB_MMU_MODES == 4)
1468 for(i = 0; i < CPU_TLB_SIZE; i++)
1469 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1474 #if !defined(CONFIG_SOFTMMU)
1475 /* XXX: this is expensive */
1481 for(i = 0; i < L1_SIZE; i++) {
1484 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1485 for(j = 0; j < L2_SIZE; j++) {
1486 if (p->valid_tag == virt_valid_tag &&
1487 p->phys_addr >= start && p->phys_addr < end &&
1488 (p->prot & PROT_WRITE)) {
1489 if (addr < MMAP_AREA_END) {
1490 mprotect((void *)addr, TARGET_PAGE_SIZE,
1491 p->prot & ~PROT_WRITE);
1494 addr += TARGET_PAGE_SIZE;
1503 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1505 ram_addr_t ram_addr;
1507 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1508 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1509 tlb_entry->addend - (unsigned long)phys_ram_base;
1510 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1511 tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
1516 /* update the TLB according to the current state of the dirty bits */
1517 void cpu_tlb_update_dirty(CPUState *env)
1520 for(i = 0; i < CPU_TLB_SIZE; i++)
1521 tlb_update_dirty(&env->tlb_table[0][i]);
1522 for(i = 0; i < CPU_TLB_SIZE; i++)
1523 tlb_update_dirty(&env->tlb_table[1][i]);
1524 #if (NB_MMU_MODES >= 3)
1525 for(i = 0; i < CPU_TLB_SIZE; i++)
1526 tlb_update_dirty(&env->tlb_table[2][i]);
1527 #if (NB_MMU_MODES == 4)
1528 for(i = 0; i < CPU_TLB_SIZE; i++)
1529 tlb_update_dirty(&env->tlb_table[3][i]);
1534 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
1535 unsigned long start)
1538 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1539 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1540 if (addr == start) {
1541 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1546 /* update the TLB corresponding to virtual page vaddr and phys addr
1547 addr so that it is no longer dirty */
1548 static inline void tlb_set_dirty(CPUState *env,
1549 unsigned long addr, target_ulong vaddr)
1553 addr &= TARGET_PAGE_MASK;
1554 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1555 tlb_set_dirty1(&env->tlb_table[0][i], addr);
1556 tlb_set_dirty1(&env->tlb_table[1][i], addr);
1557 #if (NB_MMU_MODES >= 3)
1558 tlb_set_dirty1(&env->tlb_table[2][i], addr);
1559 #if (NB_MMU_MODES == 4)
1560 tlb_set_dirty1(&env->tlb_table[3][i], addr);
1565 /* add a new TLB entry. At most one entry for a given virtual address
1566 is permitted. Return 0 if OK or 2 if the page could not be mapped
1567 (can only happen in non SOFTMMU mode for I/O pages or pages
1568 conflicting with the host address space). */
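/* Note on the entries filled in below: addr_read/addr_write/addr_code
   hold the virtual page address with the low bits reused as flags; an
   IO_MEM_* index redirects the access to an I/O handler, and
   IO_MEM_NOTDIRTY forces the slow path so that dirty tracking and
   self-modifying code detection keep working. */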
1569 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1570 target_phys_addr_t paddr, int prot,
1571 int is_user, int is_softmmu)
1576 target_ulong address;
1577 target_phys_addr_t addend;
1582 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1584 pd = IO_MEM_UNASSIGNED;
1586 pd = p->phys_offset;
1588 #if defined(DEBUG_TLB)
1589 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
1590 vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
1594 #if !defined(CONFIG_SOFTMMU)
1598 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1599 /* IO memory case */
1600 address = vaddr | pd;
1603 /* standard memory */
1605 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1608 /* Make accesses to pages with watchpoints go via the
1609 watchpoint trap routines. */
1610 for (i = 0; i < env->nb_watchpoints; i++) {
1611 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
1612 if (address & ~TARGET_PAGE_MASK) {
1613 env->watchpoint[i].is_ram = 0;
1614 address = vaddr | io_mem_watch;
1616 env->watchpoint[i].is_ram = 1;
1617                 /* TODO: Figure out how to make read watchpoints coexist with code. */
1619 pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
1624 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1626 te = &env->tlb_table[is_user][index];
1627 te->addend = addend;
1628 if (prot & PAGE_READ) {
1629 te->addr_read = address;
1633 if (prot & PAGE_EXEC) {
1634 te->addr_code = address;
1638 if (prot & PAGE_WRITE) {
1639 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1640 (pd & IO_MEM_ROMD)) {
1641 /* write access calls the I/O callback */
1642 te->addr_write = vaddr |
1643 (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
1644 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1645 !cpu_physical_memory_is_dirty(pd)) {
1646 te->addr_write = vaddr | IO_MEM_NOTDIRTY;
1648 te->addr_write = address;
1651 te->addr_write = -1;
1654 #if !defined(CONFIG_SOFTMMU)
1656 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1657         /* IO access: no mapping is done as it will be handled by the soft MMU */
1659 if (!(env->hflags & HF_SOFTMMU_MASK))
1664 if (vaddr >= MMAP_AREA_END) {
1667 if (prot & PROT_WRITE) {
1668 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1669 #if defined(TARGET_HAS_SMC) || 1
1672 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1673 !cpu_physical_memory_is_dirty(pd))) {
1674 /* ROM: we do as if code was inside */
1675                 /* if code is present, we only map as read-only and save the original mapping */
1679 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
1682 vp->valid_tag = virt_valid_tag;
1683 prot &= ~PAGE_WRITE;
1686 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
1687 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1688 if (map_addr == MAP_FAILED) {
1689                 cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
1699 /* called from signal handler: invalidate the code and unprotect the
1700    page. Return TRUE if the fault was successfully handled. */
1701 int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
1703 #if !defined(CONFIG_SOFTMMU)
1706 #if defined(DEBUG_TLB)
1707 printf("page_unprotect: addr=0x%08x\n", addr);
1709 addr &= TARGET_PAGE_MASK;
1711 /* if it is not mapped, no need to worry here */
1712 if (addr >= MMAP_AREA_END)
1714 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1717 /* NOTE: in this case, validate_tag is _not_ tested as it
1718 validates only the code TLB */
1719 if (vp->valid_tag != virt_valid_tag)
1721 if (!(vp->prot & PAGE_WRITE))
1723 #if defined(DEBUG_TLB)
1724 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1725 addr, vp->phys_addr, vp->prot);
1727 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1728 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1729 (unsigned long)addr, vp->prot);
1730 /* set the dirty bit */
1731 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
1732 /* flush the code inside */
1733 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
1742 void tlb_flush(CPUState *env, int flush_global)
1746 void tlb_flush_page(CPUState *env, target_ulong addr)
1750 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1751 target_phys_addr_t paddr, int prot,
1752 int is_user, int is_softmmu)
1757 /* dump memory mappings */
1758 void page_dump(FILE *f)
1760 unsigned long start, end;
1761 int i, j, prot, prot1;
1764 fprintf(f, "%-8s %-8s %-8s %s\n",
1765 "start", "end", "size", "prot");
1769 for(i = 0; i <= L1_SIZE; i++) {
1774 for(j = 0;j < L2_SIZE; j++) {
1779 if (prot1 != prot) {
1780 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1782 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1783 start, end, end - start,
1784 prot & PAGE_READ ? 'r' : '-',
1785 prot & PAGE_WRITE ? 'w' : '-',
1786 prot & PAGE_EXEC ? 'x' : '-');
1800 int page_get_flags(target_ulong address)
1804 p = page_find(address >> TARGET_PAGE_BITS);
1810 /* modify the flags of a page and invalidate the code if
1811    necessary. The flag PAGE_WRITE_ORG is set automatically
1812 depending on PAGE_WRITE */
1813 void page_set_flags(target_ulong start, target_ulong end, int flags)
1818 start = start & TARGET_PAGE_MASK;
1819 end = TARGET_PAGE_ALIGN(end);
1820 if (flags & PAGE_WRITE)
1821 flags |= PAGE_WRITE_ORG;
1822 spin_lock(&tb_lock);
1823 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1824 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1825         /* if the write protection is set, then we invalidate the code inside */
1827 if (!(p->flags & PAGE_WRITE) &&
1828 (flags & PAGE_WRITE) &&
1830 tb_invalidate_phys_page(addr, 0, NULL);
1834 spin_unlock(&tb_lock);
1837 /* called from signal handler: invalidate the code and unprotect the
1838    page. Return TRUE if the fault was successfully handled. */
1839 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
1841 unsigned int page_index, prot, pindex;
1843 target_ulong host_start, host_end, addr;
1845 host_start = address & qemu_host_page_mask;
1846 page_index = host_start >> TARGET_PAGE_BITS;
1847 p1 = page_find(page_index);
1850 host_end = host_start + qemu_host_page_size;
1853 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1857 /* if the page was really writable, then we change its
1858 protection back to writable */
1859 if (prot & PAGE_WRITE_ORG) {
1860 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1861 if (!(p1[pindex].flags & PAGE_WRITE)) {
1862 mprotect((void *)g2h(host_start), qemu_host_page_size,
1863 (prot & PAGE_BITS) | PAGE_WRITE);
1864 p1[pindex].flags |= PAGE_WRITE;
1865 /* and since the content will be modified, we must invalidate
1866 the corresponding translated code. */
1867 tb_invalidate_phys_page(address, pc, puc);
1868 #ifdef DEBUG_TB_CHECK
1869 tb_invalidate_check(address);
1877 /* call this function when system calls directly modify a memory area */
1878 /* ??? This should be redundant now that we have lock_user. */
1879 void page_unprotect_range(target_ulong data, target_ulong data_size)
1881 target_ulong start, end, addr;
1884 end = start + data_size;
1885 start &= TARGET_PAGE_MASK;
1886 end = TARGET_PAGE_ALIGN(end);
1887 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1888 page_unprotect(addr, 0, NULL);
1892 static inline void tlb_set_dirty(CPUState *env,
1893 unsigned long addr, target_ulong vaddr)
1896 #endif /* defined(CONFIG_USER_ONLY) */
1898 /* register physical memory. 'size' must be a multiple of the target
1899    page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an io memory page */
1901 void cpu_register_physical_memory(target_phys_addr_t start_addr,
1903 unsigned long phys_offset)
1905 target_phys_addr_t addr, end_addr;
1909 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
1910 end_addr = start_addr + size;
1911 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
1912 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
1913 p->phys_offset = phys_offset;
1914 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
1915 (phys_offset & IO_MEM_ROMD))
1916 phys_offset += TARGET_PAGE_SIZE;
1919 /* since each CPU stores ram addresses in its TLB cache, we must
1920 reset the modified entries */
1922 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1927 /* XXX: temporary until new memory mapping API */
1928 uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
1932 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1934 return IO_MEM_UNASSIGNED;
1935 return p->phys_offset;
1938 /* XXX: better than nothing */
1939 ram_addr_t qemu_ram_alloc(unsigned int size)
1942 if ((phys_ram_alloc_offset + size) >= phys_ram_size) {
1943 fprintf(stderr, "Not enough memory (requested_size = %u, max memory = %d)\n",
1944 size, phys_ram_size);
1947 addr = phys_ram_alloc_offset;
1948 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
1952 void qemu_ram_free(ram_addr_t addr)
1956 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
1958 #ifdef DEBUG_UNASSIGNED
1959 printf("Unassigned mem read 0x%08x\n", (int)addr);
1964 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
1966 #ifdef DEBUG_UNASSIGNED
1967 printf("Unassigned mem write 0x%08x = 0x%x\n", (int)addr, val);
1971 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
1972 unassigned_mem_readb,
1973 unassigned_mem_readb,
1974 unassigned_mem_readb,
1977 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
1978 unassigned_mem_writeb,
1979 unassigned_mem_writeb,
1980 unassigned_mem_writeb,
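/* The notdirty handlers below are installed for RAM pages whose dirty
   bits are clear: each store first invalidates any translated code at the
   target address, performs the write, then sets the dirty flags so that
   later stores to the page can use the fast path again. */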
1983 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
1985 unsigned long ram_addr;
1987 ram_addr = addr - (unsigned long)phys_ram_base;
1988 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1989 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1990 #if !defined(CONFIG_USER_ONLY)
1991 tb_invalidate_phys_page_fast(ram_addr, 1);
1992 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1995 stb_p((uint8_t *)(long)addr, val);
1997 if (cpu_single_env->kqemu_enabled &&
1998 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
1999 kqemu_modify_page(cpu_single_env, ram_addr);
2001 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2002 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2003     /* we remove the notdirty callback only if the code has been flushed */
2005 if (dirty_flags == 0xff)
2006 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2009 static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2011 unsigned long ram_addr;
2013 ram_addr = addr - (unsigned long)phys_ram_base;
2014 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2015 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2016 #if !defined(CONFIG_USER_ONLY)
2017 tb_invalidate_phys_page_fast(ram_addr, 2);
2018 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2021 stw_p((uint8_t *)(long)addr, val);
2023 if (cpu_single_env->kqemu_enabled &&
2024 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2025 kqemu_modify_page(cpu_single_env, ram_addr);
2027 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2028 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2029     /* we remove the notdirty callback only if the code has been flushed */
2031 if (dirty_flags == 0xff)
2032 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2035 static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2037 unsigned long ram_addr;
2039 ram_addr = addr - (unsigned long)phys_ram_base;
2040 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2041 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2042 #if !defined(CONFIG_USER_ONLY)
2043 tb_invalidate_phys_page_fast(ram_addr, 4);
2044 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2047 stl_p((uint8_t *)(long)addr, val);
2049 if (cpu_single_env->kqemu_enabled &&
2050 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2051 kqemu_modify_page(cpu_single_env, ram_addr);
2053 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2054 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2055     /* we remove the notdirty callback only if the code has been flushed */
2057 if (dirty_flags == 0xff)
2058 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2061 static CPUReadMemoryFunc *error_mem_read[3] = {
2062 NULL, /* never used */
2063 NULL, /* never used */
2064 NULL, /* never used */
2067 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2068 notdirty_mem_writeb,
2069 notdirty_mem_writew,
2070 notdirty_mem_writel,
2073 #if defined(CONFIG_SOFTMMU)
2074 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2075    so these check for a hit then pass through to the normal out-of-line handlers. */
2077 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2079 return ldub_phys(addr);
2082 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2084 return lduw_phys(addr);
2087 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2089 return ldl_phys(addr);
2092 /* Generate a debug exception if a watchpoint has been hit.
2093 Returns the real physical address of the access. addr will be a host
2094 address in the is_ram case. */
2095 static target_ulong check_watchpoint(target_phys_addr_t addr)
2097 CPUState *env = cpu_single_env;
2099 target_ulong retaddr;
2103 for (i = 0; i < env->nb_watchpoints; i++) {
2104 watch = env->watchpoint[i].vaddr;
2105 if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
2106 if (env->watchpoint[i].is_ram)
2107 retaddr = addr - (unsigned long)phys_ram_base;
2108 if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
2109 cpu_single_env->watchpoint_hit = i + 1;
2110 cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
2118 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2121 addr = check_watchpoint(addr);
2122 stb_phys(addr, val);
2125 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2128 addr = check_watchpoint(addr);
2129 stw_phys(addr, val);
2132 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2135 addr = check_watchpoint(addr);
2136 stl_phys(addr, val);
2139 static CPUReadMemoryFunc *watch_mem_read[3] = {
2145 static CPUWriteMemoryFunc *watch_mem_write[3] = {
2152 static void io_mem_init(void)
2154 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2155 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2156 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2159 #if defined(CONFIG_SOFTMMU)
2160 io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
2161 watch_mem_write, NULL);
2163 /* alloc dirty bits array */
2164 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2165 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2168 /* mem_read and mem_write are arrays of functions containing the
2169 function to access byte (index 0), word (index 1) and dword (index
2170    2). All functions must be supplied. If io_index is non-zero, the
2171 corresponding io zone is modified. If it is zero, a new io zone is
2172 allocated. The return value can be used with
2173    cpu_register_physical_memory(). (-1) is returned on error. */
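/* A hedged usage sketch (hypothetical device, not part of this file):
       io = cpu_register_io_memory(0, mydev_read, mydev_write, mydev);
       cpu_register_physical_memory(0xfc000000, 0x1000, io);
   where mydev_read/mydev_write are arrays of three CPUReadMemoryFunc /
   CPUWriteMemoryFunc handlers (byte, word, long accesses) and mydev is
   the opaque pointer passed back to each handler. */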
2174 int cpu_register_io_memory(int io_index,
2175 CPUReadMemoryFunc **mem_read,
2176 CPUWriteMemoryFunc **mem_write,
2181 if (io_index <= 0) {
2182 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
2184 io_index = io_mem_nb++;
2186 if (io_index >= IO_MEM_NB_ENTRIES)
2190 for(i = 0;i < 3; i++) {
2191 io_mem_read[io_index][i] = mem_read[i];
2192 io_mem_write[io_index][i] = mem_write[i];
2194 io_mem_opaque[io_index] = opaque;
2195 return io_index << IO_MEM_SHIFT;
2198 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2200 return io_mem_write[io_index >> IO_MEM_SHIFT];
2203 CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2205 return io_mem_read[io_index >> IO_MEM_SHIFT];
2208 /* physical memory access (slow version, mainly for debug) */
2209 #if defined(CONFIG_USER_ONLY)
2210 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2211 int len, int is_write)
2218 page = addr & TARGET_PAGE_MASK;
2219 l = (page + TARGET_PAGE_SIZE) - addr;
2222 flags = page_get_flags(page);
2223 if (!(flags & PAGE_VALID))
2226 if (!(flags & PAGE_WRITE))
2228 p = lock_user(addr, len, 0);
2229 memcpy(p, buf, len);
2230 unlock_user(p, addr, len);
2232 if (!(flags & PAGE_READ))
2234 p = lock_user(addr, len, 1);
2235 memcpy(buf, p, len);
2236 unlock_user(p, addr, 0);
2245 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2246 int len, int is_write)
2251 target_phys_addr_t page;
2256 page = addr & TARGET_PAGE_MASK;
2257 l = (page + TARGET_PAGE_SIZE) - addr;
2260 p = phys_page_find(page >> TARGET_PAGE_BITS);
2262 pd = IO_MEM_UNASSIGNED;
2264 pd = p->phys_offset;
2268 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2269 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2270 /* XXX: could force cpu_single_env to NULL to avoid
2272 if (l >= 4 && ((addr & 3) == 0)) {
2273 /* 32 bit write access */
2275 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2277 } else if (l >= 2 && ((addr & 1) == 0)) {
2278 /* 16 bit write access */
2280 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2283 /* 8 bit write access */
2285 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2289 unsigned long addr1;
2290 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2292 ptr = phys_ram_base + addr1;
2293 memcpy(ptr, buf, l);
2294 if (!cpu_physical_memory_is_dirty(addr1)) {
2295 /* invalidate code */
2296 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2298 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2299 (0xff & ~CODE_DIRTY_FLAG);
2303 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2304 !(pd & IO_MEM_ROMD)) {
2306 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2307 if (l >= 4 && ((addr & 3) == 0)) {
2308 /* 32 bit read access */
2309 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2312 } else if (l >= 2 && ((addr & 1) == 0)) {
2313 /* 16 bit read access */
2314 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2318 /* 8 bit read access */
2319 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2325 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2326 (addr & ~TARGET_PAGE_MASK);
2327 memcpy(buf, ptr, l);
2336 /* used for ROM loading: can write in RAM and ROM */
2337 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2338 const uint8_t *buf, int len)
2342 target_phys_addr_t page;
2347 page = addr & TARGET_PAGE_MASK;
2348 l = (page + TARGET_PAGE_SIZE) - addr;
2351 p = phys_page_find(page >> TARGET_PAGE_BITS);
2353 pd = IO_MEM_UNASSIGNED;
2355 pd = p->phys_offset;
2358 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2359 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2360 !(pd & IO_MEM_ROMD)) {
2363 unsigned long addr1;
2364 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2366 ptr = phys_ram_base + addr1;
2367 memcpy(ptr, buf, l);
2376 /* warning: addr must be aligned */
2377 uint32_t ldl_phys(target_phys_addr_t addr)
2385 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2387 pd = IO_MEM_UNASSIGNED;
2389 pd = p->phys_offset;
2392 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2393 !(pd & IO_MEM_ROMD)) {
2395 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2396 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2399 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2400 (addr & ~TARGET_PAGE_MASK);
2406 /* warning: addr must be aligned */
2407 uint64_t ldq_phys(target_phys_addr_t addr)
2415 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2417 pd = IO_MEM_UNASSIGNED;
2419 pd = p->phys_offset;
2422 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2423 !(pd & IO_MEM_ROMD)) {
2425 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2426 #ifdef TARGET_WORDS_BIGENDIAN
2427 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2428 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2430 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2431 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2435 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2436 (addr & ~TARGET_PAGE_MASK);
2443 uint32_t ldub_phys(target_phys_addr_t addr)
2446 cpu_physical_memory_read(addr, &val, 1);
2451 uint32_t lduw_phys(target_phys_addr_t addr)
2454 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2455 return tswap16(val);
2458 /* warning: addr must be aligned. The ram page is not marked as dirty
2459 and the code inside is not invalidated. It is useful if the dirty
2460 bits are used to track modified PTEs */
2461 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2468 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2470 pd = IO_MEM_UNASSIGNED;
2472 pd = p->phys_offset;
2475 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2476 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2477 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2479 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2480 (addr & ~TARGET_PAGE_MASK);
2485 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
2492 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2494 pd = IO_MEM_UNASSIGNED;
2496 pd = p->phys_offset;
2499 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2500 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2501 #ifdef TARGET_WORDS_BIGENDIAN
2502 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
2503 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
2505 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2506 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
2509 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2510 (addr & ~TARGET_PAGE_MASK);
2515 /* warning: addr must be aligned */
2516 void stl_phys(target_phys_addr_t addr, uint32_t val)
2523 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2525 pd = IO_MEM_UNASSIGNED;
2527 pd = p->phys_offset;
2530 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2531 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2532 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2534 unsigned long addr1;
2535 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2537 ptr = phys_ram_base + addr1;
2539 if (!cpu_physical_memory_is_dirty(addr1)) {
2540 /* invalidate code */
2541 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2543 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2544 (0xff & ~CODE_DIRTY_FLAG);
2550 void stb_phys(target_phys_addr_t addr, uint32_t val)
2553 cpu_physical_memory_write(addr, &v, 1);
2557 void stw_phys(target_phys_addr_t addr, uint32_t val)
2559 uint16_t v = tswap16(val);
2560 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2564 void stq_phys(target_phys_addr_t addr, uint64_t val)
2567 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
2572 /* virtual memory access for debug */
2573 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2574 uint8_t *buf, int len, int is_write)
2577 target_ulong page, phys_addr;
2580 page = addr & TARGET_PAGE_MASK;
2581 phys_addr = cpu_get_phys_page_debug(env, page);
2582 /* if no physical page mapped, return an error */
2583 if (phys_addr == -1)
2585 l = (page + TARGET_PAGE_SIZE) - addr;
2588 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
2597 void dump_exec_info(FILE *f,
2598 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2600 int i, target_code_size, max_target_code_size;
2601 int direct_jmp_count, direct_jmp2_count, cross_page;
2602 TranslationBlock *tb;
2604 target_code_size = 0;
2605 max_target_code_size = 0;
2607 direct_jmp_count = 0;
2608 direct_jmp2_count = 0;
2609 for(i = 0; i < nb_tbs; i++) {
2611 target_code_size += tb->size;
2612 if (tb->size > max_target_code_size)
2613 max_target_code_size = tb->size;
2614 if (tb->page_addr[1] != -1)
2616 if (tb->tb_next_offset[0] != 0xffff) {
2618 if (tb->tb_next_offset[1] != 0xffff) {
2619 direct_jmp2_count++;
2623 /* XXX: avoid using doubles ? */
2624 cpu_fprintf(f, "TB count %d\n", nb_tbs);
2625 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
2626 nb_tbs ? target_code_size / nb_tbs : 0,
2627 max_target_code_size);
2628 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
2629 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
2630 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
2631 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
2633 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
2634 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
2636 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
2638 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
2639 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
2640 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
2641 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
2644 #if !defined(CONFIG_USER_ONLY)
2646 #define MMUSUFFIX _cmmu
2647 #define GETPC() NULL
2648 #define env cpu_single_env
2649 #define SOFTMMU_CODE_ACCESS
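/* softmmu_template.h is included once per access size to instantiate the
   _cmmu slow-path helpers the translator uses for code fetches */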
2652 #include "softmmu_template.h"
2655 #include "softmmu_template.h"
2658 #include "softmmu_template.h"
2661 #include "softmmu_template.h"