2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 #define WIN32_LEAN_AND_MEAN
25 #include <sys/types.h>
38 #include "qemu-common.h"
43 #if defined(CONFIG_USER_ONLY)
47 //#define DEBUG_TB_INVALIDATE
50 //#define DEBUG_UNASSIGNED
52 /* make various TB consistency checks */
53 //#define DEBUG_TB_CHECK
54 //#define DEBUG_TLB_CHECK
56 //#define DEBUG_IOPORT
57 //#define DEBUG_SUBPAGE
59 #if !defined(CONFIG_USER_ONLY)
60 /* TB consistency checks only implemented for usermode emulation. */
64 #define SMC_BITMAP_USE_THRESHOLD 10
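/* Once a page has seen this many code write accesses, build_page_bitmap()
   creates a per-page bitmap of the bytes covered by translated code, so
   that later writes which hit no translated code can skip the invalidation
   path (see tb_invalidate_phys_page_fast below). */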
66 #define MMAP_AREA_START 0x00000000
67 #define MMAP_AREA_END 0xa8000000
69 #if defined(TARGET_SPARC64)
70 #define TARGET_PHYS_ADDR_SPACE_BITS 41
71 #elif defined(TARGET_SPARC)
72 #define TARGET_PHYS_ADDR_SPACE_BITS 36
73 #elif defined(TARGET_ALPHA)
74 #define TARGET_PHYS_ADDR_SPACE_BITS 42
75 #define TARGET_VIRT_ADDR_SPACE_BITS 42
76 #elif defined(TARGET_PPC64)
77 #define TARGET_PHYS_ADDR_SPACE_BITS 42
78 #elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
79 #define TARGET_PHYS_ADDR_SPACE_BITS 42
80 #elif defined(TARGET_I386) && !defined(USE_KQEMU)
81 #define TARGET_PHYS_ADDR_SPACE_BITS 36
83 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
84 #define TARGET_PHYS_ADDR_SPACE_BITS 32
87 static TranslationBlock *tbs;
88 int code_gen_max_blocks;
89 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
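/* tb_phys_hash chains TBs by the physical address of their first
   instruction (see tb_phys_hash_func); it backs the slow TB lookup path
   used when the per-CPU tb_jmp_cache misses. */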
91 /* any access to the tbs or the page table must use this lock */
92 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
94 #if defined(__arm__) || defined(__sparc_v9__)
95 /* The prologue must be reachable with a direct jump. ARM and Sparc64
96 have limited branch ranges (possibly also PPC) so place it in a
97 section close to the code segment. */
98 #define code_gen_section \
99 __attribute__((__section__(".gen_code"))) \
100 __attribute__((aligned (32)))
102 #define code_gen_section \
103 __attribute__((aligned (32)))
106 uint8_t code_gen_prologue[1024] code_gen_section;
107 static uint8_t *code_gen_buffer;
108 static unsigned long code_gen_buffer_size;
109 /* threshold to flush the translated code buffer */
110 static unsigned long code_gen_buffer_max_size;
111 uint8_t *code_gen_ptr;
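/* Translated code is carved out of code_gen_buffer with a simple bump
   allocator: code_gen_ptr advances after each generated block, and
   everything is discarded at once by tb_flush() when the buffer (or the
   tbs[] array) fills up. */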
113 #if !defined(CONFIG_USER_ONLY)
114 ram_addr_t phys_ram_size;
116 uint8_t *phys_ram_base;
117 uint8_t *phys_ram_dirty;
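/* phys_ram_dirty holds one byte of dirty flags per target page of RAM,
   indexed by ram_addr >> TARGET_PAGE_BITS.  Individual bits are owned by
   different clients (e.g. CODE_DIRTY_FLAG for the translator); a value of
   0xff means the page is dirty for everybody and fast writes may be
   re-enabled. */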
118 static int in_migration;
119 static ram_addr_t phys_ram_alloc_offset = 0;
123 /* current CPU in the current thread. It is only valid inside
125 CPUState *cpu_single_env;
126 /* 0 = Do not count executed instructions.
127 1 = Precise instruction counting.
128 2 = Adaptive rate instruction counting. */
130 /* Current instruction counter. While executing translated code this may
131 include some instructions that have not yet been executed. */
134 typedef struct PageDesc {
135 /* list of TBs intersecting this ram page */
136 TranslationBlock *first_tb;
137 /* in order to optimize self-modifying code handling, we count the number
138 of code write accesses to a given page before switching to a bitmap */
139 unsigned int code_write_count;
140 uint8_t *code_bitmap;
141 #if defined(CONFIG_USER_ONLY)
146 typedef struct PhysPageDesc {
147 /* offset in host memory of the page + io_index in the low bits */
148 ram_addr_t phys_offset;
152 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
153 /* XXX: this is a temporary hack for alpha target.
154 * In the future, this is to be replaced by a multi-level table
155 * to actually be able to handle the complete 64-bit address space.
157 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
159 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
162 #define L1_SIZE (1 << L1_BITS)
163 #define L2_SIZE (1 << L2_BITS)
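/* The page descriptor maps are two-level radix trees: a page index is
   split as

       index = (L1 index << L2_BITS) | L2 index

   so page_find() costs two array lookups.  Illustrative example for a
   32-bit target with 4K pages and L2_BITS == 10: L1_BITS is
   32 - 10 - 12 = 10, giving 1024 entries at each level. */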
165 unsigned long qemu_real_host_page_size;
166 unsigned long qemu_host_page_bits;
167 unsigned long qemu_host_page_size;
168 unsigned long qemu_host_page_mask;
170 /* XXX: for system emulation, it could just be an array */
171 static PageDesc *l1_map[L1_SIZE];
172 static PhysPageDesc **l1_phys_map;
174 #if !defined(CONFIG_USER_ONLY)
175 static void io_mem_init(void);
177 /* io memory support */
178 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
179 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
180 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
181 static int io_mem_nb;
182 static int io_mem_watch;
186 static const char *logfilename = "/tmp/qemu.log";
189 static int log_append = 0;
192 static int tlb_flush_count;
193 static int tb_flush_count;
194 static int tb_phys_invalidate_count;
196 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
197 typedef struct subpage_t {
198 target_phys_addr_t base;
199 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
200 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
201 void *opaque[TARGET_PAGE_SIZE][2][4];
205 static void map_exec(void *addr, long size)
208 VirtualProtect(addr, size,
209 PAGE_EXECUTE_READWRITE, &old_protect);
213 static void map_exec(void *addr, long size)
215 unsigned long start, end, page_size;
217 page_size = getpagesize();
218 start = (unsigned long)addr;
219 start &= ~(page_size - 1);
221 end = (unsigned long)addr + size;
222 end += page_size - 1;
223 end &= ~(page_size - 1);
225 mprotect((void *)start, end - start,
226 PROT_READ | PROT_WRITE | PROT_EXEC);
230 static void page_init(void)
232 /* NOTE: we can always suppose that qemu_host_page_size >=
236 SYSTEM_INFO system_info;
238 GetSystemInfo(&system_info);
239 qemu_real_host_page_size = system_info.dwPageSize;
242 qemu_real_host_page_size = getpagesize();
244 if (qemu_host_page_size == 0)
245 qemu_host_page_size = qemu_real_host_page_size;
246 if (qemu_host_page_size < TARGET_PAGE_SIZE)
247 qemu_host_page_size = TARGET_PAGE_SIZE;
248 qemu_host_page_bits = 0;
249 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
250 qemu_host_page_bits++;
251 qemu_host_page_mask = ~(qemu_host_page_size - 1);
252 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
253 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
255 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
257 long long startaddr, endaddr;
262 last_brk = (unsigned long)sbrk(0);
263 f = fopen("/proc/self/maps", "r");
266 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
268 startaddr = MIN(startaddr,
269 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
270 endaddr = MIN(endaddr,
271 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
272 page_set_flags(startaddr & TARGET_PAGE_MASK,
273 TARGET_PAGE_ALIGN(endaddr),
284 static inline PageDesc **page_l1_map(target_ulong index)
286 #if TARGET_LONG_BITS > 32
287 /* Host memory outside guest VM. For 32-bit targets we have already
288 excluded high addresses. */
289 if (index > ((target_ulong)L2_SIZE * L1_SIZE))
292 return &l1_map[index >> L2_BITS];
295 static inline PageDesc *page_find_alloc(target_ulong index)
298 lp = page_l1_map(index);
304 /* allocate if not found */
305 #if defined(CONFIG_USER_ONLY)
307 size_t len = sizeof(PageDesc) * L2_SIZE;
308 /* Don't use qemu_malloc because it may recurse. */
309 p = mmap(0, len, PROT_READ | PROT_WRITE,
310 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
313 if (addr == (target_ulong)addr) {
314 page_set_flags(addr & TARGET_PAGE_MASK,
315 TARGET_PAGE_ALIGN(addr + len),
319 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
323 return p + (index & (L2_SIZE - 1));
326 static inline PageDesc *page_find(target_ulong index)
329 lp = page_l1_map(index);
336 return p + (index & (L2_SIZE - 1));
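/* The physical page map mirrors l1_map but is keyed by the physical page
   index and allocated lazily with qemu_vmalloc.  When
   TARGET_PHYS_ADDR_SPACE_BITS > 32, an extra top level is walked first,
   so the lookup below becomes a three-level descent. */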
339 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
344 p = (void **)l1_phys_map;
345 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
347 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
348 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
350 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
353 /* allocate if not found */
356 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
357 memset(p, 0, sizeof(void *) * L1_SIZE);
361 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
365 /* allocate if not found */
368 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
370 for (i = 0; i < L2_SIZE; i++)
371 pd[i].phys_offset = IO_MEM_UNASSIGNED;
373 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
376 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
378 return phys_page_find_alloc(index, 0);
381 #if !defined(CONFIG_USER_ONLY)
382 static void tlb_protect_code(ram_addr_t ram_addr);
383 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
385 #define mmap_lock() do { } while(0)
386 #define mmap_unlock() do { } while(0)
389 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
391 #if defined(CONFIG_USER_ONLY)
392 /* Currently it is not recommended to allocate big chunks of data in
393 user mode. It will change when a dedicated libc is used */
394 #define USE_STATIC_CODE_GEN_BUFFER
397 #ifdef USE_STATIC_CODE_GEN_BUFFER
398 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
401 static void code_gen_alloc(unsigned long tb_size)
403 #ifdef USE_STATIC_CODE_GEN_BUFFER
404 code_gen_buffer = static_code_gen_buffer;
405 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
406 map_exec(code_gen_buffer, code_gen_buffer_size);
408 code_gen_buffer_size = tb_size;
409 if (code_gen_buffer_size == 0) {
410 #if defined(CONFIG_USER_ONLY)
411 /* in user mode, phys_ram_size is not meaningful */
412 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
414 /* XXX: needs adjustments */
415 code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
418 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
419 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
420 /* The code gen buffer location may have constraints depending on
421 the host cpu and OS */
422 #if defined(__linux__)
427 flags = MAP_PRIVATE | MAP_ANONYMOUS;
428 #if defined(__x86_64__)
430 /* Cannot map more than that */
431 if (code_gen_buffer_size > (800 * 1024 * 1024))
432 code_gen_buffer_size = (800 * 1024 * 1024);
433 #elif defined(__sparc_v9__)
434 // Map the buffer below 2G, so we can use direct calls and branches
436 start = (void *) 0x60000000UL;
437 if (code_gen_buffer_size > (512 * 1024 * 1024))
438 code_gen_buffer_size = (512 * 1024 * 1024);
439 #elif defined(__arm__)
440 /* Map the buffer below 64M, so we can use direct calls and branches */
442 start = (void *) 0x01000000UL;
443 if (code_gen_buffer_size > 16 * 1024 * 1024)
444 code_gen_buffer_size = 16 * 1024 * 1024;
446 code_gen_buffer = mmap(start, code_gen_buffer_size,
447 PROT_WRITE | PROT_READ | PROT_EXEC,
449 if (code_gen_buffer == MAP_FAILED) {
450 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
454 #elif defined(__FreeBSD__)
458 flags = MAP_PRIVATE | MAP_ANONYMOUS;
459 #if defined(__x86_64__)
460 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
461 * 0x40000000 is free */
463 addr = (void *)0x40000000;
464 /* Cannot map more than that */
465 if (code_gen_buffer_size > (800 * 1024 * 1024))
466 code_gen_buffer_size = (800 * 1024 * 1024);
468 code_gen_buffer = mmap(addr, code_gen_buffer_size,
469 PROT_WRITE | PROT_READ | PROT_EXEC,
471 if (code_gen_buffer == MAP_FAILED) {
472 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
477 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
478 if (!code_gen_buffer) {
479 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
482 map_exec(code_gen_buffer, code_gen_buffer_size);
484 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
485 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
486 code_gen_buffer_max_size = code_gen_buffer_size -
487 code_gen_max_block_size();
488 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
489 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
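/* Note that code_gen_buffer_max_size deliberately leaves room for one
   maximum-sized block (code_gen_max_block_size()), so a TB that is being
   generated when the threshold is crossed can never overrun the buffer. */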
492 /* Must be called before using the QEMU cpus. 'tb_size' is the size
493 (in bytes) allocated to the translation buffer. Zero means default
495 void cpu_exec_init_all(unsigned long tb_size)
498 code_gen_alloc(tb_size);
499 code_gen_ptr = code_gen_buffer;
501 #if !defined(CONFIG_USER_ONLY)
506 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
508 #define CPU_COMMON_SAVE_VERSION 1
510 static void cpu_common_save(QEMUFile *f, void *opaque)
512 CPUState *env = opaque;
514 qemu_put_be32s(f, &env->halted);
515 qemu_put_be32s(f, &env->interrupt_request);
518 static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
520 CPUState *env = opaque;
522 if (version_id != CPU_COMMON_SAVE_VERSION)
525 qemu_get_be32s(f, &env->halted);
526 qemu_get_be32s(f, &env->interrupt_request);
533 void cpu_exec_init(CPUState *env)
538 env->next_cpu = NULL;
541 while (*penv != NULL) {
542 penv = (CPUState **)&(*penv)->next_cpu;
545 env->cpu_index = cpu_index;
546 TAILQ_INIT(&env->breakpoints);
547 TAILQ_INIT(&env->watchpoints);
549 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
550 register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
551 cpu_common_save, cpu_common_load, env);
552 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
553 cpu_save, cpu_load, env);
557 static inline void invalidate_page_bitmap(PageDesc *p)
559 if (p->code_bitmap) {
560 qemu_free(p->code_bitmap);
561 p->code_bitmap = NULL;
563 p->code_write_count = 0;
566 /* set to NULL all the 'first_tb' fields in all PageDescs */
567 static void page_flush_tb(void)
572 for(i = 0; i < L1_SIZE; i++) {
575 for(j = 0; j < L2_SIZE; j++) {
577 invalidate_page_bitmap(p);
584 /* flush all the translation blocks */
585 /* XXX: tb_flush is currently not thread safe */
586 void tb_flush(CPUState *env1)
589 #if defined(DEBUG_FLUSH)
590 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
591 (unsigned long)(code_gen_ptr - code_gen_buffer),
593 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
595 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
596 cpu_abort(env1, "Internal error: code buffer overflow\n");
600 for(env = first_cpu; env != NULL; env = env->next_cpu) {
601 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
604 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
607 code_gen_ptr = code_gen_buffer;
608 /* XXX: flush processor icache at this point if cache flush is
613 #ifdef DEBUG_TB_CHECK
615 static void tb_invalidate_check(target_ulong address)
617 TranslationBlock *tb;
619 address &= TARGET_PAGE_MASK;
620 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
621 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
622 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
623 address >= tb->pc + tb->size)) {
624 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
625 address, (long)tb->pc, tb->size);
631 /* verify that all the pages have correct rights for code */
632 static void tb_page_check(void)
634 TranslationBlock *tb;
635 int i, flags1, flags2;
637 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
638 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
639 flags1 = page_get_flags(tb->pc);
640 flags2 = page_get_flags(tb->pc + tb->size - 1);
641 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
642 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
643 (long)tb->pc, tb->size, flags1, flags2);
649 static void tb_jmp_check(TranslationBlock *tb)
651 TranslationBlock *tb1;
654 /* suppress any remaining jumps to this TB */
658 tb1 = (TranslationBlock *)((long)tb1 & ~3);
661 tb1 = tb1->jmp_next[n1];
663 /* check end of list */
665 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
671 /* invalidate one TB */
672 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
675 TranslationBlock *tb1;
679 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
682 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
686 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
688 TranslationBlock *tb1;
694 tb1 = (TranslationBlock *)((long)tb1 & ~3);
696 *ptb = tb1->page_next[n1];
699 ptb = &tb1->page_next[n1];
703 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
705 TranslationBlock *tb1, **ptb;
708 ptb = &tb->jmp_next[n];
711 /* find tb(n) in circular list */
715 tb1 = (TranslationBlock *)((long)tb1 & ~3);
716 if (n1 == n && tb1 == tb)
719 ptb = &tb1->jmp_first;
721 ptb = &tb1->jmp_next[n1];
724 /* now we can suppress tb(n) from the list */
725 *ptb = tb->jmp_next[n];
727 tb->jmp_next[n] = NULL;
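/* The jmp_first/jmp_next pointers form per-TB circular lists of the TBs
   that jump into this one.  The low two bits of each pointer encode which
   of the two jump slots is used (0 or 1), or 2 for the list head itself,
   hence the "& ~3" masking and "| 2" tagging seen throughout this file. */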
731 /* reset the jump entry 'n' of a TB so that it is not chained to
733 static inline void tb_reset_jump(TranslationBlock *tb, int n)
735 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
738 void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
743 target_phys_addr_t phys_pc;
744 TranslationBlock *tb1, *tb2;
746 /* remove the TB from the hash list */
747 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
748 h = tb_phys_hash_func(phys_pc);
749 tb_remove(&tb_phys_hash[h], tb,
750 offsetof(TranslationBlock, phys_hash_next));
752 /* remove the TB from the page list */
753 if (tb->page_addr[0] != page_addr) {
754 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
755 tb_page_remove(&p->first_tb, tb);
756 invalidate_page_bitmap(p);
758 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
759 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
760 tb_page_remove(&p->first_tb, tb);
761 invalidate_page_bitmap(p);
764 tb_invalidated_flag = 1;
766 /* remove the TB from the hash list */
767 h = tb_jmp_cache_hash_func(tb->pc);
768 for(env = first_cpu; env != NULL; env = env->next_cpu) {
769 if (env->tb_jmp_cache[h] == tb)
770 env->tb_jmp_cache[h] = NULL;
773 /* suppress this TB from the two jump lists */
774 tb_jmp_remove(tb, 0);
775 tb_jmp_remove(tb, 1);
777 /* suppress any remaining jumps to this TB */
783 tb1 = (TranslationBlock *)((long)tb1 & ~3);
784 tb2 = tb1->jmp_next[n1];
785 tb_reset_jump(tb1, n1);
786 tb1->jmp_next[n1] = NULL;
789 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
791 tb_phys_invalidate_count++;
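/* set_bits() marks bits [start, start+len) in a byte-addressed bitmap,
   handling the partial first and last bytes with masks.  Worked example
   (illustrative): set_bits(tab, 10, 4) computes end = 14; both ends fall
   in tab[1], so the mask is (0xff << 2) & ~(0xff << 6) = 0x3c and bits
   10..13 are set. */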
794 static inline void set_bits(uint8_t *tab, int start, int len)
800 mask = 0xff << (start & 7);
801 if ((start & ~7) == (end & ~7)) {
803 mask &= ~(0xff << (end & 7));
808 start = (start + 8) & ~7;
810 while (start < end1) {
815 mask = ~(0xff << (end & 7));
821 static void build_page_bitmap(PageDesc *p)
823 int n, tb_start, tb_end;
824 TranslationBlock *tb;
826 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
833 tb = (TranslationBlock *)((long)tb & ~3);
834 /* NOTE: this is subtle as a TB may span two physical pages */
836 /* NOTE: tb_end may be after the end of the page, but
837 it is not a problem */
838 tb_start = tb->pc & ~TARGET_PAGE_MASK;
839 tb_end = tb_start + tb->size;
840 if (tb_end > TARGET_PAGE_SIZE)
841 tb_end = TARGET_PAGE_SIZE;
844 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
846 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
847 tb = tb->page_next[n];
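/* Translate a single block for (pc, cs_base, flags): allocate a TB
   (flushing the whole cache first if tb_alloc() fails), generate host
   code into the bump-allocated buffer, then register the block with the
   physical page tables via tb_link_phys(). */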
851 TranslationBlock *tb_gen_code(CPUState *env,
852 target_ulong pc, target_ulong cs_base,
853 int flags, int cflags)
855 TranslationBlock *tb;
857 target_ulong phys_pc, phys_page2, virt_page2;
860 phys_pc = get_phys_addr_code(env, pc);
863 /* flush must be done */
865 /* cannot fail at this point */
867 /* Don't forget to invalidate previous TB info. */
868 tb_invalidated_flag = 1;
870 tc_ptr = code_gen_ptr;
872 tb->cs_base = cs_base;
875 cpu_gen_code(env, tb, &code_gen_size);
876 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
878 /* check next page if needed */
879 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
881 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
882 phys_page2 = get_phys_addr_code(env, virt_page2);
884 tb_link_phys(tb, phys_pc, phys_page2);
888 /* invalidate all TBs which intersect with the target physical page
889 starting in range [start;end[. NOTE: start and end must refer to
890 the same physical page. 'is_cpu_write_access' should be true if called
891 from a real cpu write access: the virtual CPU will exit the current
892 TB if code is modified inside this TB. */
893 void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
894 int is_cpu_write_access)
896 TranslationBlock *tb, *tb_next, *saved_tb;
897 CPUState *env = cpu_single_env;
898 target_ulong tb_start, tb_end;
901 #ifdef TARGET_HAS_PRECISE_SMC
902 int current_tb_not_found = is_cpu_write_access;
903 TranslationBlock *current_tb = NULL;
904 int current_tb_modified = 0;
905 target_ulong current_pc = 0;
906 target_ulong current_cs_base = 0;
907 int current_flags = 0;
908 #endif /* TARGET_HAS_PRECISE_SMC */
910 p = page_find(start >> TARGET_PAGE_BITS);
913 if (!p->code_bitmap &&
914 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
915 is_cpu_write_access) {
916 /* build code bitmap */
917 build_page_bitmap(p);
920 /* we remove all the TBs in the range [start, end[ */
921 /* XXX: see if in some cases it could be faster to invalidate all the code */
925 tb = (TranslationBlock *)((long)tb & ~3);
926 tb_next = tb->page_next[n];
927 /* NOTE: this is subtle as a TB may span two physical pages */
929 /* NOTE: tb_end may be after the end of the page, but
930 it is not a problem */
931 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
932 tb_end = tb_start + tb->size;
934 tb_start = tb->page_addr[1];
935 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
937 if (!(tb_end <= start || tb_start >= end)) {
938 #ifdef TARGET_HAS_PRECISE_SMC
939 if (current_tb_not_found) {
940 current_tb_not_found = 0;
942 if (env->mem_io_pc) {
943 /* now we have a real cpu fault */
944 current_tb = tb_find_pc(env->mem_io_pc);
947 if (current_tb == tb &&
948 (current_tb->cflags & CF_COUNT_MASK) != 1) {
949 /* If we are modifying the current TB, we must stop
950 its execution. We could be more precise by checking
951 that the modification is after the current PC, but it
952 would require a specialized function to partially
953 restore the CPU state */
955 current_tb_modified = 1;
956 cpu_restore_state(current_tb, env,
957 env->mem_io_pc, NULL);
958 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
961 #endif /* TARGET_HAS_PRECISE_SMC */
962 /* we need to do that to handle the case where a signal
963 occurs while doing tb_phys_invalidate() */
966 saved_tb = env->current_tb;
967 env->current_tb = NULL;
969 tb_phys_invalidate(tb, -1);
971 env->current_tb = saved_tb;
972 if (env->interrupt_request && env->current_tb)
973 cpu_interrupt(env, env->interrupt_request);
978 #if !defined(CONFIG_USER_ONLY)
979 /* if no code remains, there is no need to keep using slow writes */
981 invalidate_page_bitmap(p);
982 if (is_cpu_write_access) {
983 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
987 #ifdef TARGET_HAS_PRECISE_SMC
988 if (current_tb_modified) {
989 /* we generate a block containing just the instruction
990 modifying the memory. It will ensure that it cannot modify
992 env->current_tb = NULL;
993 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
994 cpu_resume_from_signal(env, NULL);
999 /* len must be <= 8 and start must be a multiple of len */
1000 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1007 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1008 cpu_single_env->mem_io_vaddr, len,
1009 cpu_single_env->eip,
1010 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1014 p = page_find(start >> TARGET_PAGE_BITS);
1017 if (p->code_bitmap) {
1018 offset = start & ~TARGET_PAGE_MASK;
1019 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1020 if (b & ((1 << len) - 1))
1024 tb_invalidate_phys_page_range(start, start + len, 1);
1028 #if !defined(CONFIG_SOFTMMU)
1029 static void tb_invalidate_phys_page(target_phys_addr_t addr,
1030 unsigned long pc, void *puc)
1032 TranslationBlock *tb;
1035 #ifdef TARGET_HAS_PRECISE_SMC
1036 TranslationBlock *current_tb = NULL;
1037 CPUState *env = cpu_single_env;
1038 int current_tb_modified = 0;
1039 target_ulong current_pc = 0;
1040 target_ulong current_cs_base = 0;
1041 int current_flags = 0;
1044 addr &= TARGET_PAGE_MASK;
1045 p = page_find(addr >> TARGET_PAGE_BITS);
1049 #ifdef TARGET_HAS_PRECISE_SMC
1050 if (tb && pc != 0) {
1051 current_tb = tb_find_pc(pc);
1054 while (tb != NULL) {
1056 tb = (TranslationBlock *)((long)tb & ~3);
1057 #ifdef TARGET_HAS_PRECISE_SMC
1058 if (current_tb == tb &&
1059 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1060 /* If we are modifying the current TB, we must stop
1061 its execution. We could be more precise by checking
1062 that the modification is after the current PC, but it
1063 would require a specialized function to partially
1064 restore the CPU state */
1066 current_tb_modified = 1;
1067 cpu_restore_state(current_tb, env, pc, puc);
1068 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1071 #endif /* TARGET_HAS_PRECISE_SMC */
1072 tb_phys_invalidate(tb, addr);
1073 tb = tb->page_next[n];
1076 #ifdef TARGET_HAS_PRECISE_SMC
1077 if (current_tb_modified) {
1078 /* we generate a block containing just the instruction
1079 modifying the memory. It will ensure that it cannot modify
1081 env->current_tb = NULL;
1082 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1083 cpu_resume_from_signal(env, puc);
1089 /* add the TB to the target page and protect it if necessary */
1090 static inline void tb_alloc_page(TranslationBlock *tb,
1091 unsigned int n, target_ulong page_addr)
1094 TranslationBlock *last_first_tb;
1096 tb->page_addr[n] = page_addr;
1097 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1098 tb->page_next[n] = p->first_tb;
1099 last_first_tb = p->first_tb;
1100 p->first_tb = (TranslationBlock *)((long)tb | n);
1101 invalidate_page_bitmap(p);
1103 #if defined(TARGET_HAS_SMC) || 1
1105 #if defined(CONFIG_USER_ONLY)
1106 if (p->flags & PAGE_WRITE) {
1111 /* force the host page to be non-writable (writes will have a
1112 page fault + mprotect overhead) */
1113 page_addr &= qemu_host_page_mask;
1115 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1116 addr += TARGET_PAGE_SIZE) {
1118 p2 = page_find (addr >> TARGET_PAGE_BITS);
1122 p2->flags &= ~PAGE_WRITE;
1123 page_get_flags(addr);
1125 mprotect(g2h(page_addr), qemu_host_page_size,
1126 (prot & PAGE_BITS) & ~PAGE_WRITE);
1127 #ifdef DEBUG_TB_INVALIDATE
1128 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1133 /* if some code is already present, then the pages are already
1134 protected. So we handle the case where only the first TB is
1135 allocated in a physical page */
1136 if (!last_first_tb) {
1137 tlb_protect_code(page_addr);
1141 #endif /* TARGET_HAS_SMC */
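/* Summary of the two self-modifying-code protection strategies above:
   user mode write-protects the host page with mprotect() and relies on
   the SEGV handler calling page_unprotect(), while system mode uses
   tlb_protect_code() so that guest writes are routed through the
   notdirty I/O handlers instead. */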
1144 /* Allocate a new translation block. Flush the translation buffer if
1145 too many translation blocks or too much generated code. */
1146 TranslationBlock *tb_alloc(target_ulong pc)
1148 TranslationBlock *tb;
1150 if (nb_tbs >= code_gen_max_blocks ||
1151 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1153 tb = &tbs[nb_tbs++];
1159 void tb_free(TranslationBlock *tb)
1161 /* In practice this is mostly used for single-use temporary TBs.
1162 Ignore the hard cases and just back up if this TB happens to
1163 be the last one generated. */
1164 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1165 code_gen_ptr = tb->tc_ptr;
1170 /* add a new TB and link it to the physical page tables. phys_page2 is
1171 (-1) to indicate that only one page contains the TB. */
1172 void tb_link_phys(TranslationBlock *tb,
1173 target_ulong phys_pc, target_ulong phys_page2)
1176 TranslationBlock **ptb;
1178 /* Grab the mmap lock to stop another thread invalidating this TB
1179 before we are done. */
1181 /* add in the physical hash table */
1182 h = tb_phys_hash_func(phys_pc);
1183 ptb = &tb_phys_hash[h];
1184 tb->phys_hash_next = *ptb;
1187 /* add in the page list */
1188 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1189 if (phys_page2 != -1)
1190 tb_alloc_page(tb, 1, phys_page2);
1192 tb->page_addr[1] = -1;
1194 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1195 tb->jmp_next[0] = NULL;
1196 tb->jmp_next[1] = NULL;
1198 /* init original jump addresses */
1199 if (tb->tb_next_offset[0] != 0xffff)
1200 tb_reset_jump(tb, 0);
1201 if (tb->tb_next_offset[1] != 0xffff)
1202 tb_reset_jump(tb, 1);
1204 #ifdef DEBUG_TB_CHECK
1210 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1211 tb[1].tc_ptr. Return NULL if not found */
1212 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1214 int m_min, m_max, m;
1216 TranslationBlock *tb;
1220 if (tc_ptr < (unsigned long)code_gen_buffer ||
1221 tc_ptr >= (unsigned long)code_gen_ptr)
1223 /* binary search (cf Knuth) */
1226 while (m_min <= m_max) {
1227 m = (m_min + m_max) >> 1;
1229 v = (unsigned long)tb->tc_ptr;
1232 else if (tc_ptr < v) {
1241 static void tb_reset_jump_recursive(TranslationBlock *tb);
1243 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1245 TranslationBlock *tb1, *tb_next, **ptb;
1248 tb1 = tb->jmp_next[n];
1250 /* find head of list */
1253 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1256 tb1 = tb1->jmp_next[n1];
1258 /* we are now sure that tb jumps to tb1 */
1261 /* remove tb from the jmp_first list */
1262 ptb = &tb_next->jmp_first;
1266 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1267 if (n1 == n && tb1 == tb)
1269 ptb = &tb1->jmp_next[n1];
1271 *ptb = tb->jmp_next[n];
1272 tb->jmp_next[n] = NULL;
1274 /* suppress the jump to next tb in generated code */
1275 tb_reset_jump(tb, n);
1277 /* suppress jumps in the tb on which we could have jumped */
1278 tb_reset_jump_recursive(tb_next);
1282 static void tb_reset_jump_recursive(TranslationBlock *tb)
1284 tb_reset_jump_recursive2(tb, 0);
1285 tb_reset_jump_recursive2(tb, 1);
1288 #if defined(TARGET_HAS_ICE)
1289 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1291 target_phys_addr_t addr;
1293 ram_addr_t ram_addr;
1296 addr = cpu_get_phys_page_debug(env, pc);
1297 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1299 pd = IO_MEM_UNASSIGNED;
1301 pd = p->phys_offset;
1303 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1304 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1308 /* Add a watchpoint. */
1309 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1310 int flags, CPUWatchpoint **watchpoint)
1312 target_ulong len_mask = ~(len - 1);
1315 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1316 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1317 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1318 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1321 wp = qemu_malloc(sizeof(*wp));
1326 wp->len_mask = len_mask;
1329 /* keep all GDB-injected watchpoints in front */
1331 TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1333 TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1335 tlb_flush_page(env, addr);
1342 /* Remove a specific watchpoint. */
1343 int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1346 target_ulong len_mask = ~(len - 1);
1349 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1350 if (addr == wp->vaddr && len_mask == wp->len_mask
1351 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1352 cpu_watchpoint_remove_by_ref(env, wp);
1359 /* Remove a specific watchpoint by reference. */
1360 void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1362 TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1364 tlb_flush_page(env, watchpoint->vaddr);
1366 qemu_free(watchpoint);
1369 /* Remove all matching watchpoints. */
1370 void cpu_watchpoint_remove_all(CPUState *env, int mask)
1372 CPUWatchpoint *wp, *next;
1374 TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1375 if (wp->flags & mask)
1376 cpu_watchpoint_remove_by_ref(env, wp);
1380 /* Add a breakpoint. */
1381 int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1382 CPUBreakpoint **breakpoint)
1384 #if defined(TARGET_HAS_ICE)
1387 bp = qemu_malloc(sizeof(*bp));
1394 /* keep all GDB-injected breakpoints in front */
1396 TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1398 TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1400 breakpoint_invalidate(env, pc);
1410 /* Remove a specific breakpoint. */
1411 int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1413 #if defined(TARGET_HAS_ICE)
1416 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1417 if (bp->pc == pc && bp->flags == flags) {
1418 cpu_breakpoint_remove_by_ref(env, bp);
1428 /* Remove a specific breakpoint by reference. */
1429 void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1431 #if defined(TARGET_HAS_ICE)
1432 TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1434 breakpoint_invalidate(env, breakpoint->pc);
1436 qemu_free(breakpoint);
1440 /* Remove all matching breakpoints. */
1441 void cpu_breakpoint_remove_all(CPUState *env, int mask)
1443 #if defined(TARGET_HAS_ICE)
1444 CPUBreakpoint *bp, *next;
1446 TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1447 if (bp->flags & mask)
1448 cpu_breakpoint_remove_by_ref(env, bp);
1453 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1454 CPU loop after each instruction */
1455 void cpu_single_step(CPUState *env, int enabled)
1457 #if defined(TARGET_HAS_ICE)
1458 if (env->singlestep_enabled != enabled) {
1459 env->singlestep_enabled = enabled;
1460 /* must flush all the translated code to avoid inconsistencies */
1461 /* XXX: only flush what is necessary */
1467 /* enable or disable low-level logging */
1468 void cpu_set_log(int log_flags)
1470 loglevel = log_flags;
1471 if (loglevel && !logfile) {
1472 logfile = fopen(logfilename, log_append ? "a" : "w");
1474 perror(logfilename);
1477 #if !defined(CONFIG_SOFTMMU)
1478 /* must avoid glibc's use of mmap() by providing the buffer "by hand" */
1480 static char logfile_buf[4096];
1481 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1484 setvbuf(logfile, NULL, _IOLBF, 0);
1488 if (!loglevel && logfile) {
1494 void cpu_set_log_filename(const char *filename)
1496 logfilename = strdup(filename);
1501 cpu_set_log(loglevel);
1504 /* mask must never be zero, except for A20 change call */
1505 void cpu_interrupt(CPUState *env, int mask)
1507 #if !defined(USE_NPTL)
1508 TranslationBlock *tb;
1509 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1513 old_mask = env->interrupt_request;
1514 /* FIXME: This is probably not threadsafe. A different thread could
1515 be in the middle of a read-modify-write operation. */
1516 env->interrupt_request |= mask;
1517 #if defined(USE_NPTL)
1518 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1519 problem and hope the cpu will stop of its own accord. For userspace
1520 emulation this often isn't actually as bad as it sounds. Often
1521 signals are used primarily to interrupt blocking syscalls. */
1524 env->icount_decr.u16.high = 0xffff;
1525 #ifndef CONFIG_USER_ONLY
1526 /* CPU_INTERRUPT_EXIT isn't a real interrupt. It just means
1527 an async event happened and we need to process it. */
1529 && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
1530 cpu_abort(env, "Raised interrupt while not in I/O function");
1534 tb = env->current_tb;
1535 /* if the cpu is currently executing code, we must unlink it and
1536 all the potentially executing TBs */
1537 if (tb && !testandset(&interrupt_lock)) {
1538 env->current_tb = NULL;
1539 tb_reset_jump_recursive(tb);
1540 resetlock(&interrupt_lock);
1546 void cpu_reset_interrupt(CPUState *env, int mask)
1548 env->interrupt_request &= ~mask;
1551 const CPULogItem cpu_log_items[] = {
1552 { CPU_LOG_TB_OUT_ASM, "out_asm",
1553 "show generated host assembly code for each compiled TB" },
1554 { CPU_LOG_TB_IN_ASM, "in_asm",
1555 "show target assembly code for each compiled TB" },
1556 { CPU_LOG_TB_OP, "op",
1557 "show micro ops for each compiled TB" },
1558 { CPU_LOG_TB_OP_OPT, "op_opt",
1561 "before eflags optimization and "
1563 "after liveness analysis" },
1564 { CPU_LOG_INT, "int",
1565 "show interrupts/exceptions in short format" },
1566 { CPU_LOG_EXEC, "exec",
1567 "show trace before each executed TB (lots of logs)" },
1568 { CPU_LOG_TB_CPU, "cpu",
1569 "show CPU state before block translation" },
1571 { CPU_LOG_PCALL, "pcall",
1572 "show protected mode far calls/returns/exceptions" },
1575 { CPU_LOG_IOPORT, "ioport",
1576 "show all i/o ports accesses" },
1581 static int cmp1(const char *s1, int n, const char *s2)
1583 if (strlen(s2) != n)
1585 return memcmp(s1, s2, n) == 0;
1588 /* takes a comma-separated list of log masks. Returns 0 on error. */
1589 int cpu_str_to_log_mask(const char *str)
1591 const CPULogItem *item;
1598 p1 = strchr(p, ',');
1601 if(cmp1(p,p1-p,"all")) {
1602 for(item = cpu_log_items; item->mask != 0; item++) {
1606 for(item = cpu_log_items; item->mask != 0; item++) {
1607 if (cmp1(p, p1 - p, item->name))
1621 void cpu_abort(CPUState *env, const char *fmt, ...)
1628 fprintf(stderr, "qemu: fatal: ");
1629 vfprintf(stderr, fmt, ap);
1630 fprintf(stderr, "\n");
1632 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1634 cpu_dump_state(env, stderr, fprintf, 0);
1637 fprintf(logfile, "qemu: fatal: ");
1638 vfprintf(logfile, fmt, ap2);
1639 fprintf(logfile, "\n");
1641 cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1643 cpu_dump_state(env, logfile, fprintf, 0);
1653 CPUState *cpu_copy(CPUState *env)
1655 CPUState *new_env = cpu_init(env->cpu_model_str);
1656 /* preserve chaining and index */
1657 CPUState *next_cpu = new_env->next_cpu;
1658 int cpu_index = new_env->cpu_index;
1659 memcpy(new_env, env, sizeof(CPUState));
1660 new_env->next_cpu = next_cpu;
1661 new_env->cpu_index = cpu_index;
1665 #if !defined(CONFIG_USER_ONLY)
1667 static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1671 /* Discard jump cache entries for any tb which might potentially
1672 overlap the flushed page. */
1673 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1674 memset (&env->tb_jmp_cache[i], 0,
1675 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1677 i = tb_jmp_cache_hash_page(addr);
1678 memset (&env->tb_jmp_cache[i], 0,
1679 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1682 /* NOTE: if flush_global is true, also flush global entries (not
1684 void tlb_flush(CPUState *env, int flush_global)
1688 #if defined(DEBUG_TLB)
1689 printf("tlb_flush:\n");
1691 /* must reset current TB so that interrupts cannot modify the
1692 links while we are modifying them */
1693 env->current_tb = NULL;
1695 for(i = 0; i < CPU_TLB_SIZE; i++) {
1696 env->tlb_table[0][i].addr_read = -1;
1697 env->tlb_table[0][i].addr_write = -1;
1698 env->tlb_table[0][i].addr_code = -1;
1699 env->tlb_table[1][i].addr_read = -1;
1700 env->tlb_table[1][i].addr_write = -1;
1701 env->tlb_table[1][i].addr_code = -1;
1702 #if (NB_MMU_MODES >= 3)
1703 env->tlb_table[2][i].addr_read = -1;
1704 env->tlb_table[2][i].addr_write = -1;
1705 env->tlb_table[2][i].addr_code = -1;
1706 #if (NB_MMU_MODES == 4)
1707 env->tlb_table[3][i].addr_read = -1;
1708 env->tlb_table[3][i].addr_write = -1;
1709 env->tlb_table[3][i].addr_code = -1;
1714 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1717 if (env->kqemu_enabled) {
1718 kqemu_flush(env, flush_global);
1724 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1726 if (addr == (tlb_entry->addr_read &
1727 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1728 addr == (tlb_entry->addr_write &
1729 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1730 addr == (tlb_entry->addr_code &
1731 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1732 tlb_entry->addr_read = -1;
1733 tlb_entry->addr_write = -1;
1734 tlb_entry->addr_code = -1;
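/* An address of -1 keeps the TLB_INVALID bit set after masking, so it can
   never compare equal to a page-aligned guest address; it thus serves as
   the "invalid entry" marker for all three access kinds (read, write,
   code). */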
1738 void tlb_flush_page(CPUState *env, target_ulong addr)
1742 #if defined(DEBUG_TLB)
1743 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1745 /* must reset current TB so that interrupts cannot modify the
1746 links while we are modifying them */
1747 env->current_tb = NULL;
1749 addr &= TARGET_PAGE_MASK;
1750 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1751 tlb_flush_entry(&env->tlb_table[0][i], addr);
1752 tlb_flush_entry(&env->tlb_table[1][i], addr);
1753 #if (NB_MMU_MODES >= 3)
1754 tlb_flush_entry(&env->tlb_table[2][i], addr);
1755 #if (NB_MMU_MODES == 4)
1756 tlb_flush_entry(&env->tlb_table[3][i], addr);
1760 tlb_flush_jmp_cache(env, addr);
1763 if (env->kqemu_enabled) {
1764 kqemu_flush_page(env, addr);
1769 /* update the TLBs so that writes to code in the virtual page 'addr'
1771 static void tlb_protect_code(ram_addr_t ram_addr)
1773 cpu_physical_memory_reset_dirty(ram_addr,
1774 ram_addr + TARGET_PAGE_SIZE,
1778 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1779 tested for self-modifying code */
1780 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1783 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1786 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1787 unsigned long start, unsigned long length)
1790 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1791 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1792 if ((addr - start) < length) {
1793 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1798 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1802 unsigned long length, start1;
1806 start &= TARGET_PAGE_MASK;
1807 end = TARGET_PAGE_ALIGN(end);
1809 length = end - start;
1812 len = length >> TARGET_PAGE_BITS;
1814 /* XXX: should not depend on cpu context */
1816 if (env->kqemu_enabled) {
1819 for(i = 0; i < len; i++) {
1820 kqemu_set_notdirty(env, addr);
1821 addr += TARGET_PAGE_SIZE;
1825 mask = ~dirty_flags;
1826 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1827 for(i = 0; i < len; i++)
1830 /* we modify the TLB cache so that the dirty bit will be set again
1831 when accessing the range */
1832 start1 = start + (unsigned long)phys_ram_base;
1833 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1834 for(i = 0; i < CPU_TLB_SIZE; i++)
1835 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1836 for(i = 0; i < CPU_TLB_SIZE; i++)
1837 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1838 #if (NB_MMU_MODES >= 3)
1839 for(i = 0; i < CPU_TLB_SIZE; i++)
1840 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1841 #if (NB_MMU_MODES == 4)
1842 for(i = 0; i < CPU_TLB_SIZE; i++)
1843 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1849 int cpu_physical_memory_set_dirty_tracking(int enable)
1851 in_migration = enable;
1855 int cpu_physical_memory_get_dirty_tracking(void)
1857 return in_migration;
1860 void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
1863 kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1866 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1868 ram_addr_t ram_addr;
1870 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1871 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1872 tlb_entry->addend - (unsigned long)phys_ram_base;
1873 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1874 tlb_entry->addr_write |= TLB_NOTDIRTY;
1879 /* update the TLB according to the current state of the dirty bits */
1880 void cpu_tlb_update_dirty(CPUState *env)
1883 for(i = 0; i < CPU_TLB_SIZE; i++)
1884 tlb_update_dirty(&env->tlb_table[0][i]);
1885 for(i = 0; i < CPU_TLB_SIZE; i++)
1886 tlb_update_dirty(&env->tlb_table[1][i]);
1887 #if (NB_MMU_MODES >= 3)
1888 for(i = 0; i < CPU_TLB_SIZE; i++)
1889 tlb_update_dirty(&env->tlb_table[2][i]);
1890 #if (NB_MMU_MODES == 4)
1891 for(i = 0; i < CPU_TLB_SIZE; i++)
1892 tlb_update_dirty(&env->tlb_table[3][i]);
1897 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1899 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1900 tlb_entry->addr_write = vaddr;
1903 /* update the TLB corresponding to virtual page vaddr
1904 so that it is no longer dirty */
1905 static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1909 vaddr &= TARGET_PAGE_MASK;
1910 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1911 tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
1912 tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
1913 #if (NB_MMU_MODES >= 3)
1914 tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
1915 #if (NB_MMU_MODES == 4)
1916 tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
1921 /* add a new TLB entry. At most one entry for a given virtual address
1922 is permitted. Return 0 if OK or 2 if the page could not be mapped
1923 (can only happen in non SOFTMMU mode for I/O pages or pages
1924 conflicting with the host address space). */
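/* For RAM pages the entry stores addend = host_base - vaddr, so the fast
   path can form the host pointer as guest_vaddr + addend with a single
   add.  I/O, ROM, not-dirty RAM and watchpointed pages instead get the
   TLB_MMIO / TLB_NOTDIRTY flag bits set in the stored address, forcing
   accesses through the slow path and the iotlb[] entry filled in below. */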
1925 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1926 target_phys_addr_t paddr, int prot,
1927 int mmu_idx, int is_softmmu)
1932 target_ulong address;
1933 target_ulong code_address;
1934 target_phys_addr_t addend;
1938 target_phys_addr_t iotlb;
1940 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1942 pd = IO_MEM_UNASSIGNED;
1944 pd = p->phys_offset;
1946 #if defined(DEBUG_TLB)
1947 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1948 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
1953 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1954 /* IO memory case (romd handled later) */
1955 address |= TLB_MMIO;
1957 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1958 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1960 iotlb = pd & TARGET_PAGE_MASK;
1961 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
1962 iotlb |= IO_MEM_NOTDIRTY;
1964 iotlb |= IO_MEM_ROM;
1966 /* IO handlers are currently passed a physical address.
1967 It would be nice to pass an offset from the base address
1968 of that region. This would avoid having to special case RAM,
1969 and avoid full address decoding in every device.
1970 We can't use the high bits of pd for this because
1971 IO_MEM_ROMD uses these as a ram address. */
1972 iotlb = (pd & ~TARGET_PAGE_MASK) + paddr;
1975 code_address = address;
1976 /* Make accesses to pages with watchpoints go via the
1977 watchpoint trap routines. */
1978 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1979 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
1980 iotlb = io_mem_watch + paddr;
1981 /* TODO: The memory case can be optimized by not trapping
1982 reads of pages with a write breakpoint. */
1983 address |= TLB_MMIO;
1987 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1988 env->iotlb[mmu_idx][index] = iotlb - vaddr;
1989 te = &env->tlb_table[mmu_idx][index];
1990 te->addend = addend - vaddr;
1991 if (prot & PAGE_READ) {
1992 te->addr_read = address;
1997 if (prot & PAGE_EXEC) {
1998 te->addr_code = code_address;
2002 if (prot & PAGE_WRITE) {
2003 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2004 (pd & IO_MEM_ROMD)) {
2005 /* Write access calls the I/O callback. */
2006 te->addr_write = address | TLB_MMIO;
2007 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2008 !cpu_physical_memory_is_dirty(pd)) {
2009 te->addr_write = address | TLB_NOTDIRTY;
2011 te->addr_write = address;
2014 te->addr_write = -1;
2021 void tlb_flush(CPUState *env, int flush_global)
2025 void tlb_flush_page(CPUState *env, target_ulong addr)
2029 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2030 target_phys_addr_t paddr, int prot,
2031 int mmu_idx, int is_softmmu)
2036 /* dump memory mappings */
2037 void page_dump(FILE *f)
2039 unsigned long start, end;
2040 int i, j, prot, prot1;
2043 fprintf(f, "%-8s %-8s %-8s %s\n",
2044 "start", "end", "size", "prot");
2048 for(i = 0; i <= L1_SIZE; i++) {
2053 for(j = 0;j < L2_SIZE; j++) {
2058 if (prot1 != prot) {
2059 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2061 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2062 start, end, end - start,
2063 prot & PAGE_READ ? 'r' : '-',
2064 prot & PAGE_WRITE ? 'w' : '-',
2065 prot & PAGE_EXEC ? 'x' : '-');
2079 int page_get_flags(target_ulong address)
2083 p = page_find(address >> TARGET_PAGE_BITS);
2089 /* modify the flags of a page and invalidate the code if
2090 necessary. The flag PAGE_WRITE_ORG is positioned automatically
2091 depending on PAGE_WRITE */
2092 void page_set_flags(target_ulong start, target_ulong end, int flags)
2097 /* mmap_lock should already be held. */
2098 start = start & TARGET_PAGE_MASK;
2099 end = TARGET_PAGE_ALIGN(end);
2100 if (flags & PAGE_WRITE)
2101 flags |= PAGE_WRITE_ORG;
2102 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2103 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2104 /* We may be called for host regions that are outside guest
2108 /* if the write protection is set, then we invalidate the code
2110 if (!(p->flags & PAGE_WRITE) &&
2111 (flags & PAGE_WRITE) &&
2113 tb_invalidate_phys_page(addr, 0, NULL);
2119 int page_check_range(target_ulong start, target_ulong len, int flags)
2125 if (start + len < start)
2126 /* we've wrapped around */
2129 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2130 start = start & TARGET_PAGE_MASK;
2132 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2133 p = page_find(addr >> TARGET_PAGE_BITS);
2136 if( !(p->flags & PAGE_VALID) )
2139 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2141 if (flags & PAGE_WRITE) {
2142 if (!(p->flags & PAGE_WRITE_ORG))
2144 /* unprotect the page if it was put read-only because it
2145 contains translated code */
2146 if (!(p->flags & PAGE_WRITE)) {
2147 if (!page_unprotect(addr, 0, NULL))
2156 /* called from signal handler: invalidate the code and unprotect the
2157 page. Return TRUE if the fault was successfully handled. */
2158 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2160 unsigned int page_index, prot, pindex;
2162 target_ulong host_start, host_end, addr;
2164 /* Technically this isn't safe inside a signal handler. However we
2165 know this only ever happens in a synchronous SEGV handler, so in
2166 practice it seems to be ok. */
2169 host_start = address & qemu_host_page_mask;
2170 page_index = host_start >> TARGET_PAGE_BITS;
2171 p1 = page_find(page_index);
2176 host_end = host_start + qemu_host_page_size;
2179 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2183 /* if the page was really writable, then we change its
2184 protection back to writable */
2185 if (prot & PAGE_WRITE_ORG) {
2186 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2187 if (!(p1[pindex].flags & PAGE_WRITE)) {
2188 mprotect((void *)g2h(host_start), qemu_host_page_size,
2189 (prot & PAGE_BITS) | PAGE_WRITE);
2190 p1[pindex].flags |= PAGE_WRITE;
2191 /* and since the content will be modified, we must invalidate
2192 the corresponding translated code. */
2193 tb_invalidate_phys_page(address, pc, puc);
2194 #ifdef DEBUG_TB_CHECK
2195 tb_invalidate_check(address);
2205 static inline void tlb_set_dirty(CPUState *env,
2206 unsigned long addr, target_ulong vaddr)
2209 #endif /* defined(CONFIG_USER_ONLY) */
2211 #if !defined(CONFIG_USER_ONLY)
2212 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2214 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2215 ram_addr_t orig_memory);
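/* Sub-page registration: when a region smaller than TARGET_PAGE_SIZE is
   registered, the page's phys_offset is turned into an IO_MEM_SUBPAGE
   entry whose subpage_t (declared above) dispatches each access by its
   offset within the page (SUBPAGE_IDX) to the handlers recorded by
   subpage_register(). */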
2216 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2219 if (addr > start_addr) \
2222 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2223 if (start_addr2 > 0) \
2227 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2228 end_addr2 = TARGET_PAGE_SIZE - 1; \
2230 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2231 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2236 /* register physical memory. 'size' must be a multiple of the target
2237 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2239 void cpu_register_physical_memory(target_phys_addr_t start_addr,
2241 ram_addr_t phys_offset)
2243 target_phys_addr_t addr, end_addr;
2246 ram_addr_t orig_size = size;
2250 /* XXX: should not depend on cpu context */
2252 if (env->kqemu_enabled) {
2253 kqemu_set_phys_mem(start_addr, size, phys_offset);
2257 kvm_set_phys_mem(start_addr, size, phys_offset);
2259 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2260 end_addr = start_addr + (target_phys_addr_t)size;
2261 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2262 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2263 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2264 ram_addr_t orig_memory = p->phys_offset;
2265 target_phys_addr_t start_addr2, end_addr2;
2266 int need_subpage = 0;
2268 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2270 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2271 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2272 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2273 &p->phys_offset, orig_memory);
2275 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2278 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2280 p->phys_offset = phys_offset;
2281 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2282 (phys_offset & IO_MEM_ROMD))
2283 phys_offset += TARGET_PAGE_SIZE;
2286 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2287 p->phys_offset = phys_offset;
2288 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2289 (phys_offset & IO_MEM_ROMD))
2290 phys_offset += TARGET_PAGE_SIZE;
2292 target_phys_addr_t start_addr2, end_addr2;
2293 int need_subpage = 0;
2295 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2296 end_addr2, need_subpage);
2298 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2299 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2300 &p->phys_offset, IO_MEM_UNASSIGNED);
2301 subpage_register(subpage, start_addr2, end_addr2,
2308 /* since each CPU stores ram addresses in its TLB cache, we must
2309 reset the modified entries */
2311 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2316 /* XXX: temporary until new memory mapping API */
2317 ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2321 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2323 return IO_MEM_UNASSIGNED;
2324 return p->phys_offset;
2327 /* XXX: better than nothing */
2328 ram_addr_t qemu_ram_alloc(ram_addr_t size)
2331 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2332 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2333 (uint64_t)size, (uint64_t)phys_ram_size);
2336 addr = phys_ram_alloc_offset;
2337 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2341 void qemu_ram_free(ram_addr_t addr)
2345 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2347 #ifdef DEBUG_UNASSIGNED
2348 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2350 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2351 do_unassigned_access(addr, 0, 0, 0, 1);
2356 static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2358 #ifdef DEBUG_UNASSIGNED
2359 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2361 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2362 do_unassigned_access(addr, 0, 0, 0, 2);
2367 static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2369 #ifdef DEBUG_UNASSIGNED
2370 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2372 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2373 do_unassigned_access(addr, 0, 0, 0, 4);
2378 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2380 #ifdef DEBUG_UNASSIGNED
2381 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2383 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2384 do_unassigned_access(addr, 1, 0, 0, 1);
2388 static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2390 #ifdef DEBUG_UNASSIGNED
2391 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2393 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2394 do_unassigned_access(addr, 1, 0, 0, 2);
2398 static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2400 #ifdef DEBUG_UNASSIGNED
2401 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2403 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2404 do_unassigned_access(addr, 1, 0, 0, 4);
2408 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2409 unassigned_mem_readb,
2410 unassigned_mem_readw,
2411 unassigned_mem_readl,
2414 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2415 unassigned_mem_writeb,
2416 unassigned_mem_writew,
2417 unassigned_mem_writel,
2420 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2424 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2425 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2426 #if !defined(CONFIG_USER_ONLY)
2427 tb_invalidate_phys_page_fast(ram_addr, 1);
2428 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2431 stb_p(phys_ram_base + ram_addr, val);
2433 if (cpu_single_env->kqemu_enabled &&
2434 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2435 kqemu_modify_page(cpu_single_env, ram_addr);
2437 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2438 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2439 /* we remove the notdirty callback only if the code has been flushed */
2441 if (dirty_flags == 0xff)
2442 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2445 static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2449 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2450 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2451 #if !defined(CONFIG_USER_ONLY)
2452 tb_invalidate_phys_page_fast(ram_addr, 2);
2453 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2456 stw_p(phys_ram_base + ram_addr, val);
2458 if (cpu_single_env->kqemu_enabled &&
2459 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2460 kqemu_modify_page(cpu_single_env, ram_addr);
2462 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2463 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2464 /* we remove the notdirty callback only if the code has been flushed */
2466 if (dirty_flags == 0xff)
2467 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2470 static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2474 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2475 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2476 #if !defined(CONFIG_USER_ONLY)
2477 tb_invalidate_phys_page_fast(ram_addr, 4);
2478 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2481 stl_p(phys_ram_base + ram_addr, val);
2483 if (cpu_single_env->kqemu_enabled &&
2484 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2485 kqemu_modify_page(cpu_single_env, ram_addr);
2487 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2488 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2489 /* we remove the notdirty callback only if the code has been flushed */
2491 if (dirty_flags == 0xff)
2492 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
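/* Illustrative sketch (not compiled): the meaning of the per-page byte
   in phys_ram_dirty that the three handlers above update.  This roughly
   mirrors the cpu_physical_memory_is_dirty() helper declared elsewhere
   in this tree. */
#if 0
static inline int example_page_fully_dirty(ram_addr_t addr)
{
    /* 0xff means every client (code, VGA, migration) has already seen
       the page as dirty, so the slow notdirty path can be dropped and
       the TLB entry switched back to fast RAM access. */
    return phys_ram_dirty[addr >> TARGET_PAGE_BITS] == 0xff;
}
#endif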
2495 static CPUReadMemoryFunc *error_mem_read[3] = {
2496 NULL, /* never used */
2497 NULL, /* never used */
2498 NULL, /* never used */
2501 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2502 notdirty_mem_writeb,
2503 notdirty_mem_writew,
2504 notdirty_mem_writel,
2507 /* Generate a debug exception if a watchpoint has been hit. */
2508 static void check_watchpoint(int offset, int len_mask, int flags)
2510 CPUState *env = cpu_single_env;
2511 target_ulong pc, cs_base;
2512 TranslationBlock *tb;
2517 if (env->watchpoint_hit) {
2518 /* We re-entered the check after replacing the TB. Now raise
2519 * the debug interrupt so that it will trigger after the
2520 * current instruction. */
2521 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2524 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2525 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2526 if ((vaddr == (wp->vaddr & len_mask) ||
2527 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
2528 wp->flags |= BP_WATCHPOINT_HIT;
2529 if (!env->watchpoint_hit) {
2530 env->watchpoint_hit = wp;
2531 tb = tb_find_pc(env->mem_io_pc);
2533 cpu_abort(env, "check_watchpoint: could not find TB for "
2534 "pc=%p", (void *)env->mem_io_pc);
2536 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2537 tb_phys_invalidate(tb, -1);
2538 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2539 env->exception_index = EXCP_DEBUG;
2541 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2542 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2544 cpu_resume_from_signal(env, NULL);
2547 wp->flags &= ~BP_WATCHPOINT_HIT;
2552 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2553 so these check for a hit then pass through to the normal out-of-line handlers. */
2555 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2557 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
2558 return ldub_phys(addr);
2561 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2563 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
2564 return lduw_phys(addr);
2567 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2569 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
2570 return ldl_phys(addr);
2573 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2576 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
2577 stb_phys(addr, val);
2580 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2583 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
2584 stw_phys(addr, val);
2587 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2590 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
2591 stl_phys(addr, val);
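/* Illustrative sketch (not compiled): how a debugger front end arms one
   of these watchpoints.  cpu_watchpoint_insert() is declared elsewhere
   in this tree; the address and length below are made up. */
#if 0
static void example_arm_write_watchpoint(CPUState *env)
{
    /* Any guest store to the 4 bytes at 0x1000 is routed through
       watch_mem_write above, which raises EXCP_DEBUG via
       check_watchpoint(). */
    cpu_watchpoint_insert(env, 0x1000, 4, BP_MEM_WRITE, NULL);
}
#endif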
2594 static CPUReadMemoryFunc *watch_mem_read[3] = {
2600 static CPUWriteMemoryFunc *watch_mem_write[3] = {
2606 static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2612 idx = SUBPAGE_IDX(addr - mmio->base);
2613 #if defined(DEBUG_SUBPAGE)
2614 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2615 mmio, len, addr, idx);
2617 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
2622 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2623 uint32_t value, unsigned int len)
2627 idx = SUBPAGE_IDX(addr - mmio->base);
2628 #if defined(DEBUG_SUBPAGE)
2629 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2630 mmio, len, addr, idx, value);
2632 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
2635 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2637 #if defined(DEBUG_SUBPAGE)
2638 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2641 return subpage_readlen(opaque, addr, 0);
2644 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2647 #if defined(DEBUG_SUBPAGE)
2648 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2650 subpage_writelen(opaque, addr, value, 0);
2653 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2655 #if defined(DEBUG_SUBPAGE)
2656 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2659 return subpage_readlen(opaque, addr, 1);
2662 static void subpage_writew (void *opaque, target_phys_addr_t addr,
2665 #if defined(DEBUG_SUBPAGE)
2666 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2668 subpage_writelen(opaque, addr, value, 1);
2671 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2673 #if defined(DEBUG_SUBPAGE)
2674 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2677 return subpage_readlen(opaque, addr, 2);
2680 static void subpage_writel (void *opaque,
2681 target_phys_addr_t addr, uint32_t value)
2683 #if defined(DEBUG_SUBPAGE)
2684 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2686 subpage_writelen(opaque, addr, value, 2);
2689 static CPUReadMemoryFunc *subpage_read[] = {
2695 static CPUWriteMemoryFunc *subpage_write[] = {
2701 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2707 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2709 idx = SUBPAGE_IDX(start);
2710 eidx = SUBPAGE_IDX(end);
2711 #if defined(DEBUG_SUBPAGE)
2712 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2713 mmio, start, end, idx, eidx, memory);
2715 memory >>= IO_MEM_SHIFT;
2716 for (; idx <= eidx; idx++) {
2717 for (i = 0; i < 4; i++) {
2718 if (io_mem_read[memory][i]) {
2719 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2720 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2722 if (io_mem_write[memory][i]) {
2723 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2724 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2732 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2733 ram_addr_t orig_memory)
2738 mmio = qemu_mallocz(sizeof(subpage_t));
2741 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2742 #if defined(DEBUG_SUBPAGE)
2743 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2744 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2746 *phys = subpage_memory | IO_MEM_SUBPAGE;
2747 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
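/* Illustrative note (not compiled): callers never build subpages
   directly.  Registering a region smaller than TARGET_PAGE_SIZE through
   cpu_register_physical_memory() is enough; the code above splits the
   page on demand.  The io index and addresses are hypothetical. */
#if 0
static void example_register_small_mmio(int mydev_io_index)
{
    /* A 0x100-byte device window; the rest of the page keeps reporting
       IO_MEM_UNASSIGNED through the subpage. */
    cpu_register_physical_memory(0x10001000, 0x100, mydev_io_index);
}
#endif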
2753 static void io_mem_init(void)
2755 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2756 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2757 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2760 io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
2761 watch_mem_write, NULL);
2762 /* alloc dirty bits array */
2763 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2764 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2767 /* mem_read and mem_write are arrays of functions used to access
2768 memory as bytes (index 0), words (index 1) and dwords (index 2).
2769 Entries can be omitted by passing a NULL function pointer; the
2770 registered functions may be replaced dynamically later.
2771 If io_index is non-zero, the corresponding I/O zone is
2772 modified. If it is zero, a new I/O zone is allocated. The return
2773 value can be used with cpu_register_physical_memory(); -1 is
2774 returned on error. */
2775 int cpu_register_io_memory(int io_index,
2776 CPUReadMemoryFunc **mem_read,
2777 CPUWriteMemoryFunc **mem_write,
2780 int i, subwidth = 0;
2782 if (io_index <= 0) {
2783 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
2785 io_index = io_mem_nb++;
2787 if (io_index >= IO_MEM_NB_ENTRIES)
2791 for(i = 0;i < 3; i++) {
2792 if (!mem_read[i] || !mem_write[i])
2793 subwidth = IO_MEM_SUBWIDTH;
2794 io_mem_read[io_index][i] = mem_read[i];
2795 io_mem_write[io_index][i] = mem_write[i];
2797 io_mem_opaque[io_index] = opaque;
2798 return (io_index << IO_MEM_SHIFT) | subwidth;
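/* Illustrative sketch (not compiled) of the registration pattern the
   comment above describes.  The device callbacks, names and addresses
   are hypothetical. */
#if 0
static uint32_t mydev_readl(void *opaque, target_phys_addr_t addr)
{
    return 0;
}

static void mydev_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
}

/* Byte and word accessors omitted (NULL), which marks the region
   IO_MEM_SUBWIDTH as described above. */
static CPUReadMemoryFunc *mydev_read[3] = { NULL, NULL, mydev_readl };
static CPUWriteMemoryFunc *mydev_write[3] = { NULL, NULL, mydev_writel };

static void example_register_mydev(void)
{
    int io;

    /* io_index 0: allocate a new I/O zone. */
    io = cpu_register_io_memory(0, mydev_read, mydev_write, NULL);
    cpu_register_physical_memory(0xfe000000, TARGET_PAGE_SIZE, io);
}
#endif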
2801 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2803 return io_mem_write[io_index >> IO_MEM_SHIFT];
2806 CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2808 return io_mem_read[io_index >> IO_MEM_SHIFT];
2811 #endif /* !defined(CONFIG_USER_ONLY) */
2813 /* physical memory access (slow version, mainly for debug) */
2814 #if defined(CONFIG_USER_ONLY)
2815 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2816 int len, int is_write)
2823 page = addr & TARGET_PAGE_MASK;
2824 l = (page + TARGET_PAGE_SIZE) - addr;
2827 flags = page_get_flags(page);
2828 if (!(flags & PAGE_VALID))
2831 if (!(flags & PAGE_WRITE))
2833 /* XXX: this code should not depend on lock_user */
2834 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2835 /* FIXME - should this return an error rather than just fail? */
2838 unlock_user(p, addr, l);
2840 if (!(flags & PAGE_READ))
2842 /* XXX: this code should not depend on lock_user */
2843 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2844 /* FIXME - should this return an error rather than just fail? */
2847 unlock_user(p, addr, 0);
2856 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2857 int len, int is_write)
2862 target_phys_addr_t page;
2867 page = addr & TARGET_PAGE_MASK;
2868 l = (page + TARGET_PAGE_SIZE) - addr;
2871 p = phys_page_find(page >> TARGET_PAGE_BITS);
2873 pd = IO_MEM_UNASSIGNED;
2875 pd = p->phys_offset;
2879 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2880 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2881 /* XXX: could force cpu_single_env to NULL to avoid consistency problems */
2883 if (l >= 4 && ((addr & 3) == 0)) {
2884 /* 32 bit write access */
2886 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2888 } else if (l >= 2 && ((addr & 1) == 0)) {
2889 /* 16 bit write access */
2891 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2894 /* 8 bit write access */
2896 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2900 unsigned long addr1;
2901 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2903 ptr = phys_ram_base + addr1;
2904 memcpy(ptr, buf, l);
2905 if (!cpu_physical_memory_is_dirty(addr1)) {
2906 /* invalidate code */
2907 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2909 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2910 (0xff & ~CODE_DIRTY_FLAG);
2914 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2915 !(pd & IO_MEM_ROMD)) {
2917 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2918 if (l >= 4 && ((addr & 3) == 0)) {
2919 /* 32 bit read access */
2920 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2923 } else if (l >= 2 && ((addr & 1) == 0)) {
2924 /* 16 bit read access */
2925 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2929 /* 8 bit read access */
2930 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2936 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2937 (addr & ~TARGET_PAGE_MASK);
2938 memcpy(buf, ptr, l);
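/* Illustrative sketch (not compiled): typical DMA-style use of the slow
   path above through the cpu_physical_memory_read/write wrappers
   declared elsewhere in this tree.  Buffer size and address are made
   up. */
#if 0
static void example_dma_roundtrip(target_phys_addr_t guest_addr)
{
    uint8_t buf[512];

    cpu_physical_memory_read(guest_addr, buf, sizeof(buf));
    /* ... device model inspects or patches buf ... */
    cpu_physical_memory_write(guest_addr, buf, sizeof(buf));
}
#endif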
2947 /* used for ROM loading: can write to both RAM and ROM */
2948 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2949 const uint8_t *buf, int len)
2953 target_phys_addr_t page;
2958 page = addr & TARGET_PAGE_MASK;
2959 l = (page + TARGET_PAGE_SIZE) - addr;
2962 p = phys_page_find(page >> TARGET_PAGE_BITS);
2964 pd = IO_MEM_UNASSIGNED;
2966 pd = p->phys_offset;
2969 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2970 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2971 !(pd & IO_MEM_ROMD)) {
2974 unsigned long addr1;
2975 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2977 ptr = phys_ram_base + addr1;
2978 memcpy(ptr, buf, l);
2987 /* warning: addr must be aligned */
2988 uint32_t ldl_phys(target_phys_addr_t addr)
2996 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2998 pd = IO_MEM_UNASSIGNED;
3000 pd = p->phys_offset;
3003 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3004 !(pd & IO_MEM_ROMD)) {
3006 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3007 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3010 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3011 (addr & ~TARGET_PAGE_MASK);
3017 /* warning: addr must be aligned */
3018 uint64_t ldq_phys(target_phys_addr_t addr)
3026 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3028 pd = IO_MEM_UNASSIGNED;
3030 pd = p->phys_offset;
3033 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3034 !(pd & IO_MEM_ROMD)) {
3036 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3037 #ifdef TARGET_WORDS_BIGENDIAN
3038 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3039 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3041 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3042 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3046 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3047 (addr & ~TARGET_PAGE_MASK);
3054 uint32_t ldub_phys(target_phys_addr_t addr)
3057 cpu_physical_memory_read(addr, &val, 1);
3062 uint32_t lduw_phys(target_phys_addr_t addr)
3065 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3066 return tswap16(val);
3069 /* warning: addr must be aligned. The ram page is not masked as dirty
3070 and the code inside is not invalidated. It is useful if the dirty
3071 bits are used to track modified PTEs */
3072 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3079 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3081 pd = IO_MEM_UNASSIGNED;
3083 pd = p->phys_offset;
3086 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3087 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3088 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3090 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3091 ptr = phys_ram_base + addr1;
3094 if (unlikely(in_migration)) {
3095 if (!cpu_physical_memory_is_dirty(addr1)) {
3096 /* invalidate code */
3097 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3099 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3100 (0xff & ~CODE_DIRTY_FLAG);
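/* Illustrative sketch (not compiled): the kind of caller the comment
   above has in mind.  A target MMU helper updates accessed/dirty bits
   in a guest PTE with stl_phys_notdirty() so the store does not, by
   itself, flag the page as containing modified code.  The PTE bit is
   hypothetical. */
#if 0
static void example_set_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    pte |= 0x20;                     /* hypothetical "accessed" bit */
    stl_phys_notdirty(pte_addr, pte);
}
#endif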
3106 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3113 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3115 pd = IO_MEM_UNASSIGNED;
3117 pd = p->phys_offset;
3120 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3121 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3122 #ifdef TARGET_WORDS_BIGENDIAN
3123 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3124 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3126 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3127 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3130 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3131 (addr & ~TARGET_PAGE_MASK);
3136 /* warning: addr must be aligned */
3137 void stl_phys(target_phys_addr_t addr, uint32_t val)
3144 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3146 pd = IO_MEM_UNASSIGNED;
3148 pd = p->phys_offset;
3151 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3152 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3153 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3155 unsigned long addr1;
3156 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3158 ptr = phys_ram_base + addr1;
3160 if (!cpu_physical_memory_is_dirty(addr1)) {
3161 /* invalidate code */
3162 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3164 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3165 (0xff & ~CODE_DIRTY_FLAG);
3171 void stb_phys(target_phys_addr_t addr, uint32_t val)
3174 cpu_physical_memory_write(addr, &v, 1);
3178 void stw_phys(target_phys_addr_t addr, uint32_t val)
3180 uint16_t v = tswap16(val);
3181 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3185 void stq_phys(target_phys_addr_t addr, uint64_t val)
3188 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
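/* Illustrative sketch (not compiled): a device model reading and
   updating a descriptor in guest physical memory with the load/store
   helpers above.  The field offsets and the flag bit are made up. */
#if 0
static void example_consume_descriptor(target_phys_addr_t desc)
{
    uint32_t flags = ldl_phys(desc);        /* 32-bit field at +0 */
    uint64_t buf_addr = ldq_phys(desc + 8); /* 64-bit field at +8 */

    (void)buf_addr;
    stl_phys(desc, flags | 1);              /* mark it consumed */
}
#endif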
3193 /* virtual memory access for debug */
3194 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3195 uint8_t *buf, int len, int is_write)
3198 target_phys_addr_t phys_addr;
3202 page = addr & TARGET_PAGE_MASK;
3203 phys_addr = cpu_get_phys_page_debug(env, page);
3204 /* if no physical page mapped, return an error */
3205 if (phys_addr == -1)
3207 l = (page + TARGET_PAGE_SIZE) - addr;
3210 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK), buf, l, is_write);
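/* Illustrative sketch (not compiled): how a debugger stub reads guest
   virtual memory through the helper above.  Names are placeholders. */
#if 0
static int example_peek_long(CPUState *env, target_ulong vaddr, uint32_t *out)
{
    /* is_write == 0: read four bytes at the guest virtual address. */
    return cpu_memory_rw_debug(env, vaddr, (uint8_t *)out, 4, 0);
}
#endif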
3219 /* in deterministic execution mode, instructions doing device I/Os
3220 must be at the end of the TB */
3221 void cpu_io_recompile(CPUState *env, void *retaddr)
3223 TranslationBlock *tb;
3225 target_ulong pc, cs_base;
3228 tb = tb_find_pc((unsigned long)retaddr);
3230 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p", retaddr);
3233 n = env->icount_decr.u16.low + tb->icount;
3234 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3235 /* Calculate how many instructions had been executed before the fault occurred. */
3237 n = n - env->icount_decr.u16.low;
3238 /* Generate a new TB ending on the I/O insn. */
3240 /* On MIPS and SH, delay slot instructions can only be restarted if
3241 they were already the first instruction in the TB. If this is not
3242 the first instruction in a TB then re-execute the preceding branch. */
3244 #if defined(TARGET_MIPS)
3245 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3246 env->active_tc.PC -= 4;
3247 env->icount_decr.u16.low++;
3248 env->hflags &= ~MIPS_HFLAG_BMASK;
3250 #elif defined(TARGET_SH4)
3251 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3254 env->icount_decr.u16.low++;
3255 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3258 /* This should never happen. */
3259 if (n > CF_COUNT_MASK)
3260 cpu_abort(env, "TB too big during recompile");
3262 cflags = n | CF_LAST_IO;
3264 cs_base = tb->cs_base;
3266 tb_phys_invalidate(tb, -1);
3267 /* FIXME: In theory this could raise an exception. In practice
3268 we have already translated the block once so it's probably ok. */
3269 tb_gen_code(env, pc, cs_base, flags, cflags);
3270 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
3271 the first in the TB) then we end up generating a whole new TB and
3272 repeating the fault, which is horribly inefficient.
3273 Better would be to execute just this insn uncached, or generate a second new TB. */
3275 cpu_resume_from_signal(env, NULL);
3278 void dump_exec_info(FILE *f,
3279 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3281 int i, target_code_size, max_target_code_size;
3282 int direct_jmp_count, direct_jmp2_count, cross_page;
3283 TranslationBlock *tb;
3285 target_code_size = 0;
3286 max_target_code_size = 0;
3288 direct_jmp_count = 0;
3289 direct_jmp2_count = 0;
3290 for(i = 0; i < nb_tbs; i++) {
3292 target_code_size += tb->size;
3293 if (tb->size > max_target_code_size)
3294 max_target_code_size = tb->size;
3295 if (tb->page_addr[1] != -1)
3297 if (tb->tb_next_offset[0] != 0xffff) {
3299 if (tb->tb_next_offset[1] != 0xffff) {
3300 direct_jmp2_count++;
3304 /* XXX: avoid using doubles? */
3305 cpu_fprintf(f, "Translation buffer state:\n");
3306 cpu_fprintf(f, "gen code size %ld/%ld\n",
3307 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3308 cpu_fprintf(f, "TB count %d/%d\n",
3309 nb_tbs, code_gen_max_blocks);
3310 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
3311 nb_tbs ? target_code_size / nb_tbs : 0,
3312 max_target_code_size);
3313 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3314 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3315 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3316 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3318 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3319 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3321 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3323 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3324 cpu_fprintf(f, "\nStatistics:\n");
3325 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3326 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3327 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
3328 tcg_dump_info(f, cpu_fprintf);
3331 #if !defined(CONFIG_USER_ONLY)
3333 #define MMUSUFFIX _cmmu
3334 #define GETPC() NULL
3335 #define env cpu_single_env
3336 #define SOFTMMU_CODE_ACCESS
3339 #include "softmmu_template.h"
3342 #include "softmmu_template.h"
3345 #include "softmmu_template.h"
3348 #include "softmmu_template.h"