2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
24 #include <sys/types.h>
37 #include "qemu-common.h"
42 #if defined(CONFIG_USER_ONLY)
46 //#define DEBUG_TB_INVALIDATE
49 //#define DEBUG_UNASSIGNED
51 /* make various TB consistency checks */
52 //#define DEBUG_TB_CHECK
53 //#define DEBUG_TLB_CHECK
55 //#define DEBUG_IOPORT
56 //#define DEBUG_SUBPAGE
58 #if !defined(CONFIG_USER_ONLY)
59 /* TB consistency checks only implemented for usermode emulation. */
63 #define SMC_BITMAP_USE_THRESHOLD 10
65 #if defined(TARGET_SPARC64)
66 #define TARGET_PHYS_ADDR_SPACE_BITS 41
67 #elif defined(TARGET_SPARC)
68 #define TARGET_PHYS_ADDR_SPACE_BITS 36
69 #elif defined(TARGET_ALPHA)
70 #define TARGET_PHYS_ADDR_SPACE_BITS 42
71 #define TARGET_VIRT_ADDR_SPACE_BITS 42
72 #elif defined(TARGET_PPC64)
73 #define TARGET_PHYS_ADDR_SPACE_BITS 42
74 #elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
75 #define TARGET_PHYS_ADDR_SPACE_BITS 42
76 #elif defined(TARGET_I386) && !defined(USE_KQEMU)
77 #define TARGET_PHYS_ADDR_SPACE_BITS 36
79 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
80 #define TARGET_PHYS_ADDR_SPACE_BITS 32
83 static TranslationBlock *tbs;
84 int code_gen_max_blocks;
85 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
87 /* any access to the tbs or the page table must use this lock */
88 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
90 #if defined(__arm__) || defined(__sparc_v9__)
91 /* The prologue must be reachable with a direct jump. ARM and Sparc64
92 have limited branch ranges (possibly also PPC) so place it in a
93 section close to the code segment. */
94 #define code_gen_section \
95 __attribute__((__section__(".gen_code"))) \
96 __attribute__((aligned (32)))
98 #define code_gen_section \
99 __attribute__((aligned (32)))
102 uint8_t code_gen_prologue[1024] code_gen_section;
103 static uint8_t *code_gen_buffer;
104 static unsigned long code_gen_buffer_size;
105 /* threshold to flush the translated code buffer */
106 static unsigned long code_gen_buffer_max_size;
107 uint8_t *code_gen_ptr;
109 #if !defined(CONFIG_USER_ONLY)
110 ram_addr_t phys_ram_size;
112 uint8_t *phys_ram_base;
113 uint8_t *phys_ram_dirty;
114 static int in_migration;
115 static ram_addr_t phys_ram_alloc_offset = 0;
119 /* current CPU in the current thread. It is only valid inside
121 CPUState *cpu_single_env;
122 /* 0 = Do not count executed instructions.
123 1 = Precise instruction counting.
124 2 = Adaptive rate instruction counting. */
126 /* Current instruction counter. While executing translated code this may
127 include some instructions that have not yet been executed. */
130 typedef struct PageDesc {
131 /* list of TBs intersecting this ram page */
132 TranslationBlock *first_tb;
133 /* in order to optimize self-modifying code handling, we count the number
134 of write accesses to a given page so we can switch to a bitmap */
135 unsigned int code_write_count;
136 uint8_t *code_bitmap;
137 #if defined(CONFIG_USER_ONLY)
142 typedef struct PhysPageDesc {
143 /* offset in host memory of the page + io_index in the low bits */
144 ram_addr_t phys_offset;
145 ram_addr_t region_offset;
149 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
150 /* XXX: this is a temporary hack for alpha target.
151 * In the future, this is to be replaced by a multi-level table
152 * to actually be able to handle the complete 64-bit address space.
154 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
156 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
159 #define L1_SIZE (1 << L1_BITS)
160 #define L2_SIZE (1 << L2_BITS)
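/* Illustrative note (not part of the original comments): these defines set
   up a two-level lookup keyed by the target page number.  Assuming 4 KiB
   target pages and L2_BITS == 10, a 32-bit target splits as:

       index   = virt_addr >> TARGET_PAGE_BITS;    -- page number
       level 1 = index >> L2_BITS;                 -- selects a PageDesc chunk
       level 2 = index & (L2_SIZE - 1);            -- selects the entry in it

   page_l1_map()/page_find_alloc() below implement exactly this split and
   allocate the second-level arrays lazily. */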
162 unsigned long qemu_real_host_page_size;
163 unsigned long qemu_host_page_bits;
164 unsigned long qemu_host_page_size;
165 unsigned long qemu_host_page_mask;
167 /* XXX: for system emulation, it could just be an array */
168 static PageDesc *l1_map[L1_SIZE];
169 static PhysPageDesc **l1_phys_map;
171 #if !defined(CONFIG_USER_ONLY)
172 static void io_mem_init(void);
174 /* io memory support */
175 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
176 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
177 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
178 static char io_mem_used[IO_MEM_NB_ENTRIES];
179 static int io_mem_watch;
183 static const char *logfilename = "/tmp/qemu.log";
186 static int log_append = 0;
189 static int tlb_flush_count;
190 static int tb_flush_count;
191 static int tb_phys_invalidate_count;
193 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
194 typedef struct subpage_t {
195 target_phys_addr_t base;
196 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
197 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
198 void *opaque[TARGET_PAGE_SIZE][2][4];
199 ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
203 static void map_exec(void *addr, long size)
206 VirtualProtect(addr, size,
207 PAGE_EXECUTE_READWRITE, &old_protect);
211 static void map_exec(void *addr, long size)
213 unsigned long start, end, page_size;
215 page_size = getpagesize();
216 start = (unsigned long)addr;
217 start &= ~(page_size - 1);
219 end = (unsigned long)addr + size;
220 end += page_size - 1;
221 end &= ~(page_size - 1);
223 mprotect((void *)start, end - start,
224 PROT_READ | PROT_WRITE | PROT_EXEC);
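/* Worked example (illustrative, assuming a 4 KiB host page size): for
   addr == 0x40001234 and size == 0x100, start rounds down to 0x40001000
   and end rounds up to 0x40002000, so the whole host page range holding
   the region is re-protected PROT_READ | PROT_WRITE | PROT_EXEC. */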
228 static void page_init(void)
230 /* NOTE: we can always suppose that qemu_host_page_size >=
234 SYSTEM_INFO system_info;
236 GetSystemInfo(&system_info);
237 qemu_real_host_page_size = system_info.dwPageSize;
240 qemu_real_host_page_size = getpagesize();
242 if (qemu_host_page_size == 0)
243 qemu_host_page_size = qemu_real_host_page_size;
244 if (qemu_host_page_size < TARGET_PAGE_SIZE)
245 qemu_host_page_size = TARGET_PAGE_SIZE;
246 qemu_host_page_bits = 0;
247 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
248 qemu_host_page_bits++;
249 qemu_host_page_mask = ~(qemu_host_page_size - 1);
250 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
251 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
253 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
255 long long startaddr, endaddr;
260 last_brk = (unsigned long)sbrk(0);
261 f = fopen("/proc/self/maps", "r");
264 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
266 startaddr = MIN(startaddr,
267 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
268 endaddr = MIN(endaddr,
269 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
270 page_set_flags(startaddr & TARGET_PAGE_MASK,
271 TARGET_PAGE_ALIGN(endaddr),
282 static inline PageDesc **page_l1_map(target_ulong index)
284 #if TARGET_LONG_BITS > 32
285 /* Host memory outside guest VM. For 32-bit targets we have already
286 excluded high addresses. */
287 if (index > ((target_ulong)L2_SIZE * L1_SIZE))
290 return &l1_map[index >> L2_BITS];
293 static inline PageDesc *page_find_alloc(target_ulong index)
296 lp = page_l1_map(index);
302 /* allocate if not found */
303 #if defined(CONFIG_USER_ONLY)
304 size_t len = sizeof(PageDesc) * L2_SIZE;
305 /* Don't use qemu_malloc because it may recurse. */
306 p = mmap(0, len, PROT_READ | PROT_WRITE,
307 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
310 unsigned long addr = h2g(p);
311 page_set_flags(addr & TARGET_PAGE_MASK,
312 TARGET_PAGE_ALIGN(addr + len),
316 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
320 return p + (index & (L2_SIZE - 1));
323 static inline PageDesc *page_find(target_ulong index)
326 lp = page_l1_map(index);
333 return p + (index & (L2_SIZE - 1));
336 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
341 p = (void **)l1_phys_map;
342 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
344 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
345 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
347 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
350 /* allocate if not found */
353 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
354 memset(p, 0, sizeof(void *) * L1_SIZE);
358 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
362 /* allocate if not found */
365 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
367 for (i = 0; i < L2_SIZE; i++) {
368 pd[i].phys_offset = IO_MEM_UNASSIGNED;
369 pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
372 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
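/* Descriptive note: the physical page table mirrors the virtual one but
   gains an extra top level when TARGET_PHYS_ADDR_SPACE_BITS > 32, as the
   indexing above shows:

       top (only > 32 bits) = (index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1)
       middle               = (index >> L2_BITS) & (L1_SIZE - 1)
       leaf                 = index & (L2_SIZE - 1)

   Freshly allocated leaves are pre-filled with IO_MEM_UNASSIGNED so that
   unregistered pages inside an allocated chunk behave as unassigned
   memory rather than garbage. */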
375 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
377 return phys_page_find_alloc(index, 0);
380 #if !defined(CONFIG_USER_ONLY)
381 static void tlb_protect_code(ram_addr_t ram_addr);
382 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
384 #define mmap_lock() do { } while(0)
385 #define mmap_unlock() do { } while(0)
388 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
390 #if defined(CONFIG_USER_ONLY)
391 /* Currently it is not recommended to allocate big chunks of data in
392 user mode. This will change when a dedicated libc is used. */
393 #define USE_STATIC_CODE_GEN_BUFFER
396 #ifdef USE_STATIC_CODE_GEN_BUFFER
397 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
400 static void code_gen_alloc(unsigned long tb_size)
402 #ifdef USE_STATIC_CODE_GEN_BUFFER
403 code_gen_buffer = static_code_gen_buffer;
404 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
405 map_exec(code_gen_buffer, code_gen_buffer_size);
407 code_gen_buffer_size = tb_size;
408 if (code_gen_buffer_size == 0) {
409 #if defined(CONFIG_USER_ONLY)
410 /* in user mode, phys_ram_size is not meaningful */
411 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
413 /* XXX: needs adjustments */
414 code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
417 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
418 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
419 /* The code gen buffer location may have constraints depending on
420 the host cpu and OS */
421 #if defined(__linux__)
426 flags = MAP_PRIVATE | MAP_ANONYMOUS;
427 #if defined(__x86_64__)
429 /* Cannot map more than that */
430 if (code_gen_buffer_size > (800 * 1024 * 1024))
431 code_gen_buffer_size = (800 * 1024 * 1024);
432 #elif defined(__sparc_v9__)
433 // Map the buffer below 2G, so we can use direct calls and branches
435 start = (void *) 0x60000000UL;
436 if (code_gen_buffer_size > (512 * 1024 * 1024))
437 code_gen_buffer_size = (512 * 1024 * 1024);
438 #elif defined(__arm__)
439 /* Map the buffer below 32M, so we can use direct calls and branches */
441 start = (void *) 0x01000000UL;
442 if (code_gen_buffer_size > 16 * 1024 * 1024)
443 code_gen_buffer_size = 16 * 1024 * 1024;
445 code_gen_buffer = mmap(start, code_gen_buffer_size,
446 PROT_WRITE | PROT_READ | PROT_EXEC,
448 if (code_gen_buffer == MAP_FAILED) {
449 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
453 #elif defined(__FreeBSD__) || defined(__DragonFly__)
457 flags = MAP_PRIVATE | MAP_ANONYMOUS;
458 #if defined(__x86_64__)
459 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
460 * 0x40000000 is free */
462 addr = (void *)0x40000000;
463 /* Cannot map more than that */
464 if (code_gen_buffer_size > (800 * 1024 * 1024))
465 code_gen_buffer_size = (800 * 1024 * 1024);
467 code_gen_buffer = mmap(addr, code_gen_buffer_size,
468 PROT_WRITE | PROT_READ | PROT_EXEC,
470 if (code_gen_buffer == MAP_FAILED) {
471 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
476 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
477 map_exec(code_gen_buffer, code_gen_buffer_size);
479 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
480 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
481 code_gen_buffer_max_size = code_gen_buffer_size -
482 code_gen_max_block_size();
483 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
484 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
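/* Sizing rationale (descriptive, not from the original comments):
   code_gen_buffer_max_size keeps one worst-case block of headroom, so a
   new TB is only started while even the largest possible translation
   still fits, and code_gen_max_blocks sizes the tbs[] array from the
   average block size.  Hitting either limit makes tb_alloc() fail, which
   in turn triggers a full tb_flush(). */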
487 /* Must be called before using the QEMU cpus. 'tb_size' is the size
488 (in bytes) allocated to the translation buffer. Zero means default
490 void cpu_exec_init_all(unsigned long tb_size)
493 code_gen_alloc(tb_size);
494 code_gen_ptr = code_gen_buffer;
496 #if !defined(CONFIG_USER_ONLY)
501 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
503 #define CPU_COMMON_SAVE_VERSION 1
505 static void cpu_common_save(QEMUFile *f, void *opaque)
507 CPUState *env = opaque;
509 qemu_put_be32s(f, &env->halted);
510 qemu_put_be32s(f, &env->interrupt_request);
513 static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
515 CPUState *env = opaque;
517 if (version_id != CPU_COMMON_SAVE_VERSION)
520 qemu_get_be32s(f, &env->halted);
521 qemu_get_be32s(f, &env->interrupt_request);
522 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
523 version_id is increased. */
524 env->interrupt_request &= ~0x01;
531 void cpu_exec_init(CPUState *env)
536 #if defined(CONFIG_USER_ONLY)
539 env->next_cpu = NULL;
542 while (*penv != NULL) {
543 penv = (CPUState **)&(*penv)->next_cpu;
546 env->cpu_index = cpu_index;
547 TAILQ_INIT(&env->breakpoints);
548 TAILQ_INIT(&env->watchpoints);
550 #if defined(CONFIG_USER_ONLY)
553 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
554 register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
555 cpu_common_save, cpu_common_load, env);
556 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
557 cpu_save, cpu_load, env);
561 static inline void invalidate_page_bitmap(PageDesc *p)
563 if (p->code_bitmap) {
564 qemu_free(p->code_bitmap);
565 p->code_bitmap = NULL;
567 p->code_write_count = 0;
570 /* set to NULL all the 'first_tb' fields in all PageDescs */
571 static void page_flush_tb(void)
576 for(i = 0; i < L1_SIZE; i++) {
579 for(j = 0; j < L2_SIZE; j++) {
581 invalidate_page_bitmap(p);
588 /* flush all the translation blocks */
589 /* XXX: tb_flush is currently not thread safe */
590 void tb_flush(CPUState *env1)
593 #if defined(DEBUG_FLUSH)
594 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
595 (unsigned long)(code_gen_ptr - code_gen_buffer),
597 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
599 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
600 cpu_abort(env1, "Internal error: code buffer overflow\n");
604 for(env = first_cpu; env != NULL; env = env->next_cpu) {
605 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
608 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
611 code_gen_ptr = code_gen_buffer;
612 /* XXX: flush processor icache at this point if cache flush is
617 #ifdef DEBUG_TB_CHECK
619 static void tb_invalidate_check(target_ulong address)
621 TranslationBlock *tb;
623 address &= TARGET_PAGE_MASK;
624 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
625 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
626 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
627 address >= tb->pc + tb->size)) {
628 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
629 address, (long)tb->pc, tb->size);
635 /* verify that all the pages have correct rights for code */
636 static void tb_page_check(void)
638 TranslationBlock *tb;
639 int i, flags1, flags2;
641 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
642 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
643 flags1 = page_get_flags(tb->pc);
644 flags2 = page_get_flags(tb->pc + tb->size - 1);
645 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
646 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
647 (long)tb->pc, tb->size, flags1, flags2);
653 static void tb_jmp_check(TranslationBlock *tb)
655 TranslationBlock *tb1;
658 /* suppress any remaining jumps to this TB */
662 tb1 = (TranslationBlock *)((long)tb1 & ~3);
665 tb1 = tb1->jmp_next[n1];
667 /* check end of list */
669 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
675 /* invalidate one TB */
676 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
679 TranslationBlock *tb1;
683 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
686 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
690 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
692 TranslationBlock *tb1;
698 tb1 = (TranslationBlock *)((long)tb1 & ~3);
700 *ptb = tb1->page_next[n1];
703 ptb = &tb1->page_next[n1];
707 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
709 TranslationBlock *tb1, **ptb;
712 ptb = &tb->jmp_next[n];
715 /* find tb(n) in circular list */
719 tb1 = (TranslationBlock *)((long)tb1 & ~3);
720 if (n1 == n && tb1 == tb)
723 ptb = &tb1->jmp_first;
725 ptb = &tb1->jmp_next[n1];
728 /* now we can suppress tb(n) from the list */
729 *ptb = tb->jmp_next[n];
731 tb->jmp_next[n] = NULL;
735 /* reset the jump entry 'n' of a TB so that it is not chained to
737 static inline void tb_reset_jump(TranslationBlock *tb, int n)
739 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
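/* Descriptive note on the chaining encoding used throughout this file:
   pointers stored in jmp_first/jmp_next carry the branch index in their
   two low bits, i.e. (TranslationBlock *)((long)tb | n) with n in {0,1},
   and ((long)tb | 2) terminates the circular list.  The "& 3" and "& ~3"
   masks seen above recover the index and the real pointer.  tb_reset_jump()
   retargets the generated jump at the code immediately following it
   (tc_ptr + tb_next_offset[n]), which un-chains the block so execution
   falls back to the exit path instead of the next TB. */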
742 void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
747 target_phys_addr_t phys_pc;
748 TranslationBlock *tb1, *tb2;
750 /* remove the TB from the hash list */
751 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
752 h = tb_phys_hash_func(phys_pc);
753 tb_remove(&tb_phys_hash[h], tb,
754 offsetof(TranslationBlock, phys_hash_next));
756 /* remove the TB from the page list */
757 if (tb->page_addr[0] != page_addr) {
758 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
759 tb_page_remove(&p->first_tb, tb);
760 invalidate_page_bitmap(p);
762 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
763 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
764 tb_page_remove(&p->first_tb, tb);
765 invalidate_page_bitmap(p);
768 tb_invalidated_flag = 1;
770 /* remove the TB from each CPU's tb_jmp_cache */
771 h = tb_jmp_cache_hash_func(tb->pc);
772 for(env = first_cpu; env != NULL; env = env->next_cpu) {
773 if (env->tb_jmp_cache[h] == tb)
774 env->tb_jmp_cache[h] = NULL;
777 /* suppress this TB from the two jump lists */
778 tb_jmp_remove(tb, 0);
779 tb_jmp_remove(tb, 1);
781 /* suppress any remaining jumps to this TB */
787 tb1 = (TranslationBlock *)((long)tb1 & ~3);
788 tb2 = tb1->jmp_next[n1];
789 tb_reset_jump(tb1, n1);
790 tb1->jmp_next[n1] = NULL;
793 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
795 tb_phys_invalidate_count++;
798 static inline void set_bits(uint8_t *tab, int start, int len)
804 mask = 0xff << (start & 7);
805 if ((start & ~7) == (end & ~7)) {
807 mask &= ~(0xff << (end & 7));
812 start = (start + 8) & ~7;
814 while (start < end1) {
819 mask = ~(0xff << (end & 7));
825 static void build_page_bitmap(PageDesc *p)
827 int n, tb_start, tb_end;
828 TranslationBlock *tb;
830 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
835 tb = (TranslationBlock *)((long)tb & ~3);
836 /* NOTE: this is subtle as a TB may span two physical pages */
838 /* NOTE: tb_end may be after the end of the page, but
839 it is not a problem */
840 tb_start = tb->pc & ~TARGET_PAGE_MASK;
841 tb_end = tb_start + tb->size;
842 if (tb_end > TARGET_PAGE_SIZE)
843 tb_end = TARGET_PAGE_SIZE;
846 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
848 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
849 tb = tb->page_next[n];
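/* Illustrative summary: code_bitmap holds one bit per byte of the target
   page (hence TARGET_PAGE_SIZE / 8 bytes), and set_bits() marks the byte
   range [tb_start, tb_end) covered by each TB on the page.  This lets
   tb_invalidate_phys_page_fast() below test a small write with a single
   shift and mask instead of walking the whole TB list. */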
853 TranslationBlock *tb_gen_code(CPUState *env,
854 target_ulong pc, target_ulong cs_base,
855 int flags, int cflags)
857 TranslationBlock *tb;
859 target_ulong phys_pc, phys_page2, virt_page2;
862 phys_pc = get_phys_addr_code(env, pc);
865 /* flush must be done */
867 /* cannot fail at this point */
869 /* Don't forget to invalidate previous TB info. */
870 tb_invalidated_flag = 1;
872 tc_ptr = code_gen_ptr;
874 tb->cs_base = cs_base;
877 cpu_gen_code(env, tb, &code_gen_size);
878 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
880 /* check next page if needed */
881 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
883 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
884 phys_page2 = get_phys_addr_code(env, virt_page2);
886 tb_link_phys(tb, phys_pc, phys_page2);
890 /* invalidate all TBs which intersect with the target physical page
891 starting in range [start, end). NOTE: start and end must refer to
892 the same physical page. 'is_cpu_write_access' should be true if called
893 from a real cpu write access: the virtual CPU will exit the current
894 TB if code is modified inside this TB. */
895 void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
896 int is_cpu_write_access)
898 TranslationBlock *tb, *tb_next, *saved_tb;
899 CPUState *env = cpu_single_env;
900 target_ulong tb_start, tb_end;
903 #ifdef TARGET_HAS_PRECISE_SMC
904 int current_tb_not_found = is_cpu_write_access;
905 TranslationBlock *current_tb = NULL;
906 int current_tb_modified = 0;
907 target_ulong current_pc = 0;
908 target_ulong current_cs_base = 0;
909 int current_flags = 0;
910 #endif /* TARGET_HAS_PRECISE_SMC */
912 p = page_find(start >> TARGET_PAGE_BITS);
915 if (!p->code_bitmap &&
916 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
917 is_cpu_write_access) {
918 /* build code bitmap */
919 build_page_bitmap(p);
922 /* we remove all the TBs in the range [start, end) */
923 /* XXX: see if in some cases it could be faster to invalidate all the code */
927 tb = (TranslationBlock *)((long)tb & ~3);
928 tb_next = tb->page_next[n];
929 /* NOTE: this is subtle as a TB may span two physical pages */
931 /* NOTE: tb_end may be after the end of the page, but
932 it is not a problem */
933 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
934 tb_end = tb_start + tb->size;
936 tb_start = tb->page_addr[1];
937 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
939 if (!(tb_end <= start || tb_start >= end)) {
940 #ifdef TARGET_HAS_PRECISE_SMC
941 if (current_tb_not_found) {
942 current_tb_not_found = 0;
944 if (env->mem_io_pc) {
945 /* now we have a real cpu fault */
946 current_tb = tb_find_pc(env->mem_io_pc);
949 if (current_tb == tb &&
950 (current_tb->cflags & CF_COUNT_MASK) != 1) {
951 /* If we are modifying the current TB, we must stop
952 its execution. We could be more precise by checking
953 that the modification is after the current PC, but it
954 would require a specialized function to partially
955 restore the CPU state */
957 current_tb_modified = 1;
958 cpu_restore_state(current_tb, env,
959 env->mem_io_pc, NULL);
960 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
963 #endif /* TARGET_HAS_PRECISE_SMC */
964 /* we need to do that to handle the case where a signal
965 occurs while doing tb_phys_invalidate() */
968 saved_tb = env->current_tb;
969 env->current_tb = NULL;
971 tb_phys_invalidate(tb, -1);
973 env->current_tb = saved_tb;
974 if (env->interrupt_request && env->current_tb)
975 cpu_interrupt(env, env->interrupt_request);
980 #if !defined(CONFIG_USER_ONLY)
981 /* if no code remaining, no need to continue to use slow writes */
983 invalidate_page_bitmap(p);
984 if (is_cpu_write_access) {
985 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
989 #ifdef TARGET_HAS_PRECISE_SMC
990 if (current_tb_modified) {
991 /* we generate a block containing just the instruction
992 modifying the memory. It will ensure that it cannot modify
994 env->current_tb = NULL;
995 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
996 cpu_resume_from_signal(env, NULL);
1001 /* len must be <= 8 and start must be a multiple of len */
1002 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1008 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1009 cpu_single_env->mem_io_vaddr, len,
1010 cpu_single_env->eip,
1011 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1014 p = page_find(start >> TARGET_PAGE_BITS);
1017 if (p->code_bitmap) {
1018 offset = start & ~TARGET_PAGE_MASK;
1019 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1020 if (b & ((1 << len) - 1))
1024 tb_invalidate_phys_page_range(start, start + len, 1);
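/* Worked example (illustrative): for a 4-byte write at page offset 0x123,
   offset >> 3 == 0x24 selects the bitmap byte, ">> (offset & 7)" shifts
   bit 3 of that byte down to bit 0, and "b & ((1 << 4) - 1)" is non-zero
   iff one of the four written bytes overlaps translated code, in which
   case the slow tb_invalidate_phys_page_range() path is taken. */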
1028 #if !defined(CONFIG_SOFTMMU)
1029 static void tb_invalidate_phys_page(target_phys_addr_t addr,
1030 unsigned long pc, void *puc)
1032 TranslationBlock *tb;
1035 #ifdef TARGET_HAS_PRECISE_SMC
1036 TranslationBlock *current_tb = NULL;
1037 CPUState *env = cpu_single_env;
1038 int current_tb_modified = 0;
1039 target_ulong current_pc = 0;
1040 target_ulong current_cs_base = 0;
1041 int current_flags = 0;
1044 addr &= TARGET_PAGE_MASK;
1045 p = page_find(addr >> TARGET_PAGE_BITS);
1049 #ifdef TARGET_HAS_PRECISE_SMC
1050 if (tb && pc != 0) {
1051 current_tb = tb_find_pc(pc);
1054 while (tb != NULL) {
1056 tb = (TranslationBlock *)((long)tb & ~3);
1057 #ifdef TARGET_HAS_PRECISE_SMC
1058 if (current_tb == tb &&
1059 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1060 /* If we are modifying the current TB, we must stop
1061 its execution. We could be more precise by checking
1062 that the modification is after the current PC, but it
1063 would require a specialized function to partially
1064 restore the CPU state */
1066 current_tb_modified = 1;
1067 cpu_restore_state(current_tb, env, pc, puc);
1068 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1071 #endif /* TARGET_HAS_PRECISE_SMC */
1072 tb_phys_invalidate(tb, addr);
1073 tb = tb->page_next[n];
1076 #ifdef TARGET_HAS_PRECISE_SMC
1077 if (current_tb_modified) {
1078 /* we generate a block containing just the instruction
1079 modifying the memory. It will ensure that it cannot modify
1081 env->current_tb = NULL;
1082 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1083 cpu_resume_from_signal(env, puc);
1089 /* add the tb in the target page and protect it if necessary */
1090 static inline void tb_alloc_page(TranslationBlock *tb,
1091 unsigned int n, target_ulong page_addr)
1094 TranslationBlock *last_first_tb;
1096 tb->page_addr[n] = page_addr;
1097 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1098 tb->page_next[n] = p->first_tb;
1099 last_first_tb = p->first_tb;
1100 p->first_tb = (TranslationBlock *)((long)tb | n);
1101 invalidate_page_bitmap(p);
1103 #if defined(TARGET_HAS_SMC) || 1
1105 #if defined(CONFIG_USER_ONLY)
1106 if (p->flags & PAGE_WRITE) {
1111 /* force the host page as non-writable (writes will have a
1112 page fault + mprotect overhead) */
1113 page_addr &= qemu_host_page_mask;
1115 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1116 addr += TARGET_PAGE_SIZE) {
1118 p2 = page_find (addr >> TARGET_PAGE_BITS);
1122 p2->flags &= ~PAGE_WRITE;
1123 page_get_flags(addr);
1125 mprotect(g2h(page_addr), qemu_host_page_size,
1126 (prot & PAGE_BITS) & ~PAGE_WRITE);
1127 #ifdef DEBUG_TB_INVALIDATE
1128 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1133 /* if some code is already present, then the pages are already
1134 protected. So we handle the case where only the first TB is
1135 allocated in a physical page */
1136 if (!last_first_tb) {
1137 tlb_protect_code(page_addr);
1141 #endif /* TARGET_HAS_SMC */
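/* Descriptive note: self-modifying code is caught differently per mode.
   In user mode the host page is mprotect()ed read-only here, so a guest
   write faults into page_unprotect(), which restores PAGE_WRITE and
   invalidates the affected TBs.  In softmmu mode tlb_protect_code()
   instead clears CODE_DIRTY_FLAG for the page so that stores are routed
   through the not-dirty slow path (see the notdirty_mem_write* handlers
   near the end of this file). */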
1144 /* Allocate a new translation block. Flush the translation buffer if
1145 too many translation blocks or too much generated code. */
1146 TranslationBlock *tb_alloc(target_ulong pc)
1148 TranslationBlock *tb;
1150 if (nb_tbs >= code_gen_max_blocks ||
1151 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1153 tb = &tbs[nb_tbs++];
1159 void tb_free(TranslationBlock *tb)
1161 /* In practice this is mostly used for single-use temporary TBs.
1162 Ignore the hard cases and just back up if this TB happens to
1163 be the last one generated. */
1164 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1165 code_gen_ptr = tb->tc_ptr;
1170 /* add a new TB and link it to the physical page tables. phys_page2 is
1171 (-1) to indicate that only one page contains the TB. */
1172 void tb_link_phys(TranslationBlock *tb,
1173 target_ulong phys_pc, target_ulong phys_page2)
1176 TranslationBlock **ptb;
1178 /* Grab the mmap lock to stop another thread invalidating this TB
1179 before we are done. */
1181 /* add in the physical hash table */
1182 h = tb_phys_hash_func(phys_pc);
1183 ptb = &tb_phys_hash[h];
1184 tb->phys_hash_next = *ptb;
1187 /* add in the page list */
1188 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1189 if (phys_page2 != -1)
1190 tb_alloc_page(tb, 1, phys_page2);
1192 tb->page_addr[1] = -1;
1194 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1195 tb->jmp_next[0] = NULL;
1196 tb->jmp_next[1] = NULL;
1198 /* init original jump addresses */
1199 if (tb->tb_next_offset[0] != 0xffff)
1200 tb_reset_jump(tb, 0);
1201 if (tb->tb_next_offset[1] != 0xffff)
1202 tb_reset_jump(tb, 1);
1204 #ifdef DEBUG_TB_CHECK
1210 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1211 tb[1].tc_ptr. Return NULL if not found */
1212 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1214 int m_min, m_max, m;
1216 TranslationBlock *tb;
1220 if (tc_ptr < (unsigned long)code_gen_buffer ||
1221 tc_ptr >= (unsigned long)code_gen_ptr)
1223 /* binary search (cf Knuth) */
1226 while (m_min <= m_max) {
1227 m = (m_min + m_max) >> 1;
1229 v = (unsigned long)tb->tc_ptr;
1232 else if (tc_ptr < v) {
1241 static void tb_reset_jump_recursive(TranslationBlock *tb);
1243 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1245 TranslationBlock *tb1, *tb_next, **ptb;
1248 tb1 = tb->jmp_next[n];
1250 /* find head of list */
1253 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1256 tb1 = tb1->jmp_next[n1];
1258 /* we are now sure that tb jumps to tb1 */
1261 /* remove tb from the jmp_first list */
1262 ptb = &tb_next->jmp_first;
1266 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1267 if (n1 == n && tb1 == tb)
1269 ptb = &tb1->jmp_next[n1];
1271 *ptb = tb->jmp_next[n];
1272 tb->jmp_next[n] = NULL;
1274 /* suppress the jump to next tb in generated code */
1275 tb_reset_jump(tb, n);
1277 /* recursively suppress jumps in the tb we could have jumped to */
1278 tb_reset_jump_recursive(tb_next);
1282 static void tb_reset_jump_recursive(TranslationBlock *tb)
1284 tb_reset_jump_recursive2(tb, 0);
1285 tb_reset_jump_recursive2(tb, 1);
1288 #if defined(TARGET_HAS_ICE)
1289 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1291 target_phys_addr_t addr;
1293 ram_addr_t ram_addr;
1296 addr = cpu_get_phys_page_debug(env, pc);
1297 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1299 pd = IO_MEM_UNASSIGNED;
1301 pd = p->phys_offset;
1303 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1304 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1308 /* Add a watchpoint. */
1309 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1310 int flags, CPUWatchpoint **watchpoint)
1312 target_ulong len_mask = ~(len - 1);
1315 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1316 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1317 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1318 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1321 wp = qemu_malloc(sizeof(*wp));
1324 wp->len_mask = len_mask;
1327 /* keep all GDB-injected watchpoints in front */
1329 TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1331 TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1333 tlb_flush_page(env, addr);
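/* Illustrative example: a 4-byte watchpoint has len_mask == ~3, so the
   sanity check above requires the two low bits of addr to be clear.  The
   tlb_flush_page() call forces the page to be refilled through
   tlb_set_page_exec(), which notices the watchpoint and routes accesses
   to that page via the io_mem_watch handlers. */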
1340 /* Remove a specific watchpoint. */
1341 int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1344 target_ulong len_mask = ~(len - 1);
1347 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1348 if (addr == wp->vaddr && len_mask == wp->len_mask
1349 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1350 cpu_watchpoint_remove_by_ref(env, wp);
1357 /* Remove a specific watchpoint by reference. */
1358 void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1360 TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1362 tlb_flush_page(env, watchpoint->vaddr);
1364 qemu_free(watchpoint);
1367 /* Remove all matching watchpoints. */
1368 void cpu_watchpoint_remove_all(CPUState *env, int mask)
1370 CPUWatchpoint *wp, *next;
1372 TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1373 if (wp->flags & mask)
1374 cpu_watchpoint_remove_by_ref(env, wp);
1378 /* Add a breakpoint. */
1379 int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1380 CPUBreakpoint **breakpoint)
1382 #if defined(TARGET_HAS_ICE)
1385 bp = qemu_malloc(sizeof(*bp));
1390 /* keep all GDB-injected breakpoints in front */
1392 TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1394 TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1396 breakpoint_invalidate(env, pc);
1406 /* Remove a specific breakpoint. */
1407 int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1409 #if defined(TARGET_HAS_ICE)
1412 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1413 if (bp->pc == pc && bp->flags == flags) {
1414 cpu_breakpoint_remove_by_ref(env, bp);
1424 /* Remove a specific breakpoint by reference. */
1425 void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1427 #if defined(TARGET_HAS_ICE)
1428 TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1430 breakpoint_invalidate(env, breakpoint->pc);
1432 qemu_free(breakpoint);
1436 /* Remove all matching breakpoints. */
1437 void cpu_breakpoint_remove_all(CPUState *env, int mask)
1439 #if defined(TARGET_HAS_ICE)
1440 CPUBreakpoint *bp, *next;
1442 TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1443 if (bp->flags & mask)
1444 cpu_breakpoint_remove_by_ref(env, bp);
1449 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1450 CPU loop after each instruction */
1451 void cpu_single_step(CPUState *env, int enabled)
1453 #if defined(TARGET_HAS_ICE)
1454 if (env->singlestep_enabled != enabled) {
1455 env->singlestep_enabled = enabled;
1457 kvm_update_guest_debug(env, 0);
1459 /* must flush all the translated code to avoid inconsistencies */
1460 /* XXX: only flush what is necessary */
1467 /* enable or disable low-level logging */
1468 void cpu_set_log(int log_flags)
1470 loglevel = log_flags;
1471 if (loglevel && !logfile) {
1472 logfile = fopen(logfilename, log_append ? "a" : "w");
1474 perror(logfilename);
1477 #if !defined(CONFIG_SOFTMMU)
1478 /* must avoid glibc's use of mmap() by setting a buffer "by hand" */
1480 static char logfile_buf[4096];
1481 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1484 setvbuf(logfile, NULL, _IOLBF, 0);
1488 if (!loglevel && logfile) {
1494 void cpu_set_log_filename(const char *filename)
1496 logfilename = strdup(filename);
1501 cpu_set_log(loglevel);
1504 static void cpu_unlink_tb(CPUState *env)
1506 #if defined(USE_NPTL)
1507 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1508 problem and hope the cpu will stop of its own accord. For userspace
1509 emulation this often isn't actually as bad as it sounds. Often
1510 signals are used primarily to interrupt blocking syscalls. */
1512 TranslationBlock *tb;
1513 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1515 tb = env->current_tb;
1516 /* if the cpu is currently executing code, we must unlink it and
1517 all the potentially executing TB */
1518 if (tb && !testandset(&interrupt_lock)) {
1519 env->current_tb = NULL;
1520 tb_reset_jump_recursive(tb);
1521 resetlock(&interrupt_lock);
1526 /* mask must never be zero, except for A20 change call */
1527 void cpu_interrupt(CPUState *env, int mask)
1531 old_mask = env->interrupt_request;
1532 env->interrupt_request |= mask;
1535 env->icount_decr.u16.high = 0xffff;
1536 #ifndef CONFIG_USER_ONLY
1538 && (mask & ~old_mask) != 0) {
1539 cpu_abort(env, "Raised interrupt while not in I/O function");
1547 void cpu_reset_interrupt(CPUState *env, int mask)
1549 env->interrupt_request &= ~mask;
1552 void cpu_exit(CPUState *env)
1554 env->exit_request = 1;
1558 const CPULogItem cpu_log_items[] = {
1559 { CPU_LOG_TB_OUT_ASM, "out_asm",
1560 "show generated host assembly code for each compiled TB" },
1561 { CPU_LOG_TB_IN_ASM, "in_asm",
1562 "show target assembly code for each compiled TB" },
1563 { CPU_LOG_TB_OP, "op",
1564 "show micro ops for each compiled TB" },
1565 { CPU_LOG_TB_OP_OPT, "op_opt",
1568 "before eflags optimization and "
1570 "after liveness analysis" },
1571 { CPU_LOG_INT, "int",
1572 "show interrupts/exceptions in short format" },
1573 { CPU_LOG_EXEC, "exec",
1574 "show trace before each executed TB (lots of logs)" },
1575 { CPU_LOG_TB_CPU, "cpu",
1576 "show CPU state before block translation" },
1578 { CPU_LOG_PCALL, "pcall",
1579 "show protected mode far calls/returns/exceptions" },
1580 { CPU_LOG_RESET, "cpu_reset",
1581 "show CPU state before CPU resets" },
1584 { CPU_LOG_IOPORT, "ioport",
1585 "show all I/O port accesses" },
1590 static int cmp1(const char *s1, int n, const char *s2)
1592 if (strlen(s2) != n)
1594 return memcmp(s1, s2, n) == 0;
1597 /* takes a comma-separated list of log masks. Returns 0 on error. */
1598 int cpu_str_to_log_mask(const char *str)
1600 const CPULogItem *item;
1607 p1 = strchr(p, ',');
1610 if(cmp1(p,p1-p,"all")) {
1611 for(item = cpu_log_items; item->mask != 0; item++) {
1615 for(item = cpu_log_items; item->mask != 0; item++) {
1616 if (cmp1(p, p1 - p, item->name))
1630 void cpu_abort(CPUState *env, const char *fmt, ...)
1637 fprintf(stderr, "qemu: fatal: ");
1638 vfprintf(stderr, fmt, ap);
1639 fprintf(stderr, "\n");
1641 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1643 cpu_dump_state(env, stderr, fprintf, 0);
1645 if (qemu_log_enabled()) {
1646 qemu_log("qemu: fatal: ");
1647 qemu_log_vprintf(fmt, ap2);
1650 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1652 log_cpu_state(env, 0);
1662 CPUState *cpu_copy(CPUState *env)
1664 CPUState *new_env = cpu_init(env->cpu_model_str);
1665 CPUState *next_cpu = new_env->next_cpu;
1666 int cpu_index = new_env->cpu_index;
1667 #if defined(TARGET_HAS_ICE)
1672 memcpy(new_env, env, sizeof(CPUState));
1674 /* Preserve chaining and index. */
1675 new_env->next_cpu = next_cpu;
1676 new_env->cpu_index = cpu_index;
1678 /* Clone all break/watchpoints.
1679 Note: Once we support ptrace with hw-debug register access, make sure
1680 BP_CPU break/watchpoints are handled correctly on clone. */
1681 TAILQ_INIT(&env->breakpoints);
1682 TAILQ_INIT(&env->watchpoints);
1683 #if defined(TARGET_HAS_ICE)
1684 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1685 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1687 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1688 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1696 #if !defined(CONFIG_USER_ONLY)
1698 static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1702 /* Discard jump cache entries for any tb which might potentially
1703 overlap the flushed page. */
1704 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1705 memset (&env->tb_jmp_cache[i], 0,
1706 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1708 i = tb_jmp_cache_hash_page(addr);
1709 memset (&env->tb_jmp_cache[i], 0,
1710 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
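/* Descriptive note: two hash ranges are cleared because a TB may start on
   the page preceding 'addr' and spill into the flushed page; wiping
   TB_JMP_PAGE_SIZE consecutive entries for both page hashes is a cheap
   over-approximation of "every cached TB that might touch this page". */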
1713 /* NOTE: if flush_global is true, also flush global entries (not
1715 void tlb_flush(CPUState *env, int flush_global)
1719 #if defined(DEBUG_TLB)
1720 printf("tlb_flush:\n");
1722 /* must reset current TB so that interrupts cannot modify the
1723 links while we are modifying them */
1724 env->current_tb = NULL;
1726 for(i = 0; i < CPU_TLB_SIZE; i++) {
1727 env->tlb_table[0][i].addr_read = -1;
1728 env->tlb_table[0][i].addr_write = -1;
1729 env->tlb_table[0][i].addr_code = -1;
1730 env->tlb_table[1][i].addr_read = -1;
1731 env->tlb_table[1][i].addr_write = -1;
1732 env->tlb_table[1][i].addr_code = -1;
1733 #if (NB_MMU_MODES >= 3)
1734 env->tlb_table[2][i].addr_read = -1;
1735 env->tlb_table[2][i].addr_write = -1;
1736 env->tlb_table[2][i].addr_code = -1;
1738 #if (NB_MMU_MODES >= 4)
1739 env->tlb_table[3][i].addr_read = -1;
1740 env->tlb_table[3][i].addr_write = -1;
1741 env->tlb_table[3][i].addr_code = -1;
1743 #if (NB_MMU_MODES >= 5)
1744 env->tlb_table[4][i].addr_read = -1;
1745 env->tlb_table[4][i].addr_write = -1;
1746 env->tlb_table[4][i].addr_code = -1;
1751 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1754 if (env->kqemu_enabled) {
1755 kqemu_flush(env, flush_global);
1761 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1763 if (addr == (tlb_entry->addr_read &
1764 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1765 addr == (tlb_entry->addr_write &
1766 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1767 addr == (tlb_entry->addr_code &
1768 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1769 tlb_entry->addr_read = -1;
1770 tlb_entry->addr_write = -1;
1771 tlb_entry->addr_code = -1;
1775 void tlb_flush_page(CPUState *env, target_ulong addr)
1779 #if defined(DEBUG_TLB)
1780 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1782 /* must reset current TB so that interrupts cannot modify the
1783 links while we are modifying them */
1784 env->current_tb = NULL;
1786 addr &= TARGET_PAGE_MASK;
1787 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1788 tlb_flush_entry(&env->tlb_table[0][i], addr);
1789 tlb_flush_entry(&env->tlb_table[1][i], addr);
1790 #if (NB_MMU_MODES >= 3)
1791 tlb_flush_entry(&env->tlb_table[2][i], addr);
1793 #if (NB_MMU_MODES >= 4)
1794 tlb_flush_entry(&env->tlb_table[3][i], addr);
1796 #if (NB_MMU_MODES >= 5)
1797 tlb_flush_entry(&env->tlb_table[4][i], addr);
1800 tlb_flush_jmp_cache(env, addr);
1803 if (env->kqemu_enabled) {
1804 kqemu_flush_page(env, addr);
1809 /* update the TLBs so that writes to code in the virtual page 'addr'
1811 static void tlb_protect_code(ram_addr_t ram_addr)
1813 cpu_physical_memory_reset_dirty(ram_addr,
1814 ram_addr + TARGET_PAGE_SIZE,
1818 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1819 tested for self modifying code */
1820 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1823 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1826 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1827 unsigned long start, unsigned long length)
1830 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1831 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1832 if ((addr - start) < length) {
1833 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
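/* Descriptive note: dirty state is tracked as one byte per ram page in
   phys_ram_dirty, with independent flags (e.g. CODE_DIRTY_FLAG above)
   ORed into it.  cpu_physical_memory_reset_dirty() below clears a flag
   over a range and re-arms write faults by flipping the matching TLB
   write entries back to TLB_NOTDIRTY; the helper above does that flip
   for a single TLB entry. */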
1838 /* Note: start and end must be within the same ram block. */
1839 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1843 unsigned long length, start1;
1847 start &= TARGET_PAGE_MASK;
1848 end = TARGET_PAGE_ALIGN(end);
1850 length = end - start;
1853 len = length >> TARGET_PAGE_BITS;
1855 /* XXX: should not depend on cpu context */
1857 if (env->kqemu_enabled) {
1860 for(i = 0; i < len; i++) {
1861 kqemu_set_notdirty(env, addr);
1862 addr += TARGET_PAGE_SIZE;
1866 mask = ~dirty_flags;
1867 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1868 for(i = 0; i < len; i++)
1871 /* we modify the TLB cache so that the dirty bit will be set again
1872 when accessing the range */
1873 start1 = (unsigned long)qemu_get_ram_ptr(start);
1874 /* Check that we don't span multiple blocks - this breaks the
1875 address comparisons below. */
1876 if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
1877 != (end - 1) - start) {
1881 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1882 for(i = 0; i < CPU_TLB_SIZE; i++)
1883 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1884 for(i = 0; i < CPU_TLB_SIZE; i++)
1885 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1886 #if (NB_MMU_MODES >= 3)
1887 for(i = 0; i < CPU_TLB_SIZE; i++)
1888 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1890 #if (NB_MMU_MODES >= 4)
1891 for(i = 0; i < CPU_TLB_SIZE; i++)
1892 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1894 #if (NB_MMU_MODES >= 5)
1895 for(i = 0; i < CPU_TLB_SIZE; i++)
1896 tlb_reset_dirty_range(&env->tlb_table[4][i], start1, length);
1901 int cpu_physical_memory_set_dirty_tracking(int enable)
1903 in_migration = enable;
1907 int cpu_physical_memory_get_dirty_tracking(void)
1909 return in_migration;
1912 void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
1915 kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1918 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1920 ram_addr_t ram_addr;
1923 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1924 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
1925 + tlb_entry->addend);
1926 ram_addr = qemu_ram_addr_from_host(p);
1927 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1928 tlb_entry->addr_write |= TLB_NOTDIRTY;
1933 /* update the TLB according to the current state of the dirty bits */
1934 void cpu_tlb_update_dirty(CPUState *env)
1937 for(i = 0; i < CPU_TLB_SIZE; i++)
1938 tlb_update_dirty(&env->tlb_table[0][i]);
1939 for(i = 0; i < CPU_TLB_SIZE; i++)
1940 tlb_update_dirty(&env->tlb_table[1][i]);
1941 #if (NB_MMU_MODES >= 3)
1942 for(i = 0; i < CPU_TLB_SIZE; i++)
1943 tlb_update_dirty(&env->tlb_table[2][i]);
1945 #if (NB_MMU_MODES >= 4)
1946 for(i = 0; i < CPU_TLB_SIZE; i++)
1947 tlb_update_dirty(&env->tlb_table[3][i]);
1949 #if (NB_MMU_MODES >= 5)
1950 for(i = 0; i < CPU_TLB_SIZE; i++)
1951 tlb_update_dirty(&env->tlb_table[4][i]);
1955 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1957 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1958 tlb_entry->addr_write = vaddr;
1961 /* update the TLB corresponding to virtual page vaddr
1962 so that it is no longer dirty */
1963 static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1967 vaddr &= TARGET_PAGE_MASK;
1968 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1969 tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
1970 tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
1971 #if (NB_MMU_MODES >= 3)
1972 tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
1974 #if (NB_MMU_MODES >= 4)
1975 tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
1977 #if (NB_MMU_MODES >= 5)
1978 tlb_set_dirty1(&env->tlb_table[4][i], vaddr);
1982 /* add a new TLB entry. At most one entry for a given virtual address
1983 is permitted. Return 0 if OK or 2 if the page could not be mapped
1984 (can only happen in non SOFTMMU mode for I/O pages or pages
1985 conflicting with the host address space). */
1986 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1987 target_phys_addr_t paddr, int prot,
1988 int mmu_idx, int is_softmmu)
1993 target_ulong address;
1994 target_ulong code_address;
1995 target_phys_addr_t addend;
1999 target_phys_addr_t iotlb;
2001 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
2003 pd = IO_MEM_UNASSIGNED;
2005 pd = p->phys_offset;
2007 #if defined(DEBUG_TLB)
2008 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
2009 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
2014 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2015 /* IO memory case (romd handled later) */
2016 address |= TLB_MMIO;
2018 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
2019 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2021 iotlb = pd & TARGET_PAGE_MASK;
2022 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2023 iotlb |= IO_MEM_NOTDIRTY;
2025 iotlb |= IO_MEM_ROM;
2027 /* IO handlers are currently passed a physical address.
2028 It would be nice to pass an offset from the base address
2029 of that region. This would avoid having to special case RAM,
2030 and avoid full address decoding in every device.
2031 We can't use the high bits of pd for this because
2032 IO_MEM_ROMD uses these as a ram address. */
2033 iotlb = (pd & ~TARGET_PAGE_MASK);
2035 iotlb += p->region_offset;
2041 code_address = address;
2042 /* Make accesses to pages with watchpoints go via the
2043 watchpoint trap routines. */
2044 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2045 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2046 iotlb = io_mem_watch + paddr;
2047 /* TODO: The memory case can be optimized by not trapping
2048 reads of pages with a write breakpoint. */
2049 address |= TLB_MMIO;
2053 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2054 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2055 te = &env->tlb_table[mmu_idx][index];
2056 te->addend = addend - vaddr;
2057 if (prot & PAGE_READ) {
2058 te->addr_read = address;
2063 if (prot & PAGE_EXEC) {
2064 te->addr_code = code_address;
2068 if (prot & PAGE_WRITE) {
2069 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2070 (pd & IO_MEM_ROMD)) {
2071 /* Write access calls the I/O callback. */
2072 te->addr_write = address | TLB_MMIO;
2073 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2074 !cpu_physical_memory_is_dirty(pd)) {
2075 te->addr_write = address | TLB_NOTDIRTY;
2077 te->addr_write = address;
2080 te->addr_write = -1;
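/* Descriptive note on the fast path being set up here: for RAM pages the
   TLB stores addend == host_page - vaddr_page, so a hit turns a guest
   virtual address into a host pointer with a single add.  Clean RAM pages
   get TLB_NOTDIRTY in addr_write, so the first store goes through the
   notdirty handlers, which set the dirty flags and then drop the bit via
   tlb_set_dirty(). */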
2087 void tlb_flush(CPUState *env, int flush_global)
2091 void tlb_flush_page(CPUState *env, target_ulong addr)
2095 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2096 target_phys_addr_t paddr, int prot,
2097 int mmu_idx, int is_softmmu)
2102 /* dump memory mappings */
2103 void page_dump(FILE *f)
2105 unsigned long start, end;
2106 int i, j, prot, prot1;
2109 fprintf(f, "%-8s %-8s %-8s %s\n",
2110 "start", "end", "size", "prot");
2114 for(i = 0; i <= L1_SIZE; i++) {
2119 for(j = 0;j < L2_SIZE; j++) {
2124 if (prot1 != prot) {
2125 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2127 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2128 start, end, end - start,
2129 prot & PAGE_READ ? 'r' : '-',
2130 prot & PAGE_WRITE ? 'w' : '-',
2131 prot & PAGE_EXEC ? 'x' : '-');
2145 int page_get_flags(target_ulong address)
2149 p = page_find(address >> TARGET_PAGE_BITS);
2155 /* modify the flags of a page and invalidate the code if
2156 necessary. The flag PAGE_WRITE_ORG is positioned automatically
2157 depending on PAGE_WRITE */
2158 void page_set_flags(target_ulong start, target_ulong end, int flags)
2163 /* mmap_lock should already be held. */
2164 start = start & TARGET_PAGE_MASK;
2165 end = TARGET_PAGE_ALIGN(end);
2166 if (flags & PAGE_WRITE)
2167 flags |= PAGE_WRITE_ORG;
2168 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2169 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2170 /* We may be called for host regions that are outside guest
2174 /* if the write protection is set, then we invalidate the code
2176 if (!(p->flags & PAGE_WRITE) &&
2177 (flags & PAGE_WRITE) &&
2179 tb_invalidate_phys_page(addr, 0, NULL);
2185 int page_check_range(target_ulong start, target_ulong len, int flags)
2191 if (start + len < start)
2192 /* we've wrapped around */
2195 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2196 start = start & TARGET_PAGE_MASK;
2198 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2199 p = page_find(addr >> TARGET_PAGE_BITS);
2202 if( !(p->flags & PAGE_VALID) )
2205 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2207 if (flags & PAGE_WRITE) {
2208 if (!(p->flags & PAGE_WRITE_ORG))
2210 /* unprotect the page if it was put read-only because it
2211 contains translated code */
2212 if (!(p->flags & PAGE_WRITE)) {
2213 if (!page_unprotect(addr, 0, NULL))
2222 /* called from signal handler: invalidate the code and unprotect the
2223 page. Return TRUE if the fault was successfully handled. */
2224 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2226 unsigned int page_index, prot, pindex;
2228 target_ulong host_start, host_end, addr;
2230 /* Technically this isn't safe inside a signal handler. However we
2231 know this only ever happens in a synchronous SEGV handler, so in
2232 practice it seems to be ok. */
2235 host_start = address & qemu_host_page_mask;
2236 page_index = host_start >> TARGET_PAGE_BITS;
2237 p1 = page_find(page_index);
2242 host_end = host_start + qemu_host_page_size;
2245 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2249 /* if the page was really writable, then we change its
2250 protection back to writable */
2251 if (prot & PAGE_WRITE_ORG) {
2252 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2253 if (!(p1[pindex].flags & PAGE_WRITE)) {
2254 mprotect((void *)g2h(host_start), qemu_host_page_size,
2255 (prot & PAGE_BITS) | PAGE_WRITE);
2256 p1[pindex].flags |= PAGE_WRITE;
2257 /* and since the content will be modified, we must invalidate
2258 the corresponding translated code. */
2259 tb_invalidate_phys_page(address, pc, puc);
2260 #ifdef DEBUG_TB_CHECK
2261 tb_invalidate_check(address);
2271 static inline void tlb_set_dirty(CPUState *env,
2272 unsigned long addr, target_ulong vaddr)
2275 #endif /* defined(CONFIG_USER_ONLY) */
2277 #if !defined(CONFIG_USER_ONLY)
2279 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2280 ram_addr_t memory, ram_addr_t region_offset);
2281 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2282 ram_addr_t orig_memory, ram_addr_t region_offset);
2283 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2286 if (addr > start_addr) \
2289 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2290 if (start_addr2 > 0) \
2294 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2295 end_addr2 = TARGET_PAGE_SIZE - 1; \
2297 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2298 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2303 /* register physical memory. 'size' must be a multiple of the target
2304 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2305 io memory page. The address used when calling the IO function is
2306 the offset from the start of the region, plus region_offset. Both
2307 start_addr and region_offset are rounded down to a page boundary
2308 before calculating this offset. This should not be a problem unless
2309 the low bits of start_addr and region_offset differ. */
2310 void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2312 ram_addr_t phys_offset,
2313 ram_addr_t region_offset)
2315 target_phys_addr_t addr, end_addr;
2318 ram_addr_t orig_size = size;
2322 /* XXX: should not depend on cpu context */
2324 if (env->kqemu_enabled) {
2325 kqemu_set_phys_mem(start_addr, size, phys_offset);
2329 kvm_set_phys_mem(start_addr, size, phys_offset);
2331 if (phys_offset == IO_MEM_UNASSIGNED) {
2332 region_offset = start_addr;
2334 region_offset &= TARGET_PAGE_MASK;
2335 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2336 end_addr = start_addr + (target_phys_addr_t)size;
2337 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2338 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2339 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2340 ram_addr_t orig_memory = p->phys_offset;
2341 target_phys_addr_t start_addr2, end_addr2;
2342 int need_subpage = 0;
2344 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2346 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2347 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2348 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2349 &p->phys_offset, orig_memory,
2352 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2355 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2357 p->region_offset = 0;
2359 p->phys_offset = phys_offset;
2360 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2361 (phys_offset & IO_MEM_ROMD))
2362 phys_offset += TARGET_PAGE_SIZE;
2365 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2366 p->phys_offset = phys_offset;
2367 p->region_offset = region_offset;
2368 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2369 (phys_offset & IO_MEM_ROMD)) {
2370 phys_offset += TARGET_PAGE_SIZE;
2372 target_phys_addr_t start_addr2, end_addr2;
2373 int need_subpage = 0;
2375 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2376 end_addr2, need_subpage);
2378 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2379 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2380 &p->phys_offset, IO_MEM_UNASSIGNED,
2381 addr & TARGET_PAGE_MASK);
2382 subpage_register(subpage, start_addr2, end_addr2,
2383 phys_offset, region_offset);
2384 p->region_offset = 0;
2388 region_offset += TARGET_PAGE_SIZE;
2391 /* since each CPU stores ram addresses in its TLB cache, we must
2392 reset the modified entries */
2394 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2399 /* XXX: temporary until new memory mapping API */
2400 ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2404 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2406 return IO_MEM_UNASSIGNED;
2407 return p->phys_offset;
2410 void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2413 kvm_coalesce_mmio_region(addr, size);
2416 void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2419 kvm_uncoalesce_mmio_region(addr, size);
2422 /* XXX: better than nothing */
2423 ram_addr_t qemu_ram_alloc(ram_addr_t size)
2426 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2427 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2428 (uint64_t)size, (uint64_t)phys_ram_size);
2431 addr = phys_ram_alloc_offset;
2432 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2436 void qemu_ram_free(ram_addr_t addr)
2440 /* Return a host pointer to ram allocated with qemu_ram_alloc.
2441 With the exception of the softmmu code in this file, this should
2442 only be used for local memory (e.g. video ram) that the device owns,
2443 and knows it isn't going to access beyond the end of the block.
2445 It should not be used for general purpose DMA.
2446    Use cpu_physical_memory_map/cpu_physical_memory_rw instead. */
2448 void *qemu_get_ram_ptr(ram_addr_t addr)
2450 return phys_ram_base + addr;
2453 /* Some of the softmmu routines need to translate from a host pointer
2454 (typically a TLB entry) back to a ram offset. */
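/* Round-trip sketch (illustrative only, not from the original source): the
   offset handed out by qemu_ram_alloc() and the host pointer returned by
   qemu_get_ram_ptr() are inverses of each other, which is exactly what the
   translation below relies on:

       ram_addr_t offset = qemu_ram_alloc(TARGET_PAGE_SIZE);
       uint8_t *host = qemu_get_ram_ptr(offset);
       ram_addr_t back = qemu_ram_addr_from_host(host);

   Here back always equals offset as long as host points into the guest
   RAM block. */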
2455 ram_addr_t qemu_ram_addr_from_host(void *ptr)
2457 return (uint8_t *)ptr - phys_ram_base;
2460 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2462 #ifdef DEBUG_UNASSIGNED
2463 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2465 #if defined(TARGET_SPARC)
2466 do_unassigned_access(addr, 0, 0, 0, 1);
2471 static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2473 #ifdef DEBUG_UNASSIGNED
2474 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2476 #if defined(TARGET_SPARC)
2477 do_unassigned_access(addr, 0, 0, 0, 2);
2482 static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2484 #ifdef DEBUG_UNASSIGNED
2485 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2487 #if defined(TARGET_SPARC)
2488 do_unassigned_access(addr, 0, 0, 0, 4);
2493 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2495 #ifdef DEBUG_UNASSIGNED
2496 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2498 #if defined(TARGET_SPARC)
2499 do_unassigned_access(addr, 1, 0, 0, 1);
2503 static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2505 #ifdef DEBUG_UNASSIGNED
2506 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2508 #if defined(TARGET_SPARC)
2509 do_unassigned_access(addr, 1, 0, 0, 2);
2513 static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2515 #ifdef DEBUG_UNASSIGNED
2516 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2518 #if defined(TARGET_SPARC)
2519 do_unassigned_access(addr, 1, 0, 0, 4);
2523 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2524 unassigned_mem_readb,
2525 unassigned_mem_readw,
2526 unassigned_mem_readl,
2529 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2530 unassigned_mem_writeb,
2531 unassigned_mem_writew,
2532 unassigned_mem_writel,
2535 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2539 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2540 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2541 #if !defined(CONFIG_USER_ONLY)
2542 tb_invalidate_phys_page_fast(ram_addr, 1);
2543 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2546 stb_p(qemu_get_ram_ptr(ram_addr), val);
2548 if (cpu_single_env->kqemu_enabled &&
2549 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2550 kqemu_modify_page(cpu_single_env, ram_addr);
2552 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2553 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2554     /* we remove the notdirty callback only if the code has been flushed */
2556 if (dirty_flags == 0xff)
2557 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2560 static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2564 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2565 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2566 #if !defined(CONFIG_USER_ONLY)
2567 tb_invalidate_phys_page_fast(ram_addr, 2);
2568 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2571 stw_p(qemu_get_ram_ptr(ram_addr), val);
2573 if (cpu_single_env->kqemu_enabled &&
2574 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2575 kqemu_modify_page(cpu_single_env, ram_addr);
2577 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2578 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2579     /* we remove the notdirty callback only if the code has been flushed */
2581 if (dirty_flags == 0xff)
2582 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2585 static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2589 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2590 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2591 #if !defined(CONFIG_USER_ONLY)
2592 tb_invalidate_phys_page_fast(ram_addr, 4);
2593 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2596 stl_p(qemu_get_ram_ptr(ram_addr), val);
2598 if (cpu_single_env->kqemu_enabled &&
2599 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2600 kqemu_modify_page(cpu_single_env, ram_addr);
2602 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2603 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2604     /* we remove the notdirty callback only if the code has been flushed */
2606 if (dirty_flags == 0xff)
2607 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2610 static CPUReadMemoryFunc *error_mem_read[3] = {
2611 NULL, /* never used */
2612 NULL, /* never used */
2613 NULL, /* never used */
2616 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2617 notdirty_mem_writeb,
2618 notdirty_mem_writew,
2619 notdirty_mem_writel,
2622 /* Generate a debug exception if a watchpoint has been hit. */
2623 static void check_watchpoint(int offset, int len_mask, int flags)
2625 CPUState *env = cpu_single_env;
2626 target_ulong pc, cs_base;
2627 TranslationBlock *tb;
2632 if (env->watchpoint_hit) {
2633 /* We re-entered the check after replacing the TB. Now raise
2634          * the debug interrupt so that it will trigger after the
2635 * current instruction. */
2636 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2639 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2640 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2641 if ((vaddr == (wp->vaddr & len_mask) ||
2642 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
2643 wp->flags |= BP_WATCHPOINT_HIT;
2644 if (!env->watchpoint_hit) {
2645 env->watchpoint_hit = wp;
2646 tb = tb_find_pc(env->mem_io_pc);
2648 cpu_abort(env, "check_watchpoint: could not find TB for "
2649 "pc=%p", (void *)env->mem_io_pc);
2651 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2652 tb_phys_invalidate(tb, -1);
2653 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2654 env->exception_index = EXCP_DEBUG;
2656 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2657 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2659 cpu_resume_from_signal(env, NULL);
2662 wp->flags &= ~BP_WATCHPOINT_HIT;
2667 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2668    so these check for a hit then pass through to the normal out-of-line handlers. */
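/* Illustrative sketch (an assumption about typical callers, not from the
   original source): a debugger front end such as the gdb stub arms a
   watchpoint with cpu_watchpoint_insert(), defined earlier in this file;
   the TLB is then set up so that accesses to the watched page go through
   the watch_mem_* handlers below. vaddr is a hypothetical guest virtual
   address.

       CPUWatchpoint *wp;
       cpu_watchpoint_insert(env, vaddr, 4, BP_MEM_WRITE | BP_GDB, &wp);

   On the next guest store to that address, watch_mem_write* calls
   check_watchpoint(), which either stops before the access with
   EXCP_DEBUG (BP_STOP_BEFORE_ACCESS) or regenerates the TB so the debug
   exception is raised right after the access. */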
2670 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2672 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
2673 return ldub_phys(addr);
2676 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2678 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
2679 return lduw_phys(addr);
2682 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2684 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
2685 return ldl_phys(addr);
2688 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2691 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
2692 stb_phys(addr, val);
2695 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2698 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
2699 stw_phys(addr, val);
2702 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2705 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
2706 stl_phys(addr, val);
2709 static CPUReadMemoryFunc *watch_mem_read[3] = {
2715 static CPUWriteMemoryFunc *watch_mem_write[3] = {
2721 static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2727 idx = SUBPAGE_IDX(addr);
2728 #if defined(DEBUG_SUBPAGE)
2729 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2730 mmio, len, addr, idx);
2732 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2733 addr + mmio->region_offset[idx][0][len]);
2738 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2739 uint32_t value, unsigned int len)
2743 idx = SUBPAGE_IDX(addr);
2744 #if defined(DEBUG_SUBPAGE)
2745 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2746 mmio, len, addr, idx, value);
2748 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2749 addr + mmio->region_offset[idx][1][len],
2753 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2755 #if defined(DEBUG_SUBPAGE)
2756 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2759 return subpage_readlen(opaque, addr, 0);
2762 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2765 #if defined(DEBUG_SUBPAGE)
2766 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2768 subpage_writelen(opaque, addr, value, 0);
2771 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2773 #if defined(DEBUG_SUBPAGE)
2774 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2777 return subpage_readlen(opaque, addr, 1);
2780 static void subpage_writew (void *opaque, target_phys_addr_t addr,
2783 #if defined(DEBUG_SUBPAGE)
2784 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2786 subpage_writelen(opaque, addr, value, 1);
2789 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2791 #if defined(DEBUG_SUBPAGE)
2792 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2795 return subpage_readlen(opaque, addr, 2);
2798 static void subpage_writel (void *opaque,
2799 target_phys_addr_t addr, uint32_t value)
2801 #if defined(DEBUG_SUBPAGE)
2802 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2804 subpage_writelen(opaque, addr, value, 2);
2807 static CPUReadMemoryFunc *subpage_read[] = {
2813 static CPUWriteMemoryFunc *subpage_write[] = {
2819 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2820 ram_addr_t memory, ram_addr_t region_offset)
2825 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2827 idx = SUBPAGE_IDX(start);
2828 eidx = SUBPAGE_IDX(end);
2829 #if defined(DEBUG_SUBPAGE)
2830 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2831 mmio, start, end, idx, eidx, memory);
2833 memory >>= IO_MEM_SHIFT;
2834 for (; idx <= eidx; idx++) {
2835 for (i = 0; i < 4; i++) {
2836 if (io_mem_read[memory][i]) {
2837 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2838 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2839 mmio->region_offset[idx][0][i] = region_offset;
2841 if (io_mem_write[memory][i]) {
2842 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2843 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2844 mmio->region_offset[idx][1][i] = region_offset;
2852 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2853 ram_addr_t orig_memory, ram_addr_t region_offset)
2858 mmio = qemu_mallocz(sizeof(subpage_t));
2861 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2862 #if defined(DEBUG_SUBPAGE)
2863 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2864 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2866 *phys = subpage_memory | IO_MEM_SUBPAGE;
2867 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
2873 static int get_free_io_mem_idx(void)
2877 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
2878 if (!io_mem_used[i]) {
2886 static void io_mem_init(void)
2890 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2891 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2892 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2896 io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
2897 watch_mem_write, NULL);
2898 /* alloc dirty bits array */
2899 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2900 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2903 /* mem_read and mem_write are arrays of functions containing the
2904 function to access byte (index 0), word (index 1) and dword (index
2905 2). Functions can be omitted with a NULL function pointer. The
2906 registered functions may be modified dynamically later.
2907    If io_index is non-zero, the corresponding io zone is
2908    modified. If it is zero, a new io zone is allocated. The return
2909    value can be used with cpu_register_physical_memory(); -1 is
2910    returned on error. */
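/* Usage sketch (illustrative; the mydev_* callbacks and mydev_state are
   hypothetical): a device model supplies one handler per access size and
   maps the returned token over a physical address range.

       static CPUReadMemoryFunc *mydev_read[3] = {
           mydev_readb, mydev_readw, mydev_readl,
       };
       static CPUWriteMemoryFunc *mydev_write[3] = {
           mydev_writeb, mydev_writew, mydev_writel,
       };

       int io = cpu_register_io_memory(0, mydev_read, mydev_write, mydev_state);
       cpu_register_physical_memory(0xfe000000, 0x1000, io);

   If a handler for some width is NULL, the returned value carries
   IO_MEM_SUBWIDTH and the registration code above falls back to the
   subpage machinery for that range. */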
2911 int cpu_register_io_memory(int io_index,
2912 CPUReadMemoryFunc **mem_read,
2913 CPUWriteMemoryFunc **mem_write,
2916 int i, subwidth = 0;
2918 if (io_index <= 0) {
2919 io_index = get_free_io_mem_idx();
2923 if (io_index >= IO_MEM_NB_ENTRIES)
2927 for(i = 0;i < 3; i++) {
2928 if (!mem_read[i] || !mem_write[i])
2929 subwidth = IO_MEM_SUBWIDTH;
2930 io_mem_read[io_index][i] = mem_read[i];
2931 io_mem_write[io_index][i] = mem_write[i];
2933 io_mem_opaque[io_index] = opaque;
2934 return (io_index << IO_MEM_SHIFT) | subwidth;
2937 void cpu_unregister_io_memory(int io_table_address)
2940 int io_index = io_table_address >> IO_MEM_SHIFT;
2942 for (i=0;i < 3; i++) {
2943 io_mem_read[io_index][i] = unassigned_mem_read[i];
2944 io_mem_write[io_index][i] = unassigned_mem_write[i];
2946 io_mem_opaque[io_index] = NULL;
2947 io_mem_used[io_index] = 0;
2950 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2952 return io_mem_write[io_index >> IO_MEM_SHIFT];
2955 CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2957 return io_mem_read[io_index >> IO_MEM_SHIFT];
2960 #endif /* !defined(CONFIG_USER_ONLY) */
2962 /* physical memory access (slow version, mainly for debug) */
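/* Usage sketch (illustrative; desc_paddr is a hypothetical guest-physical
   address): device and debug code normally goes through the
   cpu_physical_memory_read()/cpu_physical_memory_write() wrappers, which
   call the function below with is_write set to 0 or 1.

       uint32_t desc[2];
       cpu_physical_memory_read(desc_paddr, (uint8_t *)desc, sizeof(desc));
       desc[1] |= 1;
       cpu_physical_memory_write(desc_paddr, (const uint8_t *)desc,
                                 sizeof(desc));

   Note that this does no byte swapping; callers that care about guest
   endianness use the ld*_phys/st*_phys helpers further down. */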
2963 #if defined(CONFIG_USER_ONLY)
2964 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2965 int len, int is_write)
2972 page = addr & TARGET_PAGE_MASK;
2973 l = (page + TARGET_PAGE_SIZE) - addr;
2976 flags = page_get_flags(page);
2977 if (!(flags & PAGE_VALID))
2980 if (!(flags & PAGE_WRITE))
2982 /* XXX: this code should not depend on lock_user */
2983 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2984 /* FIXME - should this return an error rather than just fail? */
2987 unlock_user(p, addr, l);
2989 if (!(flags & PAGE_READ))
2991 /* XXX: this code should not depend on lock_user */
2992 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2993 /* FIXME - should this return an error rather than just fail? */
2996 unlock_user(p, addr, 0);
3005 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3006 int len, int is_write)
3011 target_phys_addr_t page;
3016 page = addr & TARGET_PAGE_MASK;
3017 l = (page + TARGET_PAGE_SIZE) - addr;
3020 p = phys_page_find(page >> TARGET_PAGE_BITS);
3022 pd = IO_MEM_UNASSIGNED;
3024 pd = p->phys_offset;
3028 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3029 target_phys_addr_t addr1 = addr;
3030 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3032 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3033         /* XXX: could force cpu_single_env to NULL to avoid potential bugs */
3035 if (l >= 4 && ((addr1 & 3) == 0)) {
3036 /* 32 bit write access */
3038 io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
3040 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3041 /* 16 bit write access */
3043 io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
3046 /* 8 bit write access */
3048 io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
3052 unsigned long addr1;
3053 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3055 ptr = qemu_get_ram_ptr(addr1);
3056 memcpy(ptr, buf, l);
3057 if (!cpu_physical_memory_is_dirty(addr1)) {
3058 /* invalidate code */
3059 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3061 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3062 (0xff & ~CODE_DIRTY_FLAG);
3066 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3067 !(pd & IO_MEM_ROMD)) {
3068 target_phys_addr_t addr1 = addr;
3070 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3072 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3073 if (l >= 4 && ((addr1 & 3) == 0)) {
3074 /* 32 bit read access */
3075 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
3078 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3079 /* 16 bit read access */
3080 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
3084 /* 8 bit read access */
3085 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
3091 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3092 (addr & ~TARGET_PAGE_MASK);
3093 memcpy(buf, ptr, l);
3102 /* used for ROM loading: can write in RAM and ROM */
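/* Usage sketch (illustrative; bios_data and bios_size are hypothetical): a
   firmware loader goes through this helper so the data also lands in
   regions registered as ROM:

       cpu_physical_memory_write_rom(0xfffe0000, bios_data, bios_size);
*/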
3103 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3104 const uint8_t *buf, int len)
3108 target_phys_addr_t page;
3113 page = addr & TARGET_PAGE_MASK;
3114 l = (page + TARGET_PAGE_SIZE) - addr;
3117 p = phys_page_find(page >> TARGET_PAGE_BITS);
3119 pd = IO_MEM_UNASSIGNED;
3121 pd = p->phys_offset;
3124 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3125 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3126 !(pd & IO_MEM_ROMD)) {
3129 unsigned long addr1;
3130 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3132 ptr = qemu_get_ram_ptr(addr1);
3133 memcpy(ptr, buf, l);
3143 target_phys_addr_t addr;
3144 target_phys_addr_t len;
3147 static BounceBuffer bounce;
3149 typedef struct MapClient {
3151 void (*callback)(void *opaque);
3152 LIST_ENTRY(MapClient) link;
3155 static LIST_HEAD(map_client_list, MapClient) map_client_list
3156 = LIST_HEAD_INITIALIZER(map_client_list);
3158 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3160 MapClient *client = qemu_malloc(sizeof(*client));
3162 client->opaque = opaque;
3163 client->callback = callback;
3164 LIST_INSERT_HEAD(&map_client_list, client, link);
3168 void cpu_unregister_map_client(void *_client)
3170 MapClient *client = (MapClient *)_client;
3172 LIST_REMOVE(client, link);
3175 static void cpu_notify_map_clients(void)
3179 while (!LIST_EMPTY(&map_client_list)) {
3180 client = LIST_FIRST(&map_client_list);
3181 client->callback(client->opaque);
3182 LIST_REMOVE(client, link);
3186 /* Map a physical memory region into a host virtual address.
3187 * May map a subset of the requested range, given by and returned in *plen.
3188 * May return NULL if resources needed to perform the mapping are exhausted.
3189 * Use only for reads OR writes - not for read-modify-write operations.
3190 * Use cpu_register_map_client() to know when retrying the map operation is
3191  * likely to succeed. */
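/* DMA usage sketch (illustrative; dma_addr, dma_len, src, opaque and
   retry_cb are hypothetical): map the guest buffer, fill it, then unmap so
   that dirty tracking and the bounce-buffer bookkeeping are updated. If
   the map fails, register a callback and retry once resources are free.

       target_phys_addr_t plen = dma_len;
       void *buf = cpu_physical_memory_map(dma_addr, &plen, 1);
       if (!buf) {
           cpu_register_map_client(opaque, retry_cb);
       } else {
           memcpy(buf, src, plen);
           cpu_physical_memory_unmap(buf, plen, 1, plen);
       }
*/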
3193 void *cpu_physical_memory_map(target_phys_addr_t addr,
3194 target_phys_addr_t *plen,
3197 target_phys_addr_t len = *plen;
3198 target_phys_addr_t done = 0;
3200 uint8_t *ret = NULL;
3202 target_phys_addr_t page;
3205 unsigned long addr1;
3208 page = addr & TARGET_PAGE_MASK;
3209 l = (page + TARGET_PAGE_SIZE) - addr;
3212 p = phys_page_find(page >> TARGET_PAGE_BITS);
3214 pd = IO_MEM_UNASSIGNED;
3216 pd = p->phys_offset;
3219 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3220 if (done || bounce.buffer) {
3223 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3227 cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
3229 ptr = bounce.buffer;
3231 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3232 ptr = qemu_get_ram_ptr(addr1);
3236 } else if (ret + done != ptr) {
3248 /* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3249 * Will also mark the memory as dirty if is_write == 1. access_len gives
3250  * the amount of memory that was actually read or written by the caller. */
3252 void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3253 int is_write, target_phys_addr_t access_len)
3255 if (buffer != bounce.buffer) {
3257 ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
3258 while (access_len) {
3260 l = TARGET_PAGE_SIZE;
3263 if (!cpu_physical_memory_is_dirty(addr1)) {
3264 /* invalidate code */
3265 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3267 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3268 (0xff & ~CODE_DIRTY_FLAG);
3277 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3279 qemu_free(bounce.buffer);
3280 bounce.buffer = NULL;
3281 cpu_notify_map_clients();
3284 /* warning: addr must be aligned */
3285 uint32_t ldl_phys(target_phys_addr_t addr)
3293 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3295 pd = IO_MEM_UNASSIGNED;
3297 pd = p->phys_offset;
3300 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3301 !(pd & IO_MEM_ROMD)) {
3303 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3305 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3306 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3309 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3310 (addr & ~TARGET_PAGE_MASK);
3316 /* warning: addr must be aligned */
3317 uint64_t ldq_phys(target_phys_addr_t addr)
3325 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3327 pd = IO_MEM_UNASSIGNED;
3329 pd = p->phys_offset;
3332 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3333 !(pd & IO_MEM_ROMD)) {
3335 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3337 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3338 #ifdef TARGET_WORDS_BIGENDIAN
3339 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3340 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3342 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3343 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3347 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3348 (addr & ~TARGET_PAGE_MASK);
3355 uint32_t ldub_phys(target_phys_addr_t addr)
3358 cpu_physical_memory_read(addr, &val, 1);
3363 uint32_t lduw_phys(target_phys_addr_t addr)
3366 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3367 return tswap16(val);
3370 /* warning: addr must be aligned. The ram page is not marked as dirty
3371 and the code inside is not invalidated. It is useful if the dirty
3372 bits are used to track modified PTEs */
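/* Illustrative sketch (an assumption about typical callers, not from this
   file): a target page-table walker that sets the accessed bit of a guest
   PTE wants the store to reach RAM without marking the page as holding
   modified code, hence the _notdirty variant. PG_ACCESSED_MASK is the
   i386 PTE accessed bit; pte_addr is a hypothetical PTE address.

       uint32_t pte = ldl_phys(pte_addr);
       if (!(pte & PG_ACCESSED_MASK)) {
           pte |= PG_ACCESSED_MASK;
           stl_phys_notdirty(pte_addr, pte);
       }
*/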
3373 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3380 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3382 pd = IO_MEM_UNASSIGNED;
3384 pd = p->phys_offset;
3387 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3388 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3390 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3391 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3393 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3394 ptr = qemu_get_ram_ptr(addr1);
3397 if (unlikely(in_migration)) {
3398 if (!cpu_physical_memory_is_dirty(addr1)) {
3399 /* invalidate code */
3400 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3402 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3403 (0xff & ~CODE_DIRTY_FLAG);
3409 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3416 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3418 pd = IO_MEM_UNASSIGNED;
3420 pd = p->phys_offset;
3423 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3424 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3426 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3427 #ifdef TARGET_WORDS_BIGENDIAN
3428 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3429 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3431 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3432 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3435 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3436 (addr & ~TARGET_PAGE_MASK);
3441 /* warning: addr must be aligned */
3442 void stl_phys(target_phys_addr_t addr, uint32_t val)
3449 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3451 pd = IO_MEM_UNASSIGNED;
3453 pd = p->phys_offset;
3456 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3457 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3459 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3460 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3462 unsigned long addr1;
3463 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3465 ptr = qemu_get_ram_ptr(addr1);
3467 if (!cpu_physical_memory_is_dirty(addr1)) {
3468 /* invalidate code */
3469 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3471 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3472 (0xff & ~CODE_DIRTY_FLAG);
3478 void stb_phys(target_phys_addr_t addr, uint32_t val)
3481 cpu_physical_memory_write(addr, &v, 1);
3485 void stw_phys(target_phys_addr_t addr, uint32_t val)
3487 uint16_t v = tswap16(val);
3488 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3492 void stq_phys(target_phys_addr_t addr, uint64_t val)
3495 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3500 /* virtual memory access for debug (includes writing to ROM) */
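/* Usage sketch (illustrative): the gdb stub reads guest memory by virtual
   address through this helper.

       uint8_t insn[4];
       int err = cpu_memory_rw_debug(env, pc, insn, sizeof(insn), 0);

   err is 0 on success and -1 if some page in the range had no physical
   mapping. */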
3501 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3502 uint8_t *buf, int len, int is_write)
3505 target_phys_addr_t phys_addr;
3509 page = addr & TARGET_PAGE_MASK;
3510 phys_addr = cpu_get_phys_page_debug(env, page);
3511 /* if no physical page mapped, return an error */
3512 if (phys_addr == -1)
3514 l = (page + TARGET_PAGE_SIZE) - addr;
3517 phys_addr += (addr & ~TARGET_PAGE_MASK);
3518 #if !defined(CONFIG_USER_ONLY)
3520 cpu_physical_memory_write_rom(phys_addr, buf, l);
3523 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
3531 /* in deterministic execution mode, instructions doing device I/O
3532 must be at the end of the TB */
3533 void cpu_io_recompile(CPUState *env, void *retaddr)
3535 TranslationBlock *tb;
3537 target_ulong pc, cs_base;
3540 tb = tb_find_pc((unsigned long)retaddr);
3542 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3545 n = env->icount_decr.u16.low + tb->icount;
3546 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3547     /* Calculate how many instructions had been executed before the fault occurred. */
3549 n = n - env->icount_decr.u16.low;
3550 /* Generate a new TB ending on the I/O insn. */
3552 /* On MIPS and SH, delay slot instructions can only be restarted if
3553 they were already the first instruction in the TB. If this is not
3554        the first instruction in a TB then re-execute the preceding branch. */
3556 #if defined(TARGET_MIPS)
3557 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3558 env->active_tc.PC -= 4;
3559 env->icount_decr.u16.low++;
3560 env->hflags &= ~MIPS_HFLAG_BMASK;
3562 #elif defined(TARGET_SH4)
3563 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3566 env->icount_decr.u16.low++;
3567 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3570 /* This should never happen. */
3571 if (n > CF_COUNT_MASK)
3572 cpu_abort(env, "TB too big during recompile");
3574 cflags = n | CF_LAST_IO;
3576 cs_base = tb->cs_base;
3578 tb_phys_invalidate(tb, -1);
3579 /* FIXME: In theory this could raise an exception. In practice
3580 we have already translated the block once so it's probably ok. */
3581 tb_gen_code(env, pc, cs_base, flags, cflags);
3582 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
3583 the first in the TB) then we end up generating a whole new TB and
3584 repeating the fault, which is horribly inefficient.
3585        Better would be to execute just this insn uncached, or generate a second new TB. */
3587 cpu_resume_from_signal(env, NULL);
3590 void dump_exec_info(FILE *f,
3591 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3593 int i, target_code_size, max_target_code_size;
3594 int direct_jmp_count, direct_jmp2_count, cross_page;
3595 TranslationBlock *tb;
3597 target_code_size = 0;
3598 max_target_code_size = 0;
3600 direct_jmp_count = 0;
3601 direct_jmp2_count = 0;
3602 for(i = 0; i < nb_tbs; i++) {
3604 target_code_size += tb->size;
3605 if (tb->size > max_target_code_size)
3606 max_target_code_size = tb->size;
3607 if (tb->page_addr[1] != -1)
3609 if (tb->tb_next_offset[0] != 0xffff) {
3611 if (tb->tb_next_offset[1] != 0xffff) {
3612 direct_jmp2_count++;
3616     /* XXX: avoid using doubles? */
3617 cpu_fprintf(f, "Translation buffer state:\n");
3618 cpu_fprintf(f, "gen code size %ld/%ld\n",
3619 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3620 cpu_fprintf(f, "TB count %d/%d\n",
3621 nb_tbs, code_gen_max_blocks);
3622 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
3623 nb_tbs ? target_code_size / nb_tbs : 0,
3624 max_target_code_size);
3625 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3626 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3627 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3628 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3630 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3631 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3633 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3635 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3636 cpu_fprintf(f, "\nStatistics:\n");
3637 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3638 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3639 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
3640 tcg_dump_info(f, cpu_fprintf);
3643 #if !defined(CONFIG_USER_ONLY)
3645 #define MMUSUFFIX _cmmu
3646 #define GETPC() NULL
3647 #define env cpu_single_env
3648 #define SOFTMMU_CODE_ACCESS
3651 #include "softmmu_template.h"
3654 #include "softmmu_template.h"
3657 #include "softmmu_template.h"
3660 #include "softmmu_template.h"