2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
23 #include <sys/types.h>
27 #include "qemu-common.h"
35 #include "qemu-timer.h"
37 #include "exec-memory.h"
38 #if defined(CONFIG_USER_ONLY)
40 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
41 #include <sys/param.h>
42 #if __FreeBSD_version >= 700104
43 #define HAVE_KINFO_GETVMMAP
44 #define sigqueue sigqueue_freebsd /* avoid redefinition */
47 #include <machine/profile.h>
55 #else /* !CONFIG_USER_ONLY */
56 #include "xen-mapcache.h"
60 #define WANT_EXEC_OBSOLETE
61 #include "exec-obsolete.h"
63 //#define DEBUG_TB_INVALIDATE
66 //#define DEBUG_UNASSIGNED
68 /* make various TB consistency checks */
69 //#define DEBUG_TB_CHECK
70 //#define DEBUG_TLB_CHECK
72 //#define DEBUG_IOPORT
73 //#define DEBUG_SUBPAGE
75 #if !defined(CONFIG_USER_ONLY)
76 /* TB consistency checks only implemented for usermode emulation. */
80 #define SMC_BITMAP_USE_THRESHOLD 10
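/* Once a page has taken this many code write faults, build_page_bitmap()
   is called so that later writes which do not touch any TB byte on the
   page can be ignored cheaply (see tb_invalidate_phys_page_fast). */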
82 static TranslationBlock *tbs;
83 static int code_gen_max_blocks;
84 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
86 /* any access to the tbs or the page table must use this lock */
87 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
89 #if defined(__arm__) || defined(__sparc_v9__)
90 /* The prologue must be reachable with a direct jump. ARM and Sparc64
91 have limited branch ranges (possibly also PPC) so place it in a
92 section close to code segment. */
93 #define code_gen_section \
94 __attribute__((__section__(".gen_code"))) \
95 __attribute__((aligned (32)))
97 /* Maximum alignment for Win32 is 16. */
98 #define code_gen_section \
99 __attribute__((aligned (16)))
101 #define code_gen_section \
102 __attribute__((aligned (32)))
105 uint8_t code_gen_prologue[1024] code_gen_section;
106 static uint8_t *code_gen_buffer;
107 static unsigned long code_gen_buffer_size;
108 /* threshold to flush the translated code buffer */
109 static unsigned long code_gen_buffer_max_size;
110 static uint8_t *code_gen_ptr;
112 #if !defined(CONFIG_USER_ONLY)
114 static int in_migration;
116 RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
118 static MemoryRegion *system_memory;
119 static MemoryRegion *system_io;
121 MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
122 static MemoryRegion io_mem_subpage_ram;
127 /* current CPU in the current thread. It is only valid inside
129 DEFINE_TLS(CPUState *,cpu_single_env);
130 /* 0 = Do not count executed instructions.
131 1 = Precise instruction counting.
132 2 = Adaptive rate instruction counting. */
135 typedef struct PageDesc {
136 /* list of TBs intersecting this ram page */
137 TranslationBlock *first_tb;
138     /* in order to optimize self-modifying code handling, we count the number
139        of write accesses to a given page so that we can switch to a bitmap */
140 unsigned int code_write_count;
141 uint8_t *code_bitmap;
142 #if defined(CONFIG_USER_ONLY)
147 /* In system mode we want L1_MAP to be based on ram offsets,
148 while in user mode we want it to be based on virtual addresses. */
149 #if !defined(CONFIG_USER_ONLY)
150 #if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
151 # define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
153 # define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
156 # define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
159 /* Size of the L2 (and L3, etc) page tables. */
161 #define L2_SIZE (1 << L2_BITS)
163 #define P_L2_LEVELS \
164 (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)
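/* Worked example (illustrative values only): with a 36-bit physical
   address space, 12-bit pages and L2_BITS == 10, this evaluates to
   ((36 - 12 - 1) / 10) + 1 == 3 levels below the root. */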
166 /* The bits remaining after N lower levels of page tables. */
167 #define V_L1_BITS_REM \
168 ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
170 #if V_L1_BITS_REM < 4
171 #define V_L1_BITS (V_L1_BITS_REM + L2_BITS)
173 #define V_L1_BITS V_L1_BITS_REM
176 #define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)
178 #define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
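/* Illustration: a target address splits as
   [ V_L1_BITS | n * L2_BITS | TARGET_PAGE_BITS ]: the top V_L1_BITS bits
   index l1_map, each following L2_BITS group indexes one intermediate
   table, and the low TARGET_PAGE_BITS bits are the offset in the page. */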
180 unsigned long qemu_real_host_page_size;
181 unsigned long qemu_host_page_size;
182 unsigned long qemu_host_page_mask;
184 /* This is a multi-level map on the virtual address space.
185 The bottom level has pointers to PageDesc. */
186 static void *l1_map[V_L1_SIZE];
188 #if !defined(CONFIG_USER_ONLY)
189 typedef struct PhysPageDesc {
190 /* offset in host memory of the page + io_index in the low bits */
191 ram_addr_t phys_offset;
192 ram_addr_t region_offset;
195 /* This is a multi-level map on the physical address space.
196 The bottom level has pointers to PhysPageDesc. */
197 static void *phys_map;
199 static void io_mem_init(void);
200 static void memory_map_init(void);
202 /* io memory support */
203 MemoryRegion *io_mem_region[IO_MEM_NB_ENTRIES];
204 static char io_mem_used[IO_MEM_NB_ENTRIES];
205 static MemoryRegion io_mem_watch;
210 static const char *logfilename = "qemu.log";
212 static const char *logfilename = "/tmp/qemu.log";
216 static int log_append = 0;
219 #if !defined(CONFIG_USER_ONLY)
220 static int tlb_flush_count;
222 static int tb_flush_count;
223 static int tb_phys_invalidate_count;
226 static void map_exec(void *addr, long size)
229 VirtualProtect(addr, size,
230 PAGE_EXECUTE_READWRITE, &old_protect);
234 static void map_exec(void *addr, long size)
236 unsigned long start, end, page_size;
238 page_size = getpagesize();
239 start = (unsigned long)addr;
240 start &= ~(page_size - 1);
242 end = (unsigned long)addr + size;
243 end += page_size - 1;
244 end &= ~(page_size - 1);
246 mprotect((void *)start, end - start,
247 PROT_READ | PROT_WRITE | PROT_EXEC);
251 static void page_init(void)
253     /* NOTE: we can always assume that qemu_host_page_size >=
257 SYSTEM_INFO system_info;
259 GetSystemInfo(&system_info);
260 qemu_real_host_page_size = system_info.dwPageSize;
263 qemu_real_host_page_size = getpagesize();
265 if (qemu_host_page_size == 0)
266 qemu_host_page_size = qemu_real_host_page_size;
267 if (qemu_host_page_size < TARGET_PAGE_SIZE)
268 qemu_host_page_size = TARGET_PAGE_SIZE;
269 qemu_host_page_mask = ~(qemu_host_page_size - 1);
271 #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
273 #ifdef HAVE_KINFO_GETVMMAP
274 struct kinfo_vmentry *freep;
277 freep = kinfo_getvmmap(getpid(), &cnt);
280 for (i = 0; i < cnt; i++) {
281 unsigned long startaddr, endaddr;
283 startaddr = freep[i].kve_start;
284 endaddr = freep[i].kve_end;
285 if (h2g_valid(startaddr)) {
286 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
288 if (h2g_valid(endaddr)) {
289 endaddr = h2g(endaddr);
290 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
292 #if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
294 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
305 last_brk = (unsigned long)sbrk(0);
307 f = fopen("/compat/linux/proc/self/maps", "r");
312 unsigned long startaddr, endaddr;
315 n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
317 if (n == 2 && h2g_valid(startaddr)) {
318 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
320 if (h2g_valid(endaddr)) {
321 endaddr = h2g(endaddr);
325 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
337 static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
343 #if defined(CONFIG_USER_ONLY)
344 /* We can't use g_malloc because it may recurse into a locked mutex. */
345 # define ALLOC(P, SIZE) \
347 P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE, \
348 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); \
351 # define ALLOC(P, SIZE) \
352 do { P = g_malloc0(SIZE); } while (0)
355 /* Level 1. Always allocated. */
356 lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));
359 for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
366 ALLOC(p, sizeof(void *) * L2_SIZE);
370 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
378 ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
384 return pd + (index & (L2_SIZE - 1));
387 static inline PageDesc *page_find(tb_page_addr_t index)
389 return page_find_alloc(index, 0);
392 #if !defined(CONFIG_USER_ONLY)
393 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
402 for (i = P_L2_LEVELS - 1; i > 0; i--) {
408 *lp = p = g_malloc0(sizeof(void *) * L2_SIZE);
410 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
416 int first_index = index & ~(L2_SIZE - 1);
422 *lp = pd = g_malloc(sizeof(PhysPageDesc) * L2_SIZE);
424 for (i = 0; i < L2_SIZE; i++) {
425 pd[i].phys_offset = io_mem_unassigned.ram_addr;
426 pd[i].region_offset = (first_index + i) << TARGET_PAGE_BITS;
430 return pd + (index & (L2_SIZE - 1));
433 static inline PhysPageDesc phys_page_find(target_phys_addr_t index)
435 PhysPageDesc *p = phys_page_find_alloc(index, 0);
440 return (PhysPageDesc) {
441 .phys_offset = io_mem_unassigned.ram_addr,
442 .region_offset = index << TARGET_PAGE_BITS,
447 static void tlb_protect_code(ram_addr_t ram_addr);
448 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
450 #define mmap_lock() do { } while(0)
451 #define mmap_unlock() do { } while(0)
454 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
456 #if defined(CONFIG_USER_ONLY)
457 /* Currently it is not recommended to allocate big chunks of data in
458    user mode. This will change when a dedicated libc is used. */
459 #define USE_STATIC_CODE_GEN_BUFFER
462 #ifdef USE_STATIC_CODE_GEN_BUFFER
463 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
464 __attribute__((aligned (CODE_GEN_ALIGN)));
467 static void code_gen_alloc(unsigned long tb_size)
469 #ifdef USE_STATIC_CODE_GEN_BUFFER
470 code_gen_buffer = static_code_gen_buffer;
471 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
472 map_exec(code_gen_buffer, code_gen_buffer_size);
474 code_gen_buffer_size = tb_size;
475 if (code_gen_buffer_size == 0) {
476 #if defined(CONFIG_USER_ONLY)
477 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
479 /* XXX: needs adjustments */
480 code_gen_buffer_size = (unsigned long)(ram_size / 4);
483 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
484 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
485 /* The code gen buffer location may have constraints depending on
486 the host cpu and OS */
487 #if defined(__linux__)
492 flags = MAP_PRIVATE | MAP_ANONYMOUS;
493 #if defined(__x86_64__)
495 /* Cannot map more than that */
496 if (code_gen_buffer_size > (800 * 1024 * 1024))
497 code_gen_buffer_size = (800 * 1024 * 1024);
498 #elif defined(__sparc_v9__)
499     /* Map the buffer below 2G, so we can use direct calls and branches. */
501 start = (void *) 0x60000000UL;
502 if (code_gen_buffer_size > (512 * 1024 * 1024))
503 code_gen_buffer_size = (512 * 1024 * 1024);
504 #elif defined(__arm__)
505 /* Keep the buffer no bigger than 16MB to branch between blocks */
506 if (code_gen_buffer_size > 16 * 1024 * 1024)
507 code_gen_buffer_size = 16 * 1024 * 1024;
508 #elif defined(__s390x__)
509 /* Map the buffer so that we can use direct calls and branches. */
510 /* We have a +- 4GB range on the branches; leave some slop. */
511 if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
512 code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
514 start = (void *)0x90000000UL;
516 code_gen_buffer = mmap(start, code_gen_buffer_size,
517 PROT_WRITE | PROT_READ | PROT_EXEC,
519 if (code_gen_buffer == MAP_FAILED) {
520 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
524 #elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
525 || defined(__DragonFly__) || defined(__OpenBSD__) \
526 || defined(__NetBSD__)
530 flags = MAP_PRIVATE | MAP_ANONYMOUS;
531 #if defined(__x86_64__)
532 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
533 * 0x40000000 is free */
535 addr = (void *)0x40000000;
536 /* Cannot map more than that */
537 if (code_gen_buffer_size > (800 * 1024 * 1024))
538 code_gen_buffer_size = (800 * 1024 * 1024);
539 #elif defined(__sparc_v9__)
540     /* Map the buffer below 2G, so we can use direct calls and branches. */
542 addr = (void *) 0x60000000UL;
543 if (code_gen_buffer_size > (512 * 1024 * 1024)) {
544 code_gen_buffer_size = (512 * 1024 * 1024);
547 code_gen_buffer = mmap(addr, code_gen_buffer_size,
548 PROT_WRITE | PROT_READ | PROT_EXEC,
550 if (code_gen_buffer == MAP_FAILED) {
551 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
556 code_gen_buffer = g_malloc(code_gen_buffer_size);
557 map_exec(code_gen_buffer, code_gen_buffer_size);
559 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
560 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
561 code_gen_buffer_max_size = code_gen_buffer_size -
562 (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
563 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
564 tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
567 /* Must be called before using the QEMU cpus. 'tb_size' is the size
568 (in bytes) allocated to the translation buffer. Zero means default
570 void tcg_exec_init(unsigned long tb_size)
573 code_gen_alloc(tb_size);
574 code_gen_ptr = code_gen_buffer;
576 #if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
577 /* There's no guest base to take into account, so go ahead and
578 initialize the prologue now. */
579 tcg_prologue_init(&tcg_ctx);
583 bool tcg_enabled(void)
585 return code_gen_buffer != NULL;
588 void cpu_exec_init_all(void)
590 #if !defined(CONFIG_USER_ONLY)
596 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
598 static int cpu_common_post_load(void *opaque, int version_id)
600 CPUState *env = opaque;
602 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
603 version_id is increased. */
604 env->interrupt_request &= ~0x01;
610 static const VMStateDescription vmstate_cpu_common = {
611 .name = "cpu_common",
613 .minimum_version_id = 1,
614 .minimum_version_id_old = 1,
615 .post_load = cpu_common_post_load,
616 .fields = (VMStateField []) {
617 VMSTATE_UINT32(halted, CPUState),
618 VMSTATE_UINT32(interrupt_request, CPUState),
619 VMSTATE_END_OF_LIST()
624 CPUState *qemu_get_cpu(int cpu)
626 CPUState *env = first_cpu;
629 if (env->cpu_index == cpu)
637 void cpu_exec_init(CPUState *env)
642 #if defined(CONFIG_USER_ONLY)
645 env->next_cpu = NULL;
648 while (*penv != NULL) {
649 penv = &(*penv)->next_cpu;
652 env->cpu_index = cpu_index;
654 QTAILQ_INIT(&env->breakpoints);
655 QTAILQ_INIT(&env->watchpoints);
656 #ifndef CONFIG_USER_ONLY
657 env->thread_id = qemu_get_thread_id();
660 #if defined(CONFIG_USER_ONLY)
663 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
664 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
665 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
666 cpu_save, cpu_load, env);
670 /* Allocate a new translation block. Flush the translation buffer if there are
671 too many translation blocks or too much generated code. */
672 static TranslationBlock *tb_alloc(target_ulong pc)
674 TranslationBlock *tb;
676 if (nb_tbs >= code_gen_max_blocks ||
677 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
685 void tb_free(TranslationBlock *tb)
687     /* In practice this is mostly used for single-use temporary TBs.
688 Ignore the hard cases and just back up if this TB happens to
689 be the last one generated. */
690 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
691 code_gen_ptr = tb->tc_ptr;
696 static inline void invalidate_page_bitmap(PageDesc *p)
698 if (p->code_bitmap) {
699 g_free(p->code_bitmap);
700 p->code_bitmap = NULL;
702 p->code_write_count = 0;
705 /* Set to NULL all the 'first_tb' fields in all PageDescs. */
707 static void page_flush_tb_1 (int level, void **lp)
716 for (i = 0; i < L2_SIZE; ++i) {
717 pd[i].first_tb = NULL;
718 invalidate_page_bitmap(pd + i);
722 for (i = 0; i < L2_SIZE; ++i) {
723 page_flush_tb_1 (level - 1, pp + i);
728 static void page_flush_tb(void)
731 for (i = 0; i < V_L1_SIZE; i++) {
732 page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
736 /* flush all the translation blocks */
737 /* XXX: tb_flush is currently not thread-safe */
738 void tb_flush(CPUState *env1)
741 #if defined(DEBUG_FLUSH)
742 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
743 (unsigned long)(code_gen_ptr - code_gen_buffer),
745 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
747 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
748 cpu_abort(env1, "Internal error: code buffer overflow\n");
752 for(env = first_cpu; env != NULL; env = env->next_cpu) {
753 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
756 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
759 code_gen_ptr = code_gen_buffer;
760 /* XXX: flush processor icache at this point if cache flush is
765 #ifdef DEBUG_TB_CHECK
767 static void tb_invalidate_check(target_ulong address)
769 TranslationBlock *tb;
771 address &= TARGET_PAGE_MASK;
772 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
773 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
774 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
775 address >= tb->pc + tb->size)) {
776 printf("ERROR invalidate: address=" TARGET_FMT_lx
777 " PC=%08lx size=%04x\n",
778 address, (long)tb->pc, tb->size);
784 /* verify that all the pages have correct rights for code */
785 static void tb_page_check(void)
787 TranslationBlock *tb;
788 int i, flags1, flags2;
790 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
791 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
792 flags1 = page_get_flags(tb->pc);
793 flags2 = page_get_flags(tb->pc + tb->size - 1);
794 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
795 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
796 (long)tb->pc, tb->size, flags1, flags2);
804 /* invalidate one TB */
805 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
808 TranslationBlock *tb1;
812 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
815 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
819 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
821 TranslationBlock *tb1;
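    /* The per-page TB lists are built from tagged pointers: the low two
       bits of each link hold the page index (0 or 1) of the referring TB,
       so the pointer must be masked with ~3 before use, as below. */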
827 tb1 = (TranslationBlock *)((long)tb1 & ~3);
829 *ptb = tb1->page_next[n1];
832 ptb = &tb1->page_next[n1];
836 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
838 TranslationBlock *tb1, **ptb;
841 ptb = &tb->jmp_next[n];
844 /* find tb(n) in circular list */
848 tb1 = (TranslationBlock *)((long)tb1 & ~3);
849 if (n1 == n && tb1 == tb)
852 ptb = &tb1->jmp_first;
854 ptb = &tb1->jmp_next[n1];
857     /* now we can remove tb(n) from the list */
858 *ptb = tb->jmp_next[n];
860 tb->jmp_next[n] = NULL;
864 /* reset the jump entry 'n' of a TB so that it is not chained to
866 static inline void tb_reset_jump(TranslationBlock *tb, int n)
868 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
871 void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
876 tb_page_addr_t phys_pc;
877 TranslationBlock *tb1, *tb2;
879 /* remove the TB from the hash list */
880 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
881 h = tb_phys_hash_func(phys_pc);
882 tb_remove(&tb_phys_hash[h], tb,
883 offsetof(TranslationBlock, phys_hash_next));
885 /* remove the TB from the page list */
886 if (tb->page_addr[0] != page_addr) {
887 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
888 tb_page_remove(&p->first_tb, tb);
889 invalidate_page_bitmap(p);
891 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
892 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
893 tb_page_remove(&p->first_tb, tb);
894 invalidate_page_bitmap(p);
897 tb_invalidated_flag = 1;
899     /* remove the TB from each CPU's jump cache */
900 h = tb_jmp_cache_hash_func(tb->pc);
901 for(env = first_cpu; env != NULL; env = env->next_cpu) {
902 if (env->tb_jmp_cache[h] == tb)
903 env->tb_jmp_cache[h] = NULL;
906     /* remove this TB from the two jump lists */
907 tb_jmp_remove(tb, 0);
908 tb_jmp_remove(tb, 1);
910     /* remove any remaining jumps to this TB */
916 tb1 = (TranslationBlock *)((long)tb1 & ~3);
917 tb2 = tb1->jmp_next[n1];
918 tb_reset_jump(tb1, n1);
919 tb1->jmp_next[n1] = NULL;
922 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
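    /* jmp_first uses the same low-bit encoding: bits 0-1 hold the jump
       slot of the referring TB, and the value (tb | 2) marks an empty
       list that points back at the TB itself. */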
924 tb_phys_invalidate_count++;
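/* Set the bits [start, start + len) in the bitmap 'tab', handling the
   partial leading and trailing bytes separately from any full bytes in
   between. */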
927 static inline void set_bits(uint8_t *tab, int start, int len)
933 mask = 0xff << (start & 7);
934 if ((start & ~7) == (end & ~7)) {
936 mask &= ~(0xff << (end & 7));
941 start = (start + 8) & ~7;
943 while (start < end1) {
948 mask = ~(0xff << (end & 7));
954 static void build_page_bitmap(PageDesc *p)
956 int n, tb_start, tb_end;
957 TranslationBlock *tb;
959 p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);
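    /* one bit per byte of the page: a set bit means the byte is covered
       by at least one TB */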
964 tb = (TranslationBlock *)((long)tb & ~3);
965 /* NOTE: this is subtle as a TB may span two physical pages */
967 /* NOTE: tb_end may be after the end of the page, but
968 it is not a problem */
969 tb_start = tb->pc & ~TARGET_PAGE_MASK;
970 tb_end = tb_start + tb->size;
971 if (tb_end > TARGET_PAGE_SIZE)
972 tb_end = TARGET_PAGE_SIZE;
975 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
977 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
978 tb = tb->page_next[n];
982 TranslationBlock *tb_gen_code(CPUState *env,
983 target_ulong pc, target_ulong cs_base,
984 int flags, int cflags)
986 TranslationBlock *tb;
988 tb_page_addr_t phys_pc, phys_page2;
989 target_ulong virt_page2;
992 phys_pc = get_page_addr_code(env, pc);
995 /* flush must be done */
997 /* cannot fail at this point */
999 /* Don't forget to invalidate previous TB info. */
1000 tb_invalidated_flag = 1;
1002 tc_ptr = code_gen_ptr;
1003 tb->tc_ptr = tc_ptr;
1004 tb->cs_base = cs_base;
1006 tb->cflags = cflags;
1007 cpu_gen_code(env, tb, &code_gen_size);
1008 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
1010 /* check next page if needed */
1011 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
1013 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
1014 phys_page2 = get_page_addr_code(env, virt_page2);
1016 tb_link_page(tb, phys_pc, phys_page2);
1020 /* invalidate all TBs which intersect with the target physical page
1021    starting in range [start, end). NOTE: start and end must refer to
1022 the same physical page. 'is_cpu_write_access' should be true if called
1023 from a real cpu write access: the virtual CPU will exit the current
1024 TB if code is modified inside this TB. */
1025 void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
1026 int is_cpu_write_access)
1028 TranslationBlock *tb, *tb_next, *saved_tb;
1029 CPUState *env = cpu_single_env;
1030 tb_page_addr_t tb_start, tb_end;
1033 #ifdef TARGET_HAS_PRECISE_SMC
1034 int current_tb_not_found = is_cpu_write_access;
1035 TranslationBlock *current_tb = NULL;
1036 int current_tb_modified = 0;
1037 target_ulong current_pc = 0;
1038 target_ulong current_cs_base = 0;
1039 int current_flags = 0;
1040 #endif /* TARGET_HAS_PRECISE_SMC */
1042 p = page_find(start >> TARGET_PAGE_BITS);
1045 if (!p->code_bitmap &&
1046 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1047 is_cpu_write_access) {
1048 /* build code bitmap */
1049 build_page_bitmap(p);
1052     /* we remove all the TBs in the range [start, end) */
1053 /* XXX: see if in some cases it could be faster to invalidate all the code */
1055 while (tb != NULL) {
1057 tb = (TranslationBlock *)((long)tb & ~3);
1058 tb_next = tb->page_next[n];
1059 /* NOTE: this is subtle as a TB may span two physical pages */
1061 /* NOTE: tb_end may be after the end of the page, but
1062 it is not a problem */
1063 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1064 tb_end = tb_start + tb->size;
1066 tb_start = tb->page_addr[1];
1067 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1069 if (!(tb_end <= start || tb_start >= end)) {
1070 #ifdef TARGET_HAS_PRECISE_SMC
1071 if (current_tb_not_found) {
1072 current_tb_not_found = 0;
1074 if (env->mem_io_pc) {
1075 /* now we have a real cpu fault */
1076 current_tb = tb_find_pc(env->mem_io_pc);
1079 if (current_tb == tb &&
1080 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1081 /* If we are modifying the current TB, we must stop
1082 its execution. We could be more precise by checking
1083 that the modification is after the current PC, but it
1084 would require a specialized function to partially
1085 restore the CPU state */
1087 current_tb_modified = 1;
1088 cpu_restore_state(current_tb, env, env->mem_io_pc);
1089                 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1092 #endif /* TARGET_HAS_PRECISE_SMC */
1093             /* we need to do this to handle the case where a signal
1094 occurs while doing tb_phys_invalidate() */
1097 saved_tb = env->current_tb;
1098 env->current_tb = NULL;
1100 tb_phys_invalidate(tb, -1);
1102 env->current_tb = saved_tb;
1103 if (env->interrupt_request && env->current_tb)
1104 cpu_interrupt(env, env->interrupt_request);
1109 #if !defined(CONFIG_USER_ONLY)
1110     /* if no code remains, no need to keep using slow writes */
1112 invalidate_page_bitmap(p);
1113 if (is_cpu_write_access) {
1114 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1118 #ifdef TARGET_HAS_PRECISE_SMC
1119 if (current_tb_modified) {
1120 /* we generate a block containing just the instruction
1121 modifying the memory. It will ensure that it cannot modify
1123 env->current_tb = NULL;
1124 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1125 cpu_resume_from_signal(env, NULL);
1130 /* len must be <= 8 and start must be a multiple of len */
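/* (the check below reads a single byte of the code bitmap, so the
   accessed bits must not straddle a byte boundary) */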
1131 static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
1137 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1138 cpu_single_env->mem_io_vaddr, len,
1139 cpu_single_env->eip,
1140 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1143 p = page_find(start >> TARGET_PAGE_BITS);
1146 if (p->code_bitmap) {
1147 offset = start & ~TARGET_PAGE_MASK;
1148 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1149 if (b & ((1 << len) - 1))
1153 tb_invalidate_phys_page_range(start, start + len, 1);
1157 #if !defined(CONFIG_SOFTMMU)
1158 static void tb_invalidate_phys_page(tb_page_addr_t addr,
1159 unsigned long pc, void *puc)
1161 TranslationBlock *tb;
1164 #ifdef TARGET_HAS_PRECISE_SMC
1165 TranslationBlock *current_tb = NULL;
1166 CPUState *env = cpu_single_env;
1167 int current_tb_modified = 0;
1168 target_ulong current_pc = 0;
1169 target_ulong current_cs_base = 0;
1170 int current_flags = 0;
1173 addr &= TARGET_PAGE_MASK;
1174 p = page_find(addr >> TARGET_PAGE_BITS);
1178 #ifdef TARGET_HAS_PRECISE_SMC
1179 if (tb && pc != 0) {
1180 current_tb = tb_find_pc(pc);
1183 while (tb != NULL) {
1185 tb = (TranslationBlock *)((long)tb & ~3);
1186 #ifdef TARGET_HAS_PRECISE_SMC
1187 if (current_tb == tb &&
1188 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1189 /* If we are modifying the current TB, we must stop
1190 its execution. We could be more precise by checking
1191 that the modification is after the current PC, but it
1192 would require a specialized function to partially
1193 restore the CPU state */
1195 current_tb_modified = 1;
1196 cpu_restore_state(current_tb, env, pc);
1197             cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1200 #endif /* TARGET_HAS_PRECISE_SMC */
1201 tb_phys_invalidate(tb, addr);
1202 tb = tb->page_next[n];
1205 #ifdef TARGET_HAS_PRECISE_SMC
1206 if (current_tb_modified) {
1207 /* we generate a block containing just the instruction
1208 modifying the memory. It will ensure that it cannot modify
1210 env->current_tb = NULL;
1211 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1212 cpu_resume_from_signal(env, puc);
1218 /* add the tb to the target page and protect it if necessary */
1219 static inline void tb_alloc_page(TranslationBlock *tb,
1220 unsigned int n, tb_page_addr_t page_addr)
1223 #ifndef CONFIG_USER_ONLY
1224 bool page_already_protected;
1227 tb->page_addr[n] = page_addr;
1228 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
1229 tb->page_next[n] = p->first_tb;
1230 #ifndef CONFIG_USER_ONLY
1231 page_already_protected = p->first_tb != NULL;
1233 p->first_tb = (TranslationBlock *)((long)tb | n);
1234 invalidate_page_bitmap(p);
1236 #if defined(TARGET_HAS_SMC) || 1
1238 #if defined(CONFIG_USER_ONLY)
1239 if (p->flags & PAGE_WRITE) {
1244         /* force the host page to be non-writable (writes will take a
1245 page fault + mprotect overhead) */
1246 page_addr &= qemu_host_page_mask;
1248 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1249 addr += TARGET_PAGE_SIZE) {
1251 p2 = page_find (addr >> TARGET_PAGE_BITS);
1255 p2->flags &= ~PAGE_WRITE;
1257 mprotect(g2h(page_addr), qemu_host_page_size,
1258 (prot & PAGE_BITS) & ~PAGE_WRITE);
1259 #ifdef DEBUG_TB_INVALIDATE
1260 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1265 /* if some code is already present, then the pages are already
1266 protected. So we handle the case where only the first TB is
1267 allocated in a physical page */
1268 if (!page_already_protected) {
1269 tlb_protect_code(page_addr);
1273 #endif /* TARGET_HAS_SMC */
1276 /* add a new TB and link it to the physical page tables. phys_page2 is
1277 (-1) to indicate that only one page contains the TB. */
1278 void tb_link_page(TranslationBlock *tb,
1279 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
1282 TranslationBlock **ptb;
1284 /* Grab the mmap lock to stop another thread invalidating this TB
1285 before we are done. */
1287 /* add in the physical hash table */
1288 h = tb_phys_hash_func(phys_pc);
1289 ptb = &tb_phys_hash[h];
1290 tb->phys_hash_next = *ptb;
1293 /* add in the page list */
1294 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1295 if (phys_page2 != -1)
1296 tb_alloc_page(tb, 1, phys_page2);
1298 tb->page_addr[1] = -1;
1300 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1301 tb->jmp_next[0] = NULL;
1302 tb->jmp_next[1] = NULL;
1304 /* init original jump addresses */
1305 if (tb->tb_next_offset[0] != 0xffff)
1306 tb_reset_jump(tb, 0);
1307 if (tb->tb_next_offset[1] != 0xffff)
1308 tb_reset_jump(tb, 1);
1310 #ifdef DEBUG_TB_CHECK
1316 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1317 tb[1].tc_ptr. Return NULL if not found */
1318 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1320 int m_min, m_max, m;
1322 TranslationBlock *tb;
1326 if (tc_ptr < (unsigned long)code_gen_buffer ||
1327 tc_ptr >= (unsigned long)code_gen_ptr)
1329 /* binary search (cf Knuth) */
1332 while (m_min <= m_max) {
1333 m = (m_min + m_max) >> 1;
1335 v = (unsigned long)tb->tc_ptr;
1338 else if (tc_ptr < v) {
1347 static void tb_reset_jump_recursive(TranslationBlock *tb);
1349 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1351 TranslationBlock *tb1, *tb_next, **ptb;
1354 tb1 = tb->jmp_next[n];
1356 /* find head of list */
1359 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1362 tb1 = tb1->jmp_next[n1];
1364     /* we are now sure that tb jumps to tb1 */
1367 /* remove tb from the jmp_first list */
1368 ptb = &tb_next->jmp_first;
1372 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1373 if (n1 == n && tb1 == tb)
1375 ptb = &tb1->jmp_next[n1];
1377 *ptb = tb->jmp_next[n];
1378 tb->jmp_next[n] = NULL;
1380     /* remove the jump to the next tb in the generated code */
1381 tb_reset_jump(tb, n);
1383     /* remove jumps in the tb we could have jumped to */
1384 tb_reset_jump_recursive(tb_next);
1388 static void tb_reset_jump_recursive(TranslationBlock *tb)
1390 tb_reset_jump_recursive2(tb, 0);
1391 tb_reset_jump_recursive2(tb, 1);
1394 #if defined(TARGET_HAS_ICE)
1395 #if defined(CONFIG_USER_ONLY)
1396 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1398 tb_invalidate_phys_page_range(pc, pc + 1, 0);
1401 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1403 target_phys_addr_t addr;
1405 ram_addr_t ram_addr;
1408 addr = cpu_get_phys_page_debug(env, pc);
1409 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1411 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1412 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1415 #endif /* TARGET_HAS_ICE */
1417 #if defined(CONFIG_USER_ONLY)
1418 void cpu_watchpoint_remove_all(CPUState *env, int mask)
1423 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1424 int flags, CPUWatchpoint **watchpoint)
1429 /* Add a watchpoint. */
1430 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1431 int flags, CPUWatchpoint **watchpoint)
1433 target_ulong len_mask = ~(len - 1);
1436 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1437 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1438 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1439 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1442 wp = g_malloc(sizeof(*wp));
1445 wp->len_mask = len_mask;
1448 /* keep all GDB-injected watchpoints in front */
1450 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1452 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1454 tlb_flush_page(env, addr);
1461 /* Remove a specific watchpoint. */
1462 int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1465 target_ulong len_mask = ~(len - 1);
1468 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1469 if (addr == wp->vaddr && len_mask == wp->len_mask
1470 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1471 cpu_watchpoint_remove_by_ref(env, wp);
1478 /* Remove a specific watchpoint by reference. */
1479 void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1481 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1483 tlb_flush_page(env, watchpoint->vaddr);
1488 /* Remove all matching watchpoints. */
1489 void cpu_watchpoint_remove_all(CPUState *env, int mask)
1491 CPUWatchpoint *wp, *next;
1493 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1494 if (wp->flags & mask)
1495 cpu_watchpoint_remove_by_ref(env, wp);
1500 /* Add a breakpoint. */
1501 int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1502 CPUBreakpoint **breakpoint)
1504 #if defined(TARGET_HAS_ICE)
1507 bp = g_malloc(sizeof(*bp));
1512 /* keep all GDB-injected breakpoints in front */
1514 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1516 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1518 breakpoint_invalidate(env, pc);
1528 /* Remove a specific breakpoint. */
1529 int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1531 #if defined(TARGET_HAS_ICE)
1534 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1535 if (bp->pc == pc && bp->flags == flags) {
1536 cpu_breakpoint_remove_by_ref(env, bp);
1546 /* Remove a specific breakpoint by reference. */
1547 void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1549 #if defined(TARGET_HAS_ICE)
1550 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1552 breakpoint_invalidate(env, breakpoint->pc);
1558 /* Remove all matching breakpoints. */
1559 void cpu_breakpoint_remove_all(CPUState *env, int mask)
1561 #if defined(TARGET_HAS_ICE)
1562 CPUBreakpoint *bp, *next;
1564 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1565 if (bp->flags & mask)
1566 cpu_breakpoint_remove_by_ref(env, bp);
1571 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1572 CPU loop after each instruction */
1573 void cpu_single_step(CPUState *env, int enabled)
1575 #if defined(TARGET_HAS_ICE)
1576 if (env->singlestep_enabled != enabled) {
1577 env->singlestep_enabled = enabled;
1579 kvm_update_guest_debug(env, 0);
1581 /* must flush all the translated code to avoid inconsistencies */
1582 /* XXX: only flush what is necessary */
1589 /* enable or disable low-level logging */
1590 void cpu_set_log(int log_flags)
1592 loglevel = log_flags;
1593 if (loglevel && !logfile) {
1594 logfile = fopen(logfilename, log_append ? "a" : "w");
1596 perror(logfilename);
1599 #if !defined(CONFIG_SOFTMMU)
1600         /* must avoid glibc's use of mmap() by setting the buffer "by hand" */
1602 static char logfile_buf[4096];
1603 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1605 #elif defined(_WIN32)
1606 /* Win32 doesn't support line-buffering, so use unbuffered output. */
1607 setvbuf(logfile, NULL, _IONBF, 0);
1609 setvbuf(logfile, NULL, _IOLBF, 0);
1613 if (!loglevel && logfile) {
1619 void cpu_set_log_filename(const char *filename)
1621 logfilename = strdup(filename);
1626 cpu_set_log(loglevel);
1629 static void cpu_unlink_tb(CPUState *env)
1631 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1632 problem and hope the cpu will stop of its own accord. For userspace
1633 emulation this often isn't actually as bad as it sounds. Often
1634 signals are used primarily to interrupt blocking syscalls. */
1635 TranslationBlock *tb;
1636 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1638 spin_lock(&interrupt_lock);
1639 tb = env->current_tb;
1640 /* if the cpu is currently executing code, we must unlink it and
1641        all the potentially executing TBs */
1643 env->current_tb = NULL;
1644 tb_reset_jump_recursive(tb);
1646 spin_unlock(&interrupt_lock);
1649 #ifndef CONFIG_USER_ONLY
1650 /* mask must never be zero, except for the A20 change call */
1651 static void tcg_handle_interrupt(CPUState *env, int mask)
1655 old_mask = env->interrupt_request;
1656 env->interrupt_request |= mask;
1659 * If called from iothread context, wake the target cpu in
1662 if (!qemu_cpu_is_self(env)) {
1668 env->icount_decr.u16.high = 0xffff;
1670 && (mask & ~old_mask) != 0) {
1671 cpu_abort(env, "Raised interrupt while not in I/O function");
1678 CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1680 #else /* CONFIG_USER_ONLY */
1682 void cpu_interrupt(CPUState *env, int mask)
1684 env->interrupt_request |= mask;
1687 #endif /* CONFIG_USER_ONLY */
1689 void cpu_reset_interrupt(CPUState *env, int mask)
1691 env->interrupt_request &= ~mask;
1694 void cpu_exit(CPUState *env)
1696 env->exit_request = 1;
1700 const CPULogItem cpu_log_items[] = {
1701 { CPU_LOG_TB_OUT_ASM, "out_asm",
1702 "show generated host assembly code for each compiled TB" },
1703 { CPU_LOG_TB_IN_ASM, "in_asm",
1704 "show target assembly code for each compiled TB" },
1705 { CPU_LOG_TB_OP, "op",
1706 "show micro ops for each compiled TB" },
1707 { CPU_LOG_TB_OP_OPT, "op_opt",
1710 "before eflags optimization and "
1712 "after liveness analysis" },
1713 { CPU_LOG_INT, "int",
1714 "show interrupts/exceptions in short format" },
1715 { CPU_LOG_EXEC, "exec",
1716 "show trace before each executed TB (lots of logs)" },
1717 { CPU_LOG_TB_CPU, "cpu",
1718 "show CPU state before block translation" },
1720 { CPU_LOG_PCALL, "pcall",
1721 "show protected mode far calls/returns/exceptions" },
1722 { CPU_LOG_RESET, "cpu_reset",
1723 "show CPU state before CPU resets" },
1726 { CPU_LOG_IOPORT, "ioport",
1727 "show all i/o ports accesses" },
1732 static int cmp1(const char *s1, int n, const char *s2)
1734 if (strlen(s2) != n)
1736 return memcmp(s1, s2, n) == 0;
1739 /* takes a comma-separated list of log masks. Returns 0 on error. */
1740 int cpu_str_to_log_mask(const char *str)
1742 const CPULogItem *item;
1749 p1 = strchr(p, ',');
1752         if (cmp1(p, p1 - p, "all")) {
1753 for(item = cpu_log_items; item->mask != 0; item++) {
1757 for(item = cpu_log_items; item->mask != 0; item++) {
1758 if (cmp1(p, p1 - p, item->name))
1772 void cpu_abort(CPUState *env, const char *fmt, ...)
1779 fprintf(stderr, "qemu: fatal: ");
1780 vfprintf(stderr, fmt, ap);
1781 fprintf(stderr, "\n");
1783 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1785 cpu_dump_state(env, stderr, fprintf, 0);
1787 if (qemu_log_enabled()) {
1788 qemu_log("qemu: fatal: ");
1789 qemu_log_vprintf(fmt, ap2);
1792 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1794 log_cpu_state(env, 0);
1801 #if defined(CONFIG_USER_ONLY)
1803 struct sigaction act;
1804 sigfillset(&act.sa_mask);
1805 act.sa_handler = SIG_DFL;
1806 sigaction(SIGABRT, &act, NULL);
1812 CPUState *cpu_copy(CPUState *env)
1814 CPUState *new_env = cpu_init(env->cpu_model_str);
1815 CPUState *next_cpu = new_env->next_cpu;
1816 int cpu_index = new_env->cpu_index;
1817 #if defined(TARGET_HAS_ICE)
1822 memcpy(new_env, env, sizeof(CPUState));
1824 /* Preserve chaining and index. */
1825 new_env->next_cpu = next_cpu;
1826 new_env->cpu_index = cpu_index;
1828 /* Clone all break/watchpoints.
1829 Note: Once we support ptrace with hw-debug register access, make sure
1830 BP_CPU break/watchpoints are handled correctly on clone. */
1831 QTAILQ_INIT(&env->breakpoints);
1832 QTAILQ_INIT(&env->watchpoints);
1833 #if defined(TARGET_HAS_ICE)
1834 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1835 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1837 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1838 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1846 #if !defined(CONFIG_USER_ONLY)
1848 static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1852 /* Discard jump cache entries for any tb which might potentially
1853 overlap the flushed page. */
1854 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1855 memset (&env->tb_jmp_cache[i], 0,
1856 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1858 i = tb_jmp_cache_hash_page(addr);
1859 memset (&env->tb_jmp_cache[i], 0,
1860 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1863 static CPUTLBEntry s_cputlb_empty_entry = {
1871 * If flush_global is true (the usual case), flush all tlb entries.
1872 * If flush_global is false, flush (at least) all tlb entries not
1875 * Since QEMU doesn't currently implement a global/not-global flag
1876 * for tlb entries, at the moment tlb_flush() will also flush all
1877 * tlb entries in the flush_global == false case. This is OK because
1878 * CPU architectures generally permit an implementation to drop
1879 * entries from the TLB at any time, so flushing more entries than
1880 * required is only an efficiency issue, not a correctness issue.
1882 void tlb_flush(CPUState *env, int flush_global)
1886 #if defined(DEBUG_TLB)
1887 printf("tlb_flush:\n");
1889 /* must reset current TB so that interrupts cannot modify the
1890 links while we are modifying them */
1891 env->current_tb = NULL;
1893 for(i = 0; i < CPU_TLB_SIZE; i++) {
1895 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1896 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
1900 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1902 env->tlb_flush_addr = -1;
1903 env->tlb_flush_mask = 0;
1907 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1909 if (addr == (tlb_entry->addr_read &
1910 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1911 addr == (tlb_entry->addr_write &
1912 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1913 addr == (tlb_entry->addr_code &
1914 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1915 *tlb_entry = s_cputlb_empty_entry;
1919 void tlb_flush_page(CPUState *env, target_ulong addr)
1924 #if defined(DEBUG_TLB)
1925 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1927 /* Check if we need to flush due to large pages. */
1928 if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
1929 #if defined(DEBUG_TLB)
1930 printf("tlb_flush_page: forced full flush ("
1931 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
1932 env->tlb_flush_addr, env->tlb_flush_mask);
1937 /* must reset current TB so that interrupts cannot modify the
1938 links while we are modifying them */
1939 env->current_tb = NULL;
1941 addr &= TARGET_PAGE_MASK;
1942 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1943 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1944 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
1946 tlb_flush_jmp_cache(env, addr);
1949 /* update the TLBs so that writes to code in the virtual page 'addr'
1951 static void tlb_protect_code(ram_addr_t ram_addr)
1953 cpu_physical_memory_reset_dirty(ram_addr,
1954 ram_addr + TARGET_PAGE_SIZE,
1958 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1959    tested for self-modifying code */
1960 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1963 cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
1966 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1967 unsigned long start, unsigned long length)
1970 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
1971 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1972 if ((addr - start) < length) {
1973 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1978 /* Note: start and end must be within the same ram block. */
1979 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1983 unsigned long length, start1;
1986 start &= TARGET_PAGE_MASK;
1987 end = TARGET_PAGE_ALIGN(end);
1989 length = end - start;
1992 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
1994 /* we modify the TLB cache so that the dirty bit will be set again
1995 when accessing the range */
1996 start1 = (unsigned long)qemu_safe_ram_ptr(start);
1997 /* Check that we don't span multiple blocks - this breaks the
1998 address comparisons below. */
1999 if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
2000 != (end - 1) - start) {
2004 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2006 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2007 for(i = 0; i < CPU_TLB_SIZE; i++)
2008 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2014 int cpu_physical_memory_set_dirty_tracking(int enable)
2017 in_migration = enable;
2021 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2023 ram_addr_t ram_addr;
2026 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
2027 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2028 + tlb_entry->addend);
2029 ram_addr = qemu_ram_addr_from_host_nofail(p);
2030 if (!cpu_physical_memory_is_dirty(ram_addr)) {
2031 tlb_entry->addr_write |= TLB_NOTDIRTY;
2036 /* update the TLB according to the current state of the dirty bits */
2037 void cpu_tlb_update_dirty(CPUState *env)
2041 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2042 for(i = 0; i < CPU_TLB_SIZE; i++)
2043 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2047 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
2049 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2050 tlb_entry->addr_write = vaddr;
2053 /* update the TLB corresponding to virtual page vaddr
2054 so that it is no longer dirty */
2055 static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
2060 vaddr &= TARGET_PAGE_MASK;
2061 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2062 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2063 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
2066 /* Our TLB does not support large pages, so remember the area covered by
2067 large pages and trigger a full TLB flush if these are invalidated. */
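/* For example, once a (hypothetical) 2 MB guest mapping has been
   inserted, tlb_flush_addr/tlb_flush_mask cover that 2 MB range and any
   later tlb_flush_page() falling inside it triggers a full tlb_flush(). */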
2068 static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
2071 target_ulong mask = ~(size - 1);
2073 if (env->tlb_flush_addr == (target_ulong)-1) {
2074 env->tlb_flush_addr = vaddr & mask;
2075 env->tlb_flush_mask = mask;
2078 /* Extend the existing region to include the new page.
2079 This is a compromise between unnecessary flushes and the cost
2080 of maintaining a full variable size TLB. */
2081 mask &= env->tlb_flush_mask;
2082 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2085 env->tlb_flush_addr &= mask;
2086 env->tlb_flush_mask = mask;
2089 static bool is_ram_rom(ram_addr_t pd)
2091 pd &= ~TARGET_PAGE_MASK;
2092 return pd == io_mem_ram.ram_addr || pd == io_mem_rom.ram_addr;
2095 static bool is_romd(ram_addr_t pd)
2099 pd &= ~TARGET_PAGE_MASK;
2100 mr = io_mem_region[pd];
2101 return mr->rom_device && mr->readable;
2104 static bool is_ram_rom_romd(ram_addr_t pd)
2106 return is_ram_rom(pd) || is_romd(pd);
2109 /* Add a new TLB entry. At most one entry for a given virtual address
2110    is permitted. Only a single TARGET_PAGE_SIZE region is mapped; the
2111 supplied size is only used by tlb_flush_page. */
2112 void tlb_set_page(CPUState *env, target_ulong vaddr,
2113 target_phys_addr_t paddr, int prot,
2114 int mmu_idx, target_ulong size)
2119 target_ulong address;
2120 target_ulong code_address;
2121 unsigned long addend;
2124 target_phys_addr_t iotlb;
2126 assert(size >= TARGET_PAGE_SIZE);
2127 if (size != TARGET_PAGE_SIZE) {
2128 tlb_add_large_page(env, vaddr, size);
2130 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
2132 #if defined(DEBUG_TLB)
2133 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
2134 " prot=%x idx=%d pd=0x%08lx\n",
2135 vaddr, paddr, prot, mmu_idx, pd);
2139 if (!is_ram_rom_romd(pd)) {
2140 /* IO memory case (romd handled later) */
2141 address |= TLB_MMIO;
2143 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
2144 if (is_ram_rom(pd)) {
2146 iotlb = pd & TARGET_PAGE_MASK;
2147 if ((pd & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr)
2148 iotlb |= io_mem_notdirty.ram_addr;
2150 iotlb |= io_mem_rom.ram_addr;
2152 /* IO handlers are currently passed a physical address.
2153 It would be nice to pass an offset from the base address
2154 of that region. This would avoid having to special case RAM,
2155 and avoid full address decoding in every device.
2156 We can't use the high bits of pd for this because
2157 IO_MEM_ROMD uses these as a ram address. */
2158 iotlb = (pd & ~TARGET_PAGE_MASK);
2159 iotlb += p.region_offset;
2162 code_address = address;
2163 /* Make accesses to pages with watchpoints go via the
2164 watchpoint trap routines. */
2165 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
2166 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2167 /* Avoid trapping reads of pages with a write breakpoint. */
2168 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
2169 iotlb = io_mem_watch.ram_addr + paddr;
2170 address |= TLB_MMIO;
2176 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2177 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2178 te = &env->tlb_table[mmu_idx][index];
2179 te->addend = addend - vaddr;
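    /* for RAM pages the fast path computes the host address as
       guest virtual address + te->addend */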
2180 if (prot & PAGE_READ) {
2181 te->addr_read = address;
2186 if (prot & PAGE_EXEC) {
2187 te->addr_code = code_address;
2191 if (prot & PAGE_WRITE) {
2192 if ((pd & ~TARGET_PAGE_MASK) == io_mem_rom.ram_addr || is_romd(pd)) {
2193 /* Write access calls the I/O callback. */
2194 te->addr_write = address | TLB_MMIO;
2195 } else if ((pd & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr &&
2196 !cpu_physical_memory_is_dirty(pd)) {
2197 te->addr_write = address | TLB_NOTDIRTY;
2199 te->addr_write = address;
2202 te->addr_write = -1;
2208 void tlb_flush(CPUState *env, int flush_global)
2212 void tlb_flush_page(CPUState *env, target_ulong addr)
2217 * Walks guest process memory "regions" one by one
2218 * and calls callback function 'fn' for each region.
2221 struct walk_memory_regions_data
2223 walk_memory_regions_fn fn;
2225 unsigned long start;
2229 static int walk_memory_regions_end(struct walk_memory_regions_data *data,
2230 abi_ulong end, int new_prot)
2232 if (data->start != -1ul) {
2233 int rc = data->fn(data->priv, data->start, end, data->prot);
2239 data->start = (new_prot ? end : -1ul);
2240 data->prot = new_prot;
2245 static int walk_memory_regions_1(struct walk_memory_regions_data *data,
2246 abi_ulong base, int level, void **lp)
2252 return walk_memory_regions_end(data, base, 0);
2257 for (i = 0; i < L2_SIZE; ++i) {
2258 int prot = pd[i].flags;
2260 pa = base | (i << TARGET_PAGE_BITS);
2261 if (prot != data->prot) {
2262 rc = walk_memory_regions_end(data, pa, prot);
2270 for (i = 0; i < L2_SIZE; ++i) {
2271 pa = base | ((abi_ulong)i <<
2272 (TARGET_PAGE_BITS + L2_BITS * level));
2273 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2283 int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2285 struct walk_memory_regions_data data;
2293 for (i = 0; i < V_L1_SIZE; i++) {
2294 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
2295 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2301 return walk_memory_regions_end(&data, 0, 0);
2304 static int dump_region(void *priv, abi_ulong start,
2305 abi_ulong end, unsigned long prot)
2307 FILE *f = (FILE *)priv;
2309 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2310 " "TARGET_ABI_FMT_lx" %c%c%c\n",
2311 start, end, end - start,
2312 ((prot & PAGE_READ) ? 'r' : '-'),
2313 ((prot & PAGE_WRITE) ? 'w' : '-'),
2314 ((prot & PAGE_EXEC) ? 'x' : '-'));
2319 /* dump memory mappings */
2320 void page_dump(FILE *f)
2322 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2323 "start", "end", "size", "prot");
2324 walk_memory_regions(f, dump_region);
2327 int page_get_flags(target_ulong address)
2331 p = page_find(address >> TARGET_PAGE_BITS);
2337 /* Modify the flags of a page and invalidate the code if necessary.
2338    The flag PAGE_WRITE_ORG is set automatically depending
2339 on PAGE_WRITE. The mmap_lock should already be held. */
2340 void page_set_flags(target_ulong start, target_ulong end, int flags)
2342 target_ulong addr, len;
2344 /* This function should never be called with addresses outside the
2345 guest address space. If this assert fires, it probably indicates
2346 a missing call to h2g_valid. */
2347 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2348 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2350 assert(start < end);
2352 start = start & TARGET_PAGE_MASK;
2353 end = TARGET_PAGE_ALIGN(end);
2355 if (flags & PAGE_WRITE) {
2356 flags |= PAGE_WRITE_ORG;
2359 for (addr = start, len = end - start;
2361 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2362 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2364 /* If the write protection bit is set, then we invalidate
2366 if (!(p->flags & PAGE_WRITE) &&
2367 (flags & PAGE_WRITE) &&
2369 tb_invalidate_phys_page(addr, 0, NULL);
2375 int page_check_range(target_ulong start, target_ulong len, int flags)
2381 /* This function should never be called with addresses outside the
2382 guest address space. If this assert fires, it probably indicates
2383 a missing call to h2g_valid. */
2384 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2385 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2391 if (start + len - 1 < start) {
2392 /* We've wrapped around. */
2396     end = TARGET_PAGE_ALIGN(start + len); /* must do this before we lose bits in the next step */
2397 start = start & TARGET_PAGE_MASK;
2399 for (addr = start, len = end - start;
2401 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2402 p = page_find(addr >> TARGET_PAGE_BITS);
2405         if (!(p->flags & PAGE_VALID))
2408 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2410 if (flags & PAGE_WRITE) {
2411 if (!(p->flags & PAGE_WRITE_ORG))
2413             /* unprotect the page if it was made read-only because it
2414 contains translated code */
2415 if (!(p->flags & PAGE_WRITE)) {
2416 if (!page_unprotect(addr, 0, NULL))
2425 /* called from signal handler: invalidate the code and unprotect the
2426 page. Return TRUE if the fault was successfully handled. */
2427 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2431 target_ulong host_start, host_end, addr;
2433     /* Technically this isn't safe inside a signal handler. However, we
2434 know this only ever happens in a synchronous SEGV handler, so in
2435 practice it seems to be ok. */
2438 p = page_find(address >> TARGET_PAGE_BITS);
2444 /* if the page was really writable, then we change its
2445 protection back to writable */
2446 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2447 host_start = address & qemu_host_page_mask;
2448 host_end = host_start + qemu_host_page_size;
2451 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2452 p = page_find(addr >> TARGET_PAGE_BITS);
2453 p->flags |= PAGE_WRITE;
2456 /* and since the content will be modified, we must invalidate
2457 the corresponding translated code. */
2458 tb_invalidate_phys_page(addr, pc, puc);
2459 #ifdef DEBUG_TB_CHECK
2460 tb_invalidate_check(addr);
2463 mprotect((void *)g2h(host_start), qemu_host_page_size,
2473 static inline void tlb_set_dirty(CPUState *env,
2474 unsigned long addr, target_ulong vaddr)
2477 #endif /* defined(CONFIG_USER_ONLY) */
2479 #if !defined(CONFIG_USER_ONLY)
2481 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2482 typedef struct subpage_t {
2484 target_phys_addr_t base;
2485 ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
2486 ram_addr_t region_offset[TARGET_PAGE_SIZE];
2489 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2490 ram_addr_t memory, ram_addr_t region_offset);
2491 static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2492 ram_addr_t orig_memory,
2493 ram_addr_t region_offset);
2494 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2497 if (addr > start_addr) \
2500 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2501 if (start_addr2 > 0) \
2505 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2506 end_addr2 = TARGET_PAGE_SIZE - 1; \
2508 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2509 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2514 static void destroy_page_desc(PhysPageDesc pd)
2516 unsigned io_index = pd.phys_offset & ~TARGET_PAGE_MASK;
2517 MemoryRegion *mr = io_mem_region[io_index];
2520 subpage_t *subpage = container_of(mr, subpage_t, iomem);
2521 memory_region_destroy(&subpage->iomem);
2526 static void destroy_l2_mapping(void **lp, unsigned level)
2538 for (i = 0; i < L2_SIZE; ++i) {
2539 destroy_l2_mapping(&p[i], level - 1);
2544 for (i = 0; i < L2_SIZE; ++i) {
2545 destroy_page_desc(pd[i]);
2552 static void destroy_all_mappings(void)
2554 destroy_l2_mapping(&phys_map, P_L2_LEVELS - 1);
2557 /* register physical memory.
2558 For RAM, 'size' must be a multiple of the target page size.
2559 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2560 io memory page. The address used when calling the IO function is
2561 the offset from the start of the region, plus region_offset. Both
2562 start_addr and region_offset are rounded down to a page boundary
2563 before calculating this offset. This should not be a problem unless
2564 the low bits of start_addr and region_offset differ. */
2565 void cpu_register_physical_memory_log(MemoryRegionSection *section,
2568 target_phys_addr_t start_addr = section->offset_within_address_space;
2569 ram_addr_t size = section->size;
2570 ram_addr_t phys_offset = section->mr->ram_addr;
2571 ram_addr_t region_offset = section->offset_within_region;
2572 target_phys_addr_t addr, end_addr;
2575 ram_addr_t orig_size = size;
2578 if (memory_region_is_ram(section->mr)) {
2579 phys_offset += region_offset;
2584 phys_offset |= io_mem_rom.ram_addr;
2589 if (phys_offset == io_mem_unassigned.ram_addr) {
2590 region_offset = start_addr;
2592 region_offset &= TARGET_PAGE_MASK;
2593 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2594 end_addr = start_addr + (target_phys_addr_t)size;
2598 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 0);
2599 if (p && p->phys_offset != io_mem_unassigned.ram_addr) {
2600 ram_addr_t orig_memory = p->phys_offset;
2601 target_phys_addr_t start_addr2, end_addr2;
2602 int need_subpage = 0;
2603 MemoryRegion *mr = io_mem_region[orig_memory & ~TARGET_PAGE_MASK];
2605 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2608 if (!(mr->subpage)) {
2609 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2610 &p->phys_offset, orig_memory,
2613 subpage = container_of(mr, subpage_t, iomem);
2615 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2617 p->region_offset = 0;
2619 p->phys_offset = phys_offset;
2620 p->region_offset = region_offset;
2621 if (is_ram_rom_romd(phys_offset))
2622 phys_offset += TARGET_PAGE_SIZE;
2625 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2626 p->phys_offset = phys_offset;
2627 p->region_offset = region_offset;
2628 if (is_ram_rom_romd(phys_offset)) {
2629 phys_offset += TARGET_PAGE_SIZE;
2631 target_phys_addr_t start_addr2, end_addr2;
2632 int need_subpage = 0;
2634 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2635 end_addr2, need_subpage);
2638 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2640 io_mem_unassigned.ram_addr,
2641 addr & TARGET_PAGE_MASK);
2642 subpage_register(subpage, start_addr2, end_addr2,
2643 phys_offset, region_offset);
2644 p->region_offset = 0;
2648 region_offset += TARGET_PAGE_SIZE;
2649 addr += TARGET_PAGE_SIZE;
2650 } while (addr != end_addr);
2652 /* since each CPU stores ram addresses in its TLB cache, we must
2653 reset the modified entries */
2655 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2660 void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2663 kvm_coalesce_mmio_region(addr, size);
2666 void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2669 kvm_uncoalesce_mmio_region(addr, size);
2672 void qemu_flush_coalesced_mmio_buffer(void)
2675 kvm_flush_coalesced_mmio_buffer();
2678 #if defined(__linux__) && !defined(TARGET_S390X)
2680 #include <sys/vfs.h>
2682 #define HUGETLBFS_MAGIC 0x958458f6
2684 static long gethugepagesize(const char *path)
2690 ret = statfs(path, &fs);
2691 } while (ret != 0 && errno == EINTR);
2698 if (fs.f_type != HUGETLBFS_MAGIC)
2699 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
2704 static void *file_ram_alloc(RAMBlock *block,
2714 unsigned long hpagesize;
2716 hpagesize = gethugepagesize(path);
2721 if (memory < hpagesize) {
2725 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2726 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2730 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
2734 fd = mkstemp(filename);
2736 perror("unable to create backing store for hugepages");
2743 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2746 * ftruncate is not supported by hugetlbfs in older
2747 * hosts, so don't bother bailing out on errors.
2748 * If anything goes wrong with it under other filesystems, mmap will fail. */
2751 if (ftruncate(fd, memory))
2752 perror("ftruncate");
2755 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2756 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2757 * to sidestep this quirk.
2759 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2760 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2762 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2764 if (area == MAP_FAILED) {
2765 perror("file_ram_alloc: can't mmap RAM pages");
2774 static ram_addr_t find_ram_offset(ram_addr_t size)
2776 RAMBlock *block, *next_block;
2777 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
2779 if (QLIST_EMPTY(&ram_list.blocks))
2782 QLIST_FOREACH(block, &ram_list.blocks, next) {
2783 ram_addr_t end, next = RAM_ADDR_MAX;
2785 end = block->offset + block->length;
2787 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2788 if (next_block->offset >= end) {
2789 next = MIN(next, next_block->offset);
2792 if (next - end >= size && next - end < mingap) {
2794 mingap = next - end;
2798 if (offset == RAM_ADDR_MAX) {
2799 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
2807 static ram_addr_t last_ram_offset(void)
2810 ram_addr_t last = 0;
2812 QLIST_FOREACH(block, &ram_list.blocks, next)
2813 last = MAX(last, block->offset + block->length);
2818 void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
2820 RAMBlock *new_block, *block;
2823 QLIST_FOREACH(block, &ram_list.blocks, next) {
2824 if (block->offset == addr) {
2830 assert(!new_block->idstr[0]);
2832 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2833 char *id = dev->parent_bus->info->get_dev_path(dev);
2835 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
2839 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2841 QLIST_FOREACH(block, &ram_list.blocks, next) {
2842 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
2843 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2850 ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
2853 RAMBlock *new_block;
2855 size = TARGET_PAGE_ALIGN(size);
2856 new_block = g_malloc0(sizeof(*new_block));
2859 new_block->offset = find_ram_offset(size);
2861 new_block->host = host;
2862 new_block->flags |= RAM_PREALLOC_MASK;
2865 #if defined (__linux__) && !defined(TARGET_S390X)
2866 new_block->host = file_ram_alloc(new_block, size, mem_path);
2867 if (!new_block->host) {
2868 new_block->host = qemu_vmalloc(size);
2869 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
2872 fprintf(stderr, "-mem-path option unsupported\n");
2876 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2877 /* S390 KVM requires the topmost vma of the RAM to be smaller than
2878 a system-defined value, which is at least 256GB. Larger systems
2879 have larger values. We put the guest between the end of data
2880 segment (system break) and this value. We use 32GB as a base to
2881 have enough room for the system break to grow. */
2882 new_block->host = mmap((void*)0x800000000, size,
2883 PROT_EXEC|PROT_READ|PROT_WRITE,
2884 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
2885 if (new_block->host == MAP_FAILED) {
2886 fprintf(stderr, "Allocating RAM failed\n");
2890 if (xen_enabled()) {
2891 xen_ram_alloc(new_block->offset, size, mr);
2893 new_block->host = qemu_vmalloc(size);
2896 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
2899 new_block->length = size;
2901 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
2903 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
2904 last_ram_offset() >> TARGET_PAGE_BITS);
2905 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
2906 0xff, size >> TARGET_PAGE_BITS);
2909 kvm_setup_guest_memory(new_block->host, size);
2911 return new_block->offset;
2914 ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
2916 return qemu_ram_alloc_from_ptr(size, NULL, mr);
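/* Illustrative sketch (hypothetical device code, not from the original file):
   the offset returned by qemu_ram_alloc() indexes the ram_addr_t space used by
   ram_list and the dirty bitmap; it is not a guest physical address. */
#if 0 /* example only */
static ram_addr_t example_alloc_vram(MemoryRegion *mr, ram_addr_t size)
{
    ram_addr_t offset = qemu_ram_alloc(size, mr);

    /* Device-owned memory may be touched directly through its host pointer. */
    memset(qemu_get_ram_ptr(offset), 0, size);
    return offset;
}
#endif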
2919 void qemu_ram_free_from_ptr(ram_addr_t addr)
2923 QLIST_FOREACH(block, &ram_list.blocks, next) {
2924 if (addr == block->offset) {
2925 QLIST_REMOVE(block, next);
2932 void qemu_ram_free(ram_addr_t addr)
2936 QLIST_FOREACH(block, &ram_list.blocks, next) {
2937 if (addr == block->offset) {
2938 QLIST_REMOVE(block, next);
2939 if (block->flags & RAM_PREALLOC_MASK) {
2941 } else if (mem_path) {
2942 #if defined (__linux__) && !defined(TARGET_S390X)
2944 munmap(block->host, block->length);
2947 qemu_vfree(block->host);
2953 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2954 munmap(block->host, block->length);
2956 if (xen_enabled()) {
2957 xen_invalidate_map_cache_entry(block->host);
2959 qemu_vfree(block->host);
2971 void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
2978 QLIST_FOREACH(block, &ram_list.blocks, next) {
2979 offset = addr - block->offset;
2980 if (offset < block->length) {
2981 vaddr = block->host + offset;
2982 if (block->flags & RAM_PREALLOC_MASK) {
2986 munmap(vaddr, length);
2988 #if defined(__linux__) && !defined(TARGET_S390X)
2991 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
2994 flags |= MAP_PRIVATE;
2996 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2997 flags, block->fd, offset);
2999 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3000 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3007 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
3008 flags |= MAP_SHARED | MAP_ANONYMOUS;
3009 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
3012 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3013 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3017 if (area != vaddr) {
3018 fprintf(stderr, "Could not remap addr: "
3019 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
3023 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
3029 #endif /* !_WIN32 */
3031 /* Return a host pointer to ram allocated with qemu_ram_alloc.
3032 With the exception of the softmmu code in this file, this should
3033 only be used for local memory (e.g. video ram) that the device owns,
3034 and knows it isn't going to access beyond the end of the block.
3036 It should not be used for general purpose DMA.
3037 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
3039 void *qemu_get_ram_ptr(ram_addr_t addr)
3043 QLIST_FOREACH(block, &ram_list.blocks, next) {
3044 if (addr - block->offset < block->length) {
3045 /* Move this entry to the start of the list. */
3046 if (block != QLIST_FIRST(&ram_list.blocks)) {
3047 QLIST_REMOVE(block, next);
3048 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
3050 if (xen_enabled()) {
3051 /* We need to check if the requested address is in the RAM
3052 * because we don't want to map the entire memory in QEMU.
3053 * In that case just map until the end of the page.
3055 if (block->offset == 0) {
3056 return xen_map_cache(addr, 0, 0);
3057 } else if (block->host == NULL) {
3059 xen_map_cache(block->offset, block->length, 1);
3062 return block->host + (addr - block->offset);
3066 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3072 /* Return a host pointer to ram allocated with qemu_ram_alloc.
3073 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
3075 void *qemu_safe_ram_ptr(ram_addr_t addr)
3079 QLIST_FOREACH(block, &ram_list.blocks, next) {
3080 if (addr - block->offset < block->length) {
3081 if (xen_enabled()) {
3082 /* We need to check if the requested address is in the RAM
3083 * because we don't want to map the entire memory in QEMU.
3084 * In that case just map until the end of the page.
3086 if (block->offset == 0) {
3087 return xen_map_cache(addr, 0, 0);
3088 } else if (block->host == NULL) {
3090 xen_map_cache(block->offset, block->length, 1);
3093 return block->host + (addr - block->offset);
3097 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3103 /* Return a host pointer to the guest's RAM. Similar to qemu_get_ram_ptr
3104 * but takes a size argument */
3105 void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
3110 if (xen_enabled()) {
3111 return xen_map_cache(addr, *size, 1);
3115 QLIST_FOREACH(block, &ram_list.blocks, next) {
3116 if (addr - block->offset < block->length) {
3117 if (addr - block->offset + *size > block->length)
3118 *size = block->length - addr + block->offset;
3119 return block->host + (addr - block->offset);
3123 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3128 void qemu_put_ram_ptr(void *addr)
3130 trace_qemu_put_ram_ptr(addr);
3133 int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
3136 uint8_t *host = ptr;
3138 if (xen_enabled()) {
3139 *ram_addr = xen_ram_addr_from_mapcache(ptr);
3143 QLIST_FOREACH(block, &ram_list.blocks, next) {
3144 /* This case appears when the block is not mapped. */
3145 if (block->host == NULL) {
3148 if (host - block->host < block->length) {
3149 *ram_addr = block->offset + (host - block->host);
3157 /* Some of the softmmu routines need to translate from a host pointer
3158 (typically a TLB entry) back to a ram offset. */
3159 ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3161 ram_addr_t ram_addr;
3163 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
3164 fprintf(stderr, "Bad ram pointer %p\n", ptr);
3170 static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
3173 #ifdef DEBUG_UNASSIGNED
3174 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3176 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3177 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
3182 static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
3183 uint64_t val, unsigned size)
3185 #ifdef DEBUG_UNASSIGNED
3186 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
3188 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3189 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
3193 static const MemoryRegionOps unassigned_mem_ops = {
3194 .read = unassigned_mem_read,
3195 .write = unassigned_mem_write,
3196 .endianness = DEVICE_NATIVE_ENDIAN,
3199 static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
3205 static void error_mem_write(void *opaque, target_phys_addr_t addr,
3206 uint64_t value, unsigned size)
3211 static const MemoryRegionOps error_mem_ops = {
3212 .read = error_mem_read,
3213 .write = error_mem_write,
3214 .endianness = DEVICE_NATIVE_ENDIAN,
3217 static const MemoryRegionOps rom_mem_ops = {
3218 .read = error_mem_read,
3219 .write = unassigned_mem_write,
3220 .endianness = DEVICE_NATIVE_ENDIAN,
3223 static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
3224 uint64_t val, unsigned size)
3227 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3228 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3229 #if !defined(CONFIG_USER_ONLY)
3230 tb_invalidate_phys_page_fast(ram_addr, size);
3231 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3236 stb_p(qemu_get_ram_ptr(ram_addr), val);
3239 stw_p(qemu_get_ram_ptr(ram_addr), val);
3242 stl_p(qemu_get_ram_ptr(ram_addr), val);
3247 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3248 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3249 /* we remove the notdirty callback only if the code has been flushed */
3251 if (dirty_flags == 0xff)
3252 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
3255 static const MemoryRegionOps notdirty_mem_ops = {
3256 .read = error_mem_read,
3257 .write = notdirty_mem_write,
3258 .endianness = DEVICE_NATIVE_ENDIAN,
3261 /* Generate a debug exception if a watchpoint has been hit. */
3262 static void check_watchpoint(int offset, int len_mask, int flags)
3264 CPUState *env = cpu_single_env;
3265 target_ulong pc, cs_base;
3266 TranslationBlock *tb;
3271 if (env->watchpoint_hit) {
3272 /* We re-entered the check after replacing the TB. Now raise
3273 * the debug interrupt so that it will trigger after the
3274 * current instruction. */
3275 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3278 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
3279 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
3280 if ((vaddr == (wp->vaddr & len_mask) ||
3281 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
3282 wp->flags |= BP_WATCHPOINT_HIT;
3283 if (!env->watchpoint_hit) {
3284 env->watchpoint_hit = wp;
3285 tb = tb_find_pc(env->mem_io_pc);
3287 cpu_abort(env, "check_watchpoint: could not find TB for "
3288 "pc=%p", (void *)env->mem_io_pc);
3290 cpu_restore_state(tb, env, env->mem_io_pc);
3291 tb_phys_invalidate(tb, -1);
3292 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3293 env->exception_index = EXCP_DEBUG;
3295 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3296 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3298 cpu_resume_from_signal(env, NULL);
3301 wp->flags &= ~BP_WATCHPOINT_HIT;
3306 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3307 so these check for a hit then pass through to the normal out-of-line phys routines. */
3309 static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
3312 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
3314 case 1: return ldub_phys(addr);
3315 case 2: return lduw_phys(addr);
3316 case 4: return ldl_phys(addr);
3321 static void watch_mem_write(void *opaque, target_phys_addr_t addr,
3322 uint64_t val, unsigned size)
3324 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
3326 case 1: stb_phys(addr, val);
3327 case 2: stw_phys(addr, val);
3328 case 4: stl_phys(addr, val);
3333 static const MemoryRegionOps watch_mem_ops = {
3334 .read = watch_mem_read,
3335 .write = watch_mem_write,
3336 .endianness = DEVICE_NATIVE_ENDIAN,
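/* Illustrative sketch of how a debugger front end (e.g. the gdbstub) arms the
   machinery served by the routines above, using QEMU's existing
   cpu_watchpoint_insert() helper. The watched address and the 4-byte, aligned
   length here are hypothetical. */
#if 0 /* example only */
static void example_arm_watchpoint(CPUState *env, target_ulong guest_va)
{
    CPUWatchpoint *wp;

    /* The watched page's TLB entries are redirected through io_mem_watch,
       so every access ends up in check_watchpoint(). */
    if (cpu_watchpoint_insert(env, guest_va, 4, BP_MEM_WRITE | BP_GDB, &wp)) {
        fprintf(stderr, "could not set watchpoint\n");
    }
}
#endif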
3339 static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
3342 subpage_t *mmio = opaque;
3343 unsigned int idx = SUBPAGE_IDX(addr);
3344 #if defined(DEBUG_SUBPAGE)
3345 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3346 mmio, len, addr, idx);
3349 addr += mmio->region_offset[idx];
3350 idx = mmio->sub_io_index[idx];
3351 return io_mem_read(idx, addr, len);
3354 static void subpage_write(void *opaque, target_phys_addr_t addr,
3355 uint64_t value, unsigned len)
3357 subpage_t *mmio = opaque;
3358 unsigned int idx = SUBPAGE_IDX(addr);
3359 #if defined(DEBUG_SUBPAGE)
3360 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
3361 " idx %d value %"PRIx64"\n",
3362 __func__, mmio, len, addr, idx, value);
3365 addr += mmio->region_offset[idx];
3366 idx = mmio->sub_io_index[idx];
3367 io_mem_write(idx, addr, value, len);
3370 static const MemoryRegionOps subpage_ops = {
3371 .read = subpage_read,
3372 .write = subpage_write,
3373 .endianness = DEVICE_NATIVE_ENDIAN,
3376 static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
3379 ram_addr_t raddr = addr;
3380 void *ptr = qemu_get_ram_ptr(raddr);
3382 case 1: return ldub_p(ptr);
3383 case 2: return lduw_p(ptr);
3384 case 4: return ldl_p(ptr);
3389 static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
3390 uint64_t value, unsigned size)
3392 ram_addr_t raddr = addr;
3393 void *ptr = qemu_get_ram_ptr(raddr);
3395 case 1: return stb_p(ptr, value);
3396 case 2: return stw_p(ptr, value);
3397 case 4: return stl_p(ptr, value);
3402 static const MemoryRegionOps subpage_ram_ops = {
3403 .read = subpage_ram_read,
3404 .write = subpage_ram_write,
3405 .endianness = DEVICE_NATIVE_ENDIAN,
3408 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3409 ram_addr_t memory, ram_addr_t region_offset)
3413 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3415 idx = SUBPAGE_IDX(start);
3416 eidx = SUBPAGE_IDX(end);
3417 #if defined(DEBUG_SUBPAGE)
3418 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
3419 mmio, start, end, idx, eidx, memory);
3421 if ((memory & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
3422 memory = io_mem_subpage_ram.ram_addr;
3424 memory &= IO_MEM_NB_ENTRIES - 1;
3425 for (; idx <= eidx; idx++) {
3426 mmio->sub_io_index[idx] = memory;
3427 mmio->region_offset[idx] = region_offset;
3433 static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3434 ram_addr_t orig_memory,
3435 ram_addr_t region_offset)
3440 mmio = g_malloc0(sizeof(subpage_t));
3443 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
3444 "subpage", TARGET_PAGE_SIZE);
3445 mmio->iomem.subpage = true;
3446 subpage_memory = mmio->iomem.ram_addr;
3447 #if defined(DEBUG_SUBPAGE)
3448 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3449 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
3451 *phys = subpage_memory;
3452 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);
3457 static int get_free_io_mem_idx(void)
3461 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3462 if (!io_mem_used[i]) {
3466 fprintf(stderr, "RAN out out io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
3470 /* Register a MemoryRegion in the io_mem_region table so that accesses can
3471    reach it through the io index stored in a page's phys_offset.
3473    If io_index is non zero, the corresponding io zone is
3474    modified. If it is zero, a new io zone is allocated. The return
3475    value can be used with cpu_register_physical_memory(). (-1) is
3476    returned if error. */
3477 static int cpu_register_io_memory_fixed(int io_index, MemoryRegion *mr)
3479 if (io_index <= 0) {
3480 io_index = get_free_io_mem_idx();
3484 if (io_index >= IO_MEM_NB_ENTRIES)
3488 io_mem_region[io_index] = mr;
3493 int cpu_register_io_memory(MemoryRegion *mr)
3495 return cpu_register_io_memory_fixed(0, mr);
3498 void cpu_unregister_io_memory(int io_index)
3500 io_mem_region[io_index] = NULL;
3501 io_mem_used[io_index] = 0;
3504 static void io_mem_init(void)
3508 /* Must be first: */
3509 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
3510 assert(io_mem_ram.ram_addr == 0);
3511 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
3512 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
3513 "unassigned", UINT64_MAX);
3514 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
3515 "notdirty", UINT64_MAX);
3516 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
3517 "subpage-ram", UINT64_MAX);
3521 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
3522 "watch", UINT64_MAX);
3525 static void core_begin(MemoryListener *listener)
3527 destroy_all_mappings();
3530 static void core_commit(MemoryListener *listener)
3534 static void core_region_add(MemoryListener *listener,
3535 MemoryRegionSection *section)
3537 cpu_register_physical_memory_log(section, section->readonly);
3540 static void core_region_del(MemoryListener *listener,
3541 MemoryRegionSection *section)
3545 static void core_region_nop(MemoryListener *listener,
3546 MemoryRegionSection *section)
3548 cpu_register_physical_memory_log(section, section->readonly);
3551 static void core_log_start(MemoryListener *listener,
3552 MemoryRegionSection *section)
3556 static void core_log_stop(MemoryListener *listener,
3557 MemoryRegionSection *section)
3561 static void core_log_sync(MemoryListener *listener,
3562 MemoryRegionSection *section)
3566 static void core_log_global_start(MemoryListener *listener)
3568 cpu_physical_memory_set_dirty_tracking(1);
3571 static void core_log_global_stop(MemoryListener *listener)
3573 cpu_physical_memory_set_dirty_tracking(0);
3576 static void core_eventfd_add(MemoryListener *listener,
3577 MemoryRegionSection *section,
3578 bool match_data, uint64_t data, int fd)
3582 static void core_eventfd_del(MemoryListener *listener,
3583 MemoryRegionSection *section,
3584 bool match_data, uint64_t data, int fd)
3588 static void io_begin(MemoryListener *listener)
3592 static void io_commit(MemoryListener *listener)
3596 static void io_region_add(MemoryListener *listener,
3597 MemoryRegionSection *section)
3599 iorange_init(&section->mr->iorange, &memory_region_iorange_ops,
3600 section->offset_within_address_space, section->size);
3601 ioport_register(&section->mr->iorange);
3604 static void io_region_del(MemoryListener *listener,
3605 MemoryRegionSection *section)
3607 isa_unassign_ioport(section->offset_within_address_space, section->size);
3610 static void io_region_nop(MemoryListener *listener,
3611 MemoryRegionSection *section)
3615 static void io_log_start(MemoryListener *listener,
3616 MemoryRegionSection *section)
3620 static void io_log_stop(MemoryListener *listener,
3621 MemoryRegionSection *section)
3625 static void io_log_sync(MemoryListener *listener,
3626 MemoryRegionSection *section)
3630 static void io_log_global_start(MemoryListener *listener)
3634 static void io_log_global_stop(MemoryListener *listener)
3638 static void io_eventfd_add(MemoryListener *listener,
3639 MemoryRegionSection *section,
3640 bool match_data, uint64_t data, int fd)
3644 static void io_eventfd_del(MemoryListener *listener,
3645 MemoryRegionSection *section,
3646 bool match_data, uint64_t data, int fd)
3650 static MemoryListener core_memory_listener = {
3651 .begin = core_begin,
3652 .commit = core_commit,
3653 .region_add = core_region_add,
3654 .region_del = core_region_del,
3655 .region_nop = core_region_nop,
3656 .log_start = core_log_start,
3657 .log_stop = core_log_stop,
3658 .log_sync = core_log_sync,
3659 .log_global_start = core_log_global_start,
3660 .log_global_stop = core_log_global_stop,
3661 .eventfd_add = core_eventfd_add,
3662 .eventfd_del = core_eventfd_del,
3666 static MemoryListener io_memory_listener = {
3668 .commit = io_commit,
3669 .region_add = io_region_add,
3670 .region_del = io_region_del,
3671 .region_nop = io_region_nop,
3672 .log_start = io_log_start,
3673 .log_stop = io_log_stop,
3674 .log_sync = io_log_sync,
3675 .log_global_start = io_log_global_start,
3676 .log_global_stop = io_log_global_stop,
3677 .eventfd_add = io_eventfd_add,
3678 .eventfd_del = io_eventfd_del,
3682 static void memory_map_init(void)
3684 system_memory = g_malloc(sizeof(*system_memory));
3685 memory_region_init(system_memory, "system", INT64_MAX);
3686 set_system_memory_map(system_memory);
3688 system_io = g_malloc(sizeof(*system_io));
3689 memory_region_init(system_io, "io", 65536);
3690 set_system_io_map(system_io);
3692 memory_listener_register(&core_memory_listener, system_memory);
3693 memory_listener_register(&io_memory_listener, system_io);
3696 MemoryRegion *get_system_memory(void)
3698 return system_memory;
3701 MemoryRegion *get_system_io(void)
3706 #endif /* !defined(CONFIG_USER_ONLY) */
3708 /* physical memory access (slow version, mainly for debug) */
3709 #if defined(CONFIG_USER_ONLY)
3710 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3711 uint8_t *buf, int len, int is_write)
3718 page = addr & TARGET_PAGE_MASK;
3719 l = (page + TARGET_PAGE_SIZE) - addr;
3722 flags = page_get_flags(page);
3723 if (!(flags & PAGE_VALID))
3726 if (!(flags & PAGE_WRITE))
3728 /* XXX: this code should not depend on lock_user */
3729 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3732 unlock_user(p, addr, l);
3734 if (!(flags & PAGE_READ))
3736 /* XXX: this code should not depend on lock_user */
3737 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3740 unlock_user(p, addr, 0);
3750 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3751 int len, int is_write)
3756 target_phys_addr_t page;
3761 page = addr & TARGET_PAGE_MASK;
3762 l = (page + TARGET_PAGE_SIZE) - addr;
3765 p = phys_page_find(page >> TARGET_PAGE_BITS);
3769 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
3770 target_phys_addr_t addr1;
3771 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
3772 addr1 = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
3773 /* XXX: could force cpu_single_env to NULL to avoid potential bugs */
3775 if (l >= 4 && ((addr1 & 3) == 0)) {
3776 /* 32 bit write access */
3778 io_mem_write(io_index, addr1, val, 4);
3780 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3781 /* 16 bit write access */
3783 io_mem_write(io_index, addr1, val, 2);
3786 /* 8 bit write access */
3788 io_mem_write(io_index, addr1, val, 1);
3793 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3795 ptr = qemu_get_ram_ptr(addr1);
3796 memcpy(ptr, buf, l);
3797 if (!cpu_physical_memory_is_dirty(addr1)) {
3798 /* invalidate code */
3799 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3801 cpu_physical_memory_set_dirty_flags(
3802 addr1, (0xff & ~CODE_DIRTY_FLAG));
3804 qemu_put_ram_ptr(ptr);
3807 if (!is_ram_rom_romd(pd)) {
3808 target_phys_addr_t addr1;
3810 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
3811 addr1 = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
3812 if (l >= 4 && ((addr1 & 3) == 0)) {
3813 /* 32 bit read access */
3814 val = io_mem_read(io_index, addr1, 4);
3817 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3818 /* 16 bit read access */
3819 val = io_mem_read(io_index, addr1, 2);
3823 /* 8 bit read access */
3824 val = io_mem_read(io_index, addr1, 1);
3830 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
3831 memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
3832 qemu_put_ram_ptr(ptr);
3841 /* used for ROM loading: can write in RAM and ROM */
3842 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3843 const uint8_t *buf, int len)
3847 target_phys_addr_t page;
3852 page = addr & TARGET_PAGE_MASK;
3853 l = (page + TARGET_PAGE_SIZE) - addr;
3856 p = phys_page_find(page >> TARGET_PAGE_BITS);
3859 if (!is_ram_rom_romd(pd)) {
3862 unsigned long addr1;
3863 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3865 ptr = qemu_get_ram_ptr(addr1);
3866 memcpy(ptr, buf, l);
3867 qemu_put_ram_ptr(ptr);
3877 target_phys_addr_t addr;
3878 target_phys_addr_t len;
3881 static BounceBuffer bounce;
3883 typedef struct MapClient {
3885 void (*callback)(void *opaque);
3886 QLIST_ENTRY(MapClient) link;
3889 static QLIST_HEAD(map_client_list, MapClient) map_client_list
3890 = QLIST_HEAD_INITIALIZER(map_client_list);
3892 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3894 MapClient *client = g_malloc(sizeof(*client));
3896 client->opaque = opaque;
3897 client->callback = callback;
3898 QLIST_INSERT_HEAD(&map_client_list, client, link);
3902 void cpu_unregister_map_client(void *_client)
3904 MapClient *client = (MapClient *)_client;
3906 QLIST_REMOVE(client, link);
3910 static void cpu_notify_map_clients(void)
3914 while (!QLIST_EMPTY(&map_client_list)) {
3915 client = QLIST_FIRST(&map_client_list);
3916 client->callback(client->opaque);
3917 cpu_unregister_map_client(client);
3921 /* Map a physical memory region into a host virtual address.
3922 * May map a subset of the requested range, given by and returned in *plen.
3923 * May return NULL if resources needed to perform the mapping are exhausted.
3924 * Use only for reads OR writes - not for read-modify-write operations.
3925 * Use cpu_register_map_client() to know when retrying the map operation is
3926 * likely to succeed.
3928 void *cpu_physical_memory_map(target_phys_addr_t addr,
3929 target_phys_addr_t *plen,
3932 target_phys_addr_t len = *plen;
3933 target_phys_addr_t todo = 0;
3935 target_phys_addr_t page;
3938 ram_addr_t raddr = RAM_ADDR_MAX;
3943 page = addr & TARGET_PAGE_MASK;
3944 l = (page + TARGET_PAGE_SIZE) - addr;
3947 p = phys_page_find(page >> TARGET_PAGE_BITS);
3950 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
3951 if (todo || bounce.buffer) {
3954 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3958 cpu_physical_memory_read(addr, bounce.buffer, l);
3962 return bounce.buffer;
3965 raddr = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3973 ret = qemu_ram_ptr_length(raddr, &rlen);
3978 /* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3979 * Will also mark the memory as dirty if is_write == 1. access_len gives
3980 * the amount of memory that was actually read or written by the caller.
3982 void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3983 int is_write, target_phys_addr_t access_len)
3985 if (buffer != bounce.buffer) {
3987 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
3988 while (access_len) {
3990 l = TARGET_PAGE_SIZE;
3993 if (!cpu_physical_memory_is_dirty(addr1)) {
3994 /* invalidate code */
3995 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3997 cpu_physical_memory_set_dirty_flags(
3998 addr1, (0xff & ~CODE_DIRTY_FLAG));
4004 if (xen_enabled()) {
4005 xen_invalidate_map_cache_entry(buffer);
4010 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
4012 qemu_vfree(bounce.buffer);
4013 bounce.buffer = NULL;
4014 cpu_notify_map_clients();
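/* Illustrative sketch of the intended zero-copy DMA pattern: map, copy, unmap,
   and fall back to a map-client callback when the single bounce buffer is
   busy. The function and its retry callback are hypothetical. */
#if 0 /* example only */
static void example_dma_write(target_phys_addr_t guest_pa,
                              const uint8_t *data, target_phys_addr_t len)
{
    target_phys_addr_t plen = len;
    void *host = cpu_physical_memory_map(guest_pa, &plen, 1 /* is_write */);

    if (!host) {
        /* Bounce buffer in use: register a callback and retry later, e.g.
           cpu_register_map_client(opaque, example_dma_retry); */
        return;
    }
    /* plen may be smaller than len; a real device would loop over the rest. */
    memcpy(host, data, plen);
    cpu_physical_memory_unmap(host, plen, 1, plen);
}
#endif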
4017 /* warning: addr must be aligned */
4018 static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
4019 enum device_endian endian)
4027 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4030 if (!is_ram_rom_romd(pd)) {
4032 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
4033 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
4034 val = io_mem_read(io_index, addr, 4);
4035 #if defined(TARGET_WORDS_BIGENDIAN)
4036 if (endian == DEVICE_LITTLE_ENDIAN) {
4040 if (endian == DEVICE_BIG_ENDIAN) {
4046 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4047 (addr & ~TARGET_PAGE_MASK);
4049 case DEVICE_LITTLE_ENDIAN:
4050 val = ldl_le_p(ptr);
4052 case DEVICE_BIG_ENDIAN:
4053 val = ldl_be_p(ptr);
4063 uint32_t ldl_phys(target_phys_addr_t addr)
4065 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4068 uint32_t ldl_le_phys(target_phys_addr_t addr)
4070 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4073 uint32_t ldl_be_phys(target_phys_addr_t addr)
4075 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
4078 /* warning: addr must be aligned */
4079 static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
4080 enum device_endian endian)
4088 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4091 if (!is_ram_rom_romd(pd)) {
4093 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
4094 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
4096 /* XXX This is broken when device endian != cpu endian.
4097 Fix and add "endian" variable check */
4098 #ifdef TARGET_WORDS_BIGENDIAN
4099 val = io_mem_read(io_index, addr, 4) << 32;
4100 val |= io_mem_read(io_index, addr + 4, 4);
4102 val = io_mem_read(io_index, addr, 4);
4103 val |= io_mem_read(io_index, addr + 4, 4) << 32;
4107 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4108 (addr & ~TARGET_PAGE_MASK);
4110 case DEVICE_LITTLE_ENDIAN:
4111 val = ldq_le_p(ptr);
4113 case DEVICE_BIG_ENDIAN:
4114 val = ldq_be_p(ptr);
4124 uint64_t ldq_phys(target_phys_addr_t addr)
4126 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4129 uint64_t ldq_le_phys(target_phys_addr_t addr)
4131 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4134 uint64_t ldq_be_phys(target_phys_addr_t addr)
4136 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
4140 uint32_t ldub_phys(target_phys_addr_t addr)
4143 cpu_physical_memory_read(addr, &val, 1);
4147 /* warning: addr must be aligned */
4148 static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
4149 enum device_endian endian)
4157 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4160 if (!is_ram_rom_romd(pd)) {
4162 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
4163 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
4164 val = io_mem_read(io_index, addr, 2);
4165 #if defined(TARGET_WORDS_BIGENDIAN)
4166 if (endian == DEVICE_LITTLE_ENDIAN) {
4170 if (endian == DEVICE_BIG_ENDIAN) {
4176 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4177 (addr & ~TARGET_PAGE_MASK);
4179 case DEVICE_LITTLE_ENDIAN:
4180 val = lduw_le_p(ptr);
4182 case DEVICE_BIG_ENDIAN:
4183 val = lduw_be_p(ptr);
4193 uint32_t lduw_phys(target_phys_addr_t addr)
4195 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4198 uint32_t lduw_le_phys(target_phys_addr_t addr)
4200 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4203 uint32_t lduw_be_phys(target_phys_addr_t addr)
4205 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
4208 /* warning: addr must be aligned. The ram page is not masked as dirty
4209 and the code inside is not invalidated. It is useful if the dirty
4210 bits are used to track modified PTEs */
4211 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
4218 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4221 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
4222 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
4223 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
4224 io_mem_write(io_index, addr, val, 4);
4226 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4227 ptr = qemu_get_ram_ptr(addr1);
4230 if (unlikely(in_migration)) {
4231 if (!cpu_physical_memory_is_dirty(addr1)) {
4232 /* invalidate code */
4233 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4235 cpu_physical_memory_set_dirty_flags(
4236 addr1, (0xff & ~CODE_DIRTY_FLAG));
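/* Illustrative sketch of the use case named above: a target MMU helper setting
   an accessed bit in a guest PTE without dirtying the RAM page, so the dirty
   bitmap can still be used to track guest page-table writes. The PTE flag
   value is hypothetical. */
#if 0 /* example only */
#define EXAMPLE_PTE_ACCESSED 0x20 /* hypothetical flag bit */

static void example_set_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    if (!(pte & EXAMPLE_PTE_ACCESSED)) {
        /* stl_phys() would set the page's dirty flags and invalidate any
           translated code there; the _notdirty variant deliberately skips
           that bookkeeping. */
        stl_phys_notdirty(pte_addr, pte | EXAMPLE_PTE_ACCESSED);
    }
}
#endif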
4242 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
4249 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4252 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
4253 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
4254 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
4255 #ifdef TARGET_WORDS_BIGENDIAN
4256 io_mem_write(io_index, addr, val >> 32, 4);
4257 io_mem_write(io_index, addr + 4, (uint32_t)val, 4);
4259 io_mem_write(io_index, addr, (uint32_t)val, 4);
4260 io_mem_write(io_index, addr + 4, val >> 32, 4);
4263 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4264 (addr & ~TARGET_PAGE_MASK);
4269 /* warning: addr must be aligned */
4270 static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
4271 enum device_endian endian)
4278 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4281 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
4282 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
4283 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
4284 #if defined(TARGET_WORDS_BIGENDIAN)
4285 if (endian == DEVICE_LITTLE_ENDIAN) {
4289 if (endian == DEVICE_BIG_ENDIAN) {
4293 io_mem_write(io_index, addr, val, 4);
4295 unsigned long addr1;
4296 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4298 ptr = qemu_get_ram_ptr(addr1);
4300 case DEVICE_LITTLE_ENDIAN:
4303 case DEVICE_BIG_ENDIAN:
4310 if (!cpu_physical_memory_is_dirty(addr1)) {
4311 /* invalidate code */
4312 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4314 cpu_physical_memory_set_dirty_flags(addr1,
4315 (0xff & ~CODE_DIRTY_FLAG));
4320 void stl_phys(target_phys_addr_t addr, uint32_t val)
4322 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4325 void stl_le_phys(target_phys_addr_t addr, uint32_t val)
4327 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4330 void stl_be_phys(target_phys_addr_t addr, uint32_t val)
4332 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4336 void stb_phys(target_phys_addr_t addr, uint32_t val)
4339 cpu_physical_memory_write(addr, &v, 1);
4342 /* warning: addr must be aligned */
4343 static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
4344 enum device_endian endian)
4351 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4354 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
4355 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
4356 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
4357 #if defined(TARGET_WORDS_BIGENDIAN)
4358 if (endian == DEVICE_LITTLE_ENDIAN) {
4362 if (endian == DEVICE_BIG_ENDIAN) {
4366 io_mem_write(io_index, addr, val, 2);
4368 unsigned long addr1;
4369 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4371 ptr = qemu_get_ram_ptr(addr1);
4373 case DEVICE_LITTLE_ENDIAN:
4376 case DEVICE_BIG_ENDIAN:
4383 if (!cpu_physical_memory_is_dirty(addr1)) {
4384 /* invalidate code */
4385 tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
4387 cpu_physical_memory_set_dirty_flags(addr1,
4388 (0xff & ~CODE_DIRTY_FLAG));
4393 void stw_phys(target_phys_addr_t addr, uint32_t val)
4395 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4398 void stw_le_phys(target_phys_addr_t addr, uint32_t val)
4400 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4403 void stw_be_phys(target_phys_addr_t addr, uint32_t val)
4405 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4409 void stq_phys(target_phys_addr_t addr, uint64_t val)
4412 cpu_physical_memory_write(addr, &val, 8);
4415 void stq_le_phys(target_phys_addr_t addr, uint64_t val)
4417 val = cpu_to_le64(val);
4418 cpu_physical_memory_write(addr, &val, 8);
4421 void stq_be_phys(target_phys_addr_t addr, uint64_t val)
4423 val = cpu_to_be64(val);
4424 cpu_physical_memory_write(addr, &val, 8);
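/* Illustrative sketch: a device with a little-endian register layout can use
   the _le_ accessors regardless of TARGET_WORDS_BIGENDIAN and let the helpers
   do the byte swapping. The descriptor layout is hypothetical. */
#if 0 /* example only */
static void example_le_descriptor(target_phys_addr_t desc_pa)
{
    uint32_t buf_addr = ldl_le_phys(desc_pa);      /* 32-bit LE field */

    stw_le_phys(desc_pa + 4, 0x0001);              /* 16-bit LE status */
    (void)buf_addr;
}
#endif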
4427 /* virtual memory access for debug (includes writing to ROM) */
4428 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
4429 uint8_t *buf, int len, int is_write)
4432 target_phys_addr_t phys_addr;
4436 page = addr & TARGET_PAGE_MASK;
4437 phys_addr = cpu_get_phys_page_debug(env, page);
4438 /* if no physical page mapped, return an error */
4439 if (phys_addr == -1)
4441 l = (page + TARGET_PAGE_SIZE) - addr;
4444 phys_addr += (addr & ~TARGET_PAGE_MASK);
4446 cpu_physical_memory_write_rom(phys_addr, buf, l);
4448 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
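/* Illustrative sketch: the debug accessor above is what monitor/gdbstub code
   uses to peek at guest virtual memory; env and the address are supplied by
   the hypothetical caller. */
#if 0 /* example only */
static uint32_t example_peek_guest_u32(CPUState *env, target_ulong va)
{
    uint8_t buf[4] = { 0 };

    if (cpu_memory_rw_debug(env, va, buf, sizeof(buf), 0) < 0) {
        return 0; /* no physical page mapped at this address */
    }
    return ldl_p(buf);
}
#endif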
4457 /* in deterministic execution mode, instructions doing device I/Os
4458 must be at the end of the TB */
4459 void cpu_io_recompile(CPUState *env, void *retaddr)
4461 TranslationBlock *tb;
4463 target_ulong pc, cs_base;
4466 tb = tb_find_pc((unsigned long)retaddr);
4468 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
4471 n = env->icount_decr.u16.low + tb->icount;
4472 cpu_restore_state(tb, env, (unsigned long)retaddr);
4473 /* Calculate how many instructions had been executed before the fault occurred. */
4475 n = n - env->icount_decr.u16.low;
4476 /* Generate a new TB ending on the I/O insn. */
4478 /* On MIPS and SH, delay slot instructions can only be restarted if
4479 they were already the first instruction in the TB. If this is not
4480 the first instruction in a TB then re-execute the preceding branch. */
4482 #if defined(TARGET_MIPS)
4483 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4484 env->active_tc.PC -= 4;
4485 env->icount_decr.u16.low++;
4486 env->hflags &= ~MIPS_HFLAG_BMASK;
4488 #elif defined(TARGET_SH4)
4489 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4492 env->icount_decr.u16.low++;
4493 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4496 /* This should never happen. */
4497 if (n > CF_COUNT_MASK)
4498 cpu_abort(env, "TB too big during recompile");
4500 cflags = n | CF_LAST_IO;
4502 cs_base = tb->cs_base;
4504 tb_phys_invalidate(tb, -1);
4505 /* FIXME: In theory this could raise an exception. In practice
4506 we have already translated the block once so it's probably ok. */
4507 tb_gen_code(env, pc, cs_base, flags, cflags);
4508 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
4509 the first in the TB) then we end up generating a whole new TB and
4510 repeating the fault, which is horribly inefficient.
4511 Better would be to execute just this insn uncached, or generate a second new TB. */
4513 cpu_resume_from_signal(env, NULL);
4516 #if !defined(CONFIG_USER_ONLY)
4518 void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
4520 int i, target_code_size, max_target_code_size;
4521 int direct_jmp_count, direct_jmp2_count, cross_page;
4522 TranslationBlock *tb;
4524 target_code_size = 0;
4525 max_target_code_size = 0;
4527 direct_jmp_count = 0;
4528 direct_jmp2_count = 0;
4529 for(i = 0; i < nb_tbs; i++) {
4531 target_code_size += tb->size;
4532 if (tb->size > max_target_code_size)
4533 max_target_code_size = tb->size;
4534 if (tb->page_addr[1] != -1)
4536 if (tb->tb_next_offset[0] != 0xffff) {
4538 if (tb->tb_next_offset[1] != 0xffff) {
4539 direct_jmp2_count++;
4543 /* XXX: avoid using doubles ? */
4544 cpu_fprintf(f, "Translation buffer state:\n");
4545 cpu_fprintf(f, "gen code size %td/%ld\n",
4546 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4547 cpu_fprintf(f, "TB count %d/%d\n",
4548 nb_tbs, code_gen_max_blocks);
4549 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
4550 nb_tbs ? target_code_size / nb_tbs : 0,
4551 max_target_code_size);
4552 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
4553 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4554 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
4555 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4557 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4558 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
4560 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4562 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
4563 cpu_fprintf(f, "\nStatistics:\n");
4564 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4565 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4566 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
4567 tcg_dump_info(f, cpu_fprintf);
4570 /* NOTE: this function can trigger an exception */
4571 /* NOTE2: the returned address is not exactly the physical address: it
4572 is the offset relative to phys_ram_base */
4573 tb_page_addr_t get_page_addr_code(CPUState *env1, target_ulong addr)
4575 int mmu_idx, page_index, pd;
4578 page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
4579 mmu_idx = cpu_mmu_index(env1);
4580 if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
4581 (addr & TARGET_PAGE_MASK))) {
4584 pd = env1->tlb_table[mmu_idx][page_index].addr_code & ~TARGET_PAGE_MASK;
4585 if (pd != io_mem_ram.ram_addr && pd != io_mem_rom.ram_addr
4587 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_SPARC)
4588 cpu_unassigned_access(env1, addr, 0, 1, 0, 4);
4590 cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
4593 p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
4594 return qemu_ram_addr_from_host_nofail(p);
4598 * A helper function for the _utterly broken_ virtio device model to find out if
4599 * it's running on a big endian machine. Don't do this at home kids!
4601 bool virtio_is_big_endian(void);
4602 bool virtio_is_big_endian(void)
4604 #if defined(TARGET_WORDS_BIGENDIAN)
4611 #define MMUSUFFIX _cmmu
4613 #define GETPC() NULL
4614 #define env cpu_single_env
4615 #define SOFTMMU_CODE_ACCESS
4618 #include "softmmu_template.h"
4621 #include "softmmu_template.h"
4624 #include "softmmu_template.h"
4627 #include "softmmu_template.h"