2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
23 #include <sys/types.h>
27 #include "qemu-common.h"
35 #include "qemu-timer.h"
37 #include "exec-memory.h"
38 #if defined(CONFIG_USER_ONLY)
40 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
41 #include <sys/param.h>
42 #if __FreeBSD_version >= 700104
43 #define HAVE_KINFO_GETVMMAP
44 #define sigqueue sigqueue_freebsd /* avoid redefinition */
47 #include <machine/profile.h>
55 #else /* !CONFIG_USER_ONLY */
56 #include "xen-mapcache.h"
60 #define WANT_EXEC_OBSOLETE
61 #include "exec-obsolete.h"
63 //#define DEBUG_TB_INVALIDATE
66 //#define DEBUG_UNASSIGNED
68 /* make various TB consistency checks */
69 //#define DEBUG_TB_CHECK
70 //#define DEBUG_TLB_CHECK
72 //#define DEBUG_IOPORT
73 //#define DEBUG_SUBPAGE
75 #if !defined(CONFIG_USER_ONLY)
76 /* TB consistency checks only implemented for usermode emulation. */
80 #define SMC_BITMAP_USE_THRESHOLD 10
82 static TranslationBlock *tbs;
83 static int code_gen_max_blocks;
84 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
86 /* any access to the tbs or the page table must use this lock */
87 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
89 #if defined(__arm__) || defined(__sparc_v9__)
90 /* The prologue must be reachable with a direct jump. ARM and Sparc64
91 have limited branch ranges (possibly also PPC), so place it in a
92 section close to the code segment. */
93 #define code_gen_section \
94 __attribute__((__section__(".gen_code"))) \
95 __attribute__((aligned (32)))
97 /* Maximum alignment for Win32 is 16. */
98 #define code_gen_section \
99 __attribute__((aligned (16)))
101 #define code_gen_section \
102 __attribute__((aligned (32)))
105 uint8_t code_gen_prologue[1024] code_gen_section;
106 static uint8_t *code_gen_buffer;
107 static unsigned long code_gen_buffer_size;
108 /* threshold to flush the translated code buffer */
109 static unsigned long code_gen_buffer_max_size;
110 static uint8_t *code_gen_ptr;
112 #if !defined(CONFIG_USER_ONLY)
114 static int in_migration;
116 RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
118 static MemoryRegion *system_memory;
119 static MemoryRegion *system_io;
124 /* current CPU in the current thread. It is only valid inside cpu_exec(). */
126 DEFINE_TLS(CPUState *,cpu_single_env);
127 /* 0 = Do not count executed instructions.
128 1 = Precise instruction counting.
129 2 = Adaptive rate instruction counting. */
132 typedef struct PageDesc {
133 /* list of TBs intersecting this ram page */
134 TranslationBlock *first_tb;
135 /* in order to optimize self-modifying code handling, we count the number
136 of code write accesses to a given page; past a threshold we switch to a bitmap */
137 unsigned int code_write_count;
138 uint8_t *code_bitmap;
139 #if defined(CONFIG_USER_ONLY)
144 /* In system mode we want L1_MAP to be based on ram offsets,
145 while in user mode we want it to be based on virtual addresses. */
146 #if !defined(CONFIG_USER_ONLY)
147 #if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
148 # define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
150 # define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
153 # define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
156 /* Size of the L2 (and L3, etc) page tables. */
158 #define L2_SIZE (1 << L2_BITS)
160 /* The bits remaining after N lower levels of page tables. */
161 #define P_L1_BITS_REM \
162 ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
163 #define V_L1_BITS_REM \
164 ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
166 /* Size of the L1 page table. Avoid silly small sizes. */
167 #if P_L1_BITS_REM < 4
168 #define P_L1_BITS (P_L1_BITS_REM + L2_BITS)
170 #define P_L1_BITS P_L1_BITS_REM
173 #if V_L1_BITS_REM < 4
174 #define V_L1_BITS (V_L1_BITS_REM + L2_BITS)
176 #define V_L1_BITS V_L1_BITS_REM
179 #define P_L1_SIZE ((target_phys_addr_t)1 << P_L1_BITS)
180 #define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)
182 #define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
183 #define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
185 unsigned long qemu_real_host_page_size;
186 unsigned long qemu_host_page_size;
187 unsigned long qemu_host_page_mask;
189 /* This is a multi-level map on the virtual address space.
190 The bottom level has pointers to PageDesc. */
191 static void *l1_map[V_L1_SIZE];
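/* For illustration (the exact bit widths depend on the target
   configuration): a page index is split into a top-level index of
   V_L1_BITS bits and then L2_BITS bits per lower level, roughly:
       l1_map[(index >> V_L1_SHIFT) & (V_L1_SIZE - 1)]  -> next level
       level[(index >> (i * L2_BITS)) & (L2_SIZE - 1)]  -> ... -> PageDesc
   page_find_alloc() below walks this chain, allocating the intermediate
   tables on demand when 'alloc' is set. */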
193 #if !defined(CONFIG_USER_ONLY)
194 typedef struct PhysPageDesc {
195 /* offset in host memory of the page + io_index in the low bits */
196 ram_addr_t phys_offset;
197 ram_addr_t region_offset;
200 /* This is a multi-level map on the physical address space.
201 The bottom level has pointers to PhysPageDesc. */
202 static void *l1_phys_map[P_L1_SIZE];
204 static void io_mem_init(void);
205 static void memory_map_init(void);
207 /* io memory support */
208 CPUWriteMemoryFunc *_io_mem_write[IO_MEM_NB_ENTRIES][4];
209 CPUReadMemoryFunc *_io_mem_read[IO_MEM_NB_ENTRIES][4];
210 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
211 static char io_mem_used[IO_MEM_NB_ENTRIES];
212 static int io_mem_watch;
217 static const char *logfilename = "qemu.log";
219 static const char *logfilename = "/tmp/qemu.log";
223 static int log_append = 0;
226 #if !defined(CONFIG_USER_ONLY)
227 static int tlb_flush_count;
229 static int tb_flush_count;
230 static int tb_phys_invalidate_count;
233 static void map_exec(void *addr, long size)
236 VirtualProtect(addr, size,
237 PAGE_EXECUTE_READWRITE, &old_protect);
241 static void map_exec(void *addr, long size)
243 unsigned long start, end, page_size;
245 page_size = getpagesize();
246 start = (unsigned long)addr;
247 start &= ~(page_size - 1);
249 end = (unsigned long)addr + size;
250 end += page_size - 1;
251 end &= ~(page_size - 1);
253 mprotect((void *)start, end - start,
254 PROT_READ | PROT_WRITE | PROT_EXEC);
258 static void page_init(void)
260 /* NOTE: we can always suppose that qemu_host_page_size >= TARGET_PAGE_SIZE */
264 SYSTEM_INFO system_info;
266 GetSystemInfo(&system_info);
267 qemu_real_host_page_size = system_info.dwPageSize;
270 qemu_real_host_page_size = getpagesize();
272 if (qemu_host_page_size == 0)
273 qemu_host_page_size = qemu_real_host_page_size;
274 if (qemu_host_page_size < TARGET_PAGE_SIZE)
275 qemu_host_page_size = TARGET_PAGE_SIZE;
276 qemu_host_page_mask = ~(qemu_host_page_size - 1);
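/* For example, with a 4096-byte host page qemu_host_page_mask is
   ~0xfff, so (addr & qemu_host_page_mask) rounds an address down to
   the start of its host page (assuming a power-of-two page size). */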
278 #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
280 #ifdef HAVE_KINFO_GETVMMAP
281 struct kinfo_vmentry *freep;
284 freep = kinfo_getvmmap(getpid(), &cnt);
287 for (i = 0; i < cnt; i++) {
288 unsigned long startaddr, endaddr;
290 startaddr = freep[i].kve_start;
291 endaddr = freep[i].kve_end;
292 if (h2g_valid(startaddr)) {
293 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
295 if (h2g_valid(endaddr)) {
296 endaddr = h2g(endaddr);
297 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
299 #if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
301 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
312 last_brk = (unsigned long)sbrk(0);
314 f = fopen("/compat/linux/proc/self/maps", "r");
319 unsigned long startaddr, endaddr;
322 n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
324 if (n == 2 && h2g_valid(startaddr)) {
325 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
327 if (h2g_valid(endaddr)) {
328 endaddr = h2g(endaddr);
332 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
344 static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
350 #if defined(CONFIG_USER_ONLY)
351 /* We can't use g_malloc because it may recurse into a locked mutex. */
352 # define ALLOC(P, SIZE) \
354 P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE, \
355 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); \
358 # define ALLOC(P, SIZE) \
359 do { P = g_malloc0(SIZE); } while (0)
362 /* Level 1. Always allocated. */
363 lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));
366 for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
373 ALLOC(p, sizeof(void *) * L2_SIZE);
377 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
385 ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
391 return pd + (index & (L2_SIZE - 1));
394 static inline PageDesc *page_find(tb_page_addr_t index)
396 return page_find_alloc(index, 0);
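/* Note: with alloc == 0 the walk does not create intermediate tables,
   so page_find() returns NULL for pages that have never been touched. */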
399 #if !defined(CONFIG_USER_ONLY)
400 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
406 /* Level 1. Always allocated. */
407 lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));
410 for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
416 *lp = p = g_malloc0(sizeof(void *) * L2_SIZE);
418 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
424 int first_index = index & ~(L2_SIZE - 1);
430 *lp = pd = g_malloc(sizeof(PhysPageDesc) * L2_SIZE);
432 for (i = 0; i < L2_SIZE; i++) {
433 pd[i].phys_offset = IO_MEM_UNASSIGNED;
434 pd[i].region_offset = (first_index + i) << TARGET_PAGE_BITS;
438 return pd + (index & (L2_SIZE - 1));
441 static inline PhysPageDesc phys_page_find(target_phys_addr_t index)
443 PhysPageDesc *p = phys_page_find_alloc(index, 0);
448 return (PhysPageDesc) {
449 .phys_offset = IO_MEM_UNASSIGNED,
450 .region_offset = index << TARGET_PAGE_BITS,
455 static void tlb_protect_code(ram_addr_t ram_addr);
456 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
458 #define mmap_lock() do { } while(0)
459 #define mmap_unlock() do { } while(0)
462 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
464 #if defined(CONFIG_USER_ONLY)
465 /* Currently it is not recommended to allocate big chunks of data in
466 user mode. It will change when a dedicated libc is used. */
467 #define USE_STATIC_CODE_GEN_BUFFER
470 #ifdef USE_STATIC_CODE_GEN_BUFFER
471 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
472 __attribute__((aligned (CODE_GEN_ALIGN)));
475 static void code_gen_alloc(unsigned long tb_size)
477 #ifdef USE_STATIC_CODE_GEN_BUFFER
478 code_gen_buffer = static_code_gen_buffer;
479 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
480 map_exec(code_gen_buffer, code_gen_buffer_size);
482 code_gen_buffer_size = tb_size;
483 if (code_gen_buffer_size == 0) {
484 #if defined(CONFIG_USER_ONLY)
485 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
487 /* XXX: needs adjustments */
488 code_gen_buffer_size = (unsigned long)(ram_size / 4);
491 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
492 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
493 /* The code gen buffer location may have constraints depending on
494 the host cpu and OS */
495 #if defined(__linux__)
500 flags = MAP_PRIVATE | MAP_ANONYMOUS;
501 #if defined(__x86_64__)
503 /* Cannot map more than that */
504 if (code_gen_buffer_size > (800 * 1024 * 1024))
505 code_gen_buffer_size = (800 * 1024 * 1024);
506 #elif defined(__sparc_v9__)
507 // Map the buffer below 2G, so we can use direct calls and branches
509 start = (void *) 0x60000000UL;
510 if (code_gen_buffer_size > (512 * 1024 * 1024))
511 code_gen_buffer_size = (512 * 1024 * 1024);
512 #elif defined(__arm__)
513 /* Keep the buffer no bigger than 16MB to branch between blocks */
514 if (code_gen_buffer_size > 16 * 1024 * 1024)
515 code_gen_buffer_size = 16 * 1024 * 1024;
516 #elif defined(__s390x__)
517 /* Map the buffer so that we can use direct calls and branches. */
518 /* We have a +- 4GB range on the branches; leave some slop. */
519 if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
520 code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
522 start = (void *)0x90000000UL;
524 code_gen_buffer = mmap(start, code_gen_buffer_size,
525 PROT_WRITE | PROT_READ | PROT_EXEC,
527 if (code_gen_buffer == MAP_FAILED) {
528 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
532 #elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
533 || defined(__DragonFly__) || defined(__OpenBSD__) \
534 || defined(__NetBSD__)
538 flags = MAP_PRIVATE | MAP_ANONYMOUS;
539 #if defined(__x86_64__)
540 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
541 * 0x40000000 is free */
543 addr = (void *)0x40000000;
544 /* Cannot map more than that */
545 if (code_gen_buffer_size > (800 * 1024 * 1024))
546 code_gen_buffer_size = (800 * 1024 * 1024);
547 #elif defined(__sparc_v9__)
548 // Map the buffer below 2G, so we can use direct calls and branches
550 addr = (void *) 0x60000000UL;
551 if (code_gen_buffer_size > (512 * 1024 * 1024)) {
552 code_gen_buffer_size = (512 * 1024 * 1024);
555 code_gen_buffer = mmap(addr, code_gen_buffer_size,
556 PROT_WRITE | PROT_READ | PROT_EXEC,
558 if (code_gen_buffer == MAP_FAILED) {
559 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
564 code_gen_buffer = g_malloc(code_gen_buffer_size);
565 map_exec(code_gen_buffer, code_gen_buffer_size);
567 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
568 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
569 code_gen_buffer_max_size = code_gen_buffer_size -
570 (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
571 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
572 tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
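/* Sizing note: code_gen_buffer_max_size keeps roughly one worst-case
   translation (TCG_MAX_OP_SIZE * OPC_BUF_SIZE bytes) of headroom, so a
   block started just under the threshold can still be generated before
   the buffer has to be flushed; code_gen_max_blocks bounds tbs[]. */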
575 /* Must be called before using the QEMU cpus. 'tb_size' is the size
576 (in bytes) allocated to the translation buffer. Zero means the default size. */
578 void tcg_exec_init(unsigned long tb_size)
581 code_gen_alloc(tb_size);
582 code_gen_ptr = code_gen_buffer;
584 #if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
585 /* There's no guest base to take into account, so go ahead and
586 initialize the prologue now. */
587 tcg_prologue_init(&tcg_ctx);
591 bool tcg_enabled(void)
593 return code_gen_buffer != NULL;
596 void cpu_exec_init_all(void)
598 #if !defined(CONFIG_USER_ONLY)
604 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
606 static int cpu_common_post_load(void *opaque, int version_id)
608 CPUState *env = opaque;
610 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
611 version_id is increased. */
612 env->interrupt_request &= ~0x01;
618 static const VMStateDescription vmstate_cpu_common = {
619 .name = "cpu_common",
621 .minimum_version_id = 1,
622 .minimum_version_id_old = 1,
623 .post_load = cpu_common_post_load,
624 .fields = (VMStateField []) {
625 VMSTATE_UINT32(halted, CPUState),
626 VMSTATE_UINT32(interrupt_request, CPUState),
627 VMSTATE_END_OF_LIST()
632 CPUState *qemu_get_cpu(int cpu)
634 CPUState *env = first_cpu;
637 if (env->cpu_index == cpu)
645 void cpu_exec_init(CPUState *env)
650 #if defined(CONFIG_USER_ONLY)
653 env->next_cpu = NULL;
656 while (*penv != NULL) {
657 penv = &(*penv)->next_cpu;
660 env->cpu_index = cpu_index;
662 QTAILQ_INIT(&env->breakpoints);
663 QTAILQ_INIT(&env->watchpoints);
664 #ifndef CONFIG_USER_ONLY
665 env->thread_id = qemu_get_thread_id();
668 #if defined(CONFIG_USER_ONLY)
671 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
672 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
673 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
674 cpu_save, cpu_load, env);
678 /* Allocate a new translation block. Flush the translation buffer if
679 too many translation blocks or too much generated code. */
680 static TranslationBlock *tb_alloc(target_ulong pc)
682 TranslationBlock *tb;
684 if (nb_tbs >= code_gen_max_blocks ||
685 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
693 void tb_free(TranslationBlock *tb)
695 /* In practice this is mostly used for single-use temporary TBs.
696 Ignore the hard cases and just back up if this TB happens to
697 be the last one generated. */
698 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
699 code_gen_ptr = tb->tc_ptr;
704 static inline void invalidate_page_bitmap(PageDesc *p)
706 if (p->code_bitmap) {
707 g_free(p->code_bitmap);
708 p->code_bitmap = NULL;
710 p->code_write_count = 0;
713 /* Set to NULL all the 'first_tb' fields in all PageDescs. */
715 static void page_flush_tb_1 (int level, void **lp)
724 for (i = 0; i < L2_SIZE; ++i) {
725 pd[i].first_tb = NULL;
726 invalidate_page_bitmap(pd + i);
730 for (i = 0; i < L2_SIZE; ++i) {
731 page_flush_tb_1 (level - 1, pp + i);
736 static void page_flush_tb(void)
739 for (i = 0; i < V_L1_SIZE; i++) {
740 page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
744 /* flush all the translation blocks */
745 /* XXX: tb_flush is currently not thread safe */
746 void tb_flush(CPUState *env1)
749 #if defined(DEBUG_FLUSH)
750 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
751 (unsigned long)(code_gen_ptr - code_gen_buffer),
753 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
755 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
756 cpu_abort(env1, "Internal error: code buffer overflow\n");
760 for(env = first_cpu; env != NULL; env = env->next_cpu) {
761 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
764 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
767 code_gen_ptr = code_gen_buffer;
768 /* XXX: flush processor icache at this point if cache flush is expensive */
773 #ifdef DEBUG_TB_CHECK
775 static void tb_invalidate_check(target_ulong address)
777 TranslationBlock *tb;
779 address &= TARGET_PAGE_MASK;
780 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
781 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
782 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
783 address >= tb->pc + tb->size)) {
784 printf("ERROR invalidate: address=" TARGET_FMT_lx
785 " PC=%08lx size=%04x\n",
786 address, (long)tb->pc, tb->size);
792 /* verify that all the pages have correct rights for code */
793 static void tb_page_check(void)
795 TranslationBlock *tb;
796 int i, flags1, flags2;
798 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
799 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
800 flags1 = page_get_flags(tb->pc);
801 flags2 = page_get_flags(tb->pc + tb->size - 1);
802 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
803 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
804 (long)tb->pc, tb->size, flags1, flags2);
812 /* invalidate one TB */
813 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
816 TranslationBlock *tb1;
820 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
823 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
827 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
829 TranslationBlock *tb1;
835 tb1 = (TranslationBlock *)((long)tb1 & ~3);
837 *ptb = tb1->page_next[n1];
840 ptb = &tb1->page_next[n1];
844 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
846 TranslationBlock *tb1, **ptb;
849 ptb = &tb->jmp_next[n];
852 /* find tb(n) in circular list */
856 tb1 = (TranslationBlock *)((long)tb1 & ~3);
857 if (n1 == n && tb1 == tb)
860 ptb = &tb1->jmp_first;
862 ptb = &tb1->jmp_next[n1];
865 /* now we can suppress tb(n) from the list */
866 *ptb = tb->jmp_next[n];
868 tb->jmp_next[n] = NULL;
872 /* reset the jump entry 'n' of a TB so that it is not chained to another TB */
874 static inline void tb_reset_jump(TranslationBlock *tb, int n)
876 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
879 void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
884 tb_page_addr_t phys_pc;
885 TranslationBlock *tb1, *tb2;
887 /* remove the TB from the hash list */
888 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
889 h = tb_phys_hash_func(phys_pc);
890 tb_remove(&tb_phys_hash[h], tb,
891 offsetof(TranslationBlock, phys_hash_next));
893 /* remove the TB from the page list */
894 if (tb->page_addr[0] != page_addr) {
895 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
896 tb_page_remove(&p->first_tb, tb);
897 invalidate_page_bitmap(p);
899 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
900 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
901 tb_page_remove(&p->first_tb, tb);
902 invalidate_page_bitmap(p);
905 tb_invalidated_flag = 1;
907 /* remove the TB from each CPU's jump cache */
908 h = tb_jmp_cache_hash_func(tb->pc);
909 for(env = first_cpu; env != NULL; env = env->next_cpu) {
910 if (env->tb_jmp_cache[h] == tb)
911 env->tb_jmp_cache[h] = NULL;
914 /* suppress this TB from the two jump lists */
915 tb_jmp_remove(tb, 0);
916 tb_jmp_remove(tb, 1);
918 /* suppress any remaining jumps to this TB */
924 tb1 = (TranslationBlock *)((long)tb1 & ~3);
925 tb2 = tb1->jmp_next[n1];
926 tb_reset_jump(tb1, n1);
927 tb1->jmp_next[n1] = NULL;
930 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
932 tb_phys_invalidate_count++;
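/* Set bits [start, start + len) in the byte-granularity bitmap 'tab';
   build_page_bitmap() below uses it to mark which bytes of a guest page
   are covered by translated code. */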
935 static inline void set_bits(uint8_t *tab, int start, int len)
941 mask = 0xff << (start & 7);
942 if ((start & ~7) == (end & ~7)) {
944 mask &= ~(0xff << (end & 7));
949 start = (start + 8) & ~7;
951 while (start < end1) {
956 mask = ~(0xff << (end & 7));
962 static void build_page_bitmap(PageDesc *p)
964 int n, tb_start, tb_end;
965 TranslationBlock *tb;
967 p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);
972 tb = (TranslationBlock *)((long)tb & ~3);
973 /* NOTE: this is subtle as a TB may span two physical pages */
975 /* NOTE: tb_end may be after the end of the page, but
976 it is not a problem */
977 tb_start = tb->pc & ~TARGET_PAGE_MASK;
978 tb_end = tb_start + tb->size;
979 if (tb_end > TARGET_PAGE_SIZE)
980 tb_end = TARGET_PAGE_SIZE;
983 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
985 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
986 tb = tb->page_next[n];
990 TranslationBlock *tb_gen_code(CPUState *env,
991 target_ulong pc, target_ulong cs_base,
992 int flags, int cflags)
994 TranslationBlock *tb;
996 tb_page_addr_t phys_pc, phys_page2;
997 target_ulong virt_page2;
1000 phys_pc = get_page_addr_code(env, pc);
1003 /* flush must be done */
1005 /* cannot fail at this point */
1007 /* Don't forget to invalidate previous TB info. */
1008 tb_invalidated_flag = 1;
1010 tc_ptr = code_gen_ptr;
1011 tb->tc_ptr = tc_ptr;
1012 tb->cs_base = cs_base;
1014 tb->cflags = cflags;
1015 cpu_gen_code(env, tb, &code_gen_size);
1016 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
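/* (the expression above rounds code_gen_ptr up to the next
   CODE_GEN_ALIGN boundary so the next translation starts aligned) */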
1018 /* check next page if needed */
1019 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
1021 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
1022 phys_page2 = get_page_addr_code(env, virt_page2);
1024 tb_link_page(tb, phys_pc, phys_page2);
1028 /* invalidate all TBs which intersect with the target physical page
1029 starting in range [start;end[. NOTE: start and end must refer to
1030 the same physical page. 'is_cpu_write_access' should be true if called
1031 from a real cpu write access: the virtual CPU will exit the current
1032 TB if code is modified inside this TB. */
1033 void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
1034 int is_cpu_write_access)
1036 TranslationBlock *tb, *tb_next, *saved_tb;
1037 CPUState *env = cpu_single_env;
1038 tb_page_addr_t tb_start, tb_end;
1041 #ifdef TARGET_HAS_PRECISE_SMC
1042 int current_tb_not_found = is_cpu_write_access;
1043 TranslationBlock *current_tb = NULL;
1044 int current_tb_modified = 0;
1045 target_ulong current_pc = 0;
1046 target_ulong current_cs_base = 0;
1047 int current_flags = 0;
1048 #endif /* TARGET_HAS_PRECISE_SMC */
1050 p = page_find(start >> TARGET_PAGE_BITS);
1053 if (!p->code_bitmap &&
1054 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1055 is_cpu_write_access) {
1056 /* build code bitmap */
1057 build_page_bitmap(p);
1060 /* we remove all the TBs in the range [start, end[ */
1061 /* XXX: see if in some cases it could be faster to invalidate all the code */
1063 while (tb != NULL) {
1065 tb = (TranslationBlock *)((long)tb & ~3);
1066 tb_next = tb->page_next[n];
1067 /* NOTE: this is subtle as a TB may span two physical pages */
1069 /* NOTE: tb_end may be after the end of the page, but
1070 it is not a problem */
1071 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1072 tb_end = tb_start + tb->size;
1074 tb_start = tb->page_addr[1];
1075 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1077 if (!(tb_end <= start || tb_start >= end)) {
1078 #ifdef TARGET_HAS_PRECISE_SMC
1079 if (current_tb_not_found) {
1080 current_tb_not_found = 0;
1082 if (env->mem_io_pc) {
1083 /* now we have a real cpu fault */
1084 current_tb = tb_find_pc(env->mem_io_pc);
1087 if (current_tb == tb &&
1088 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1089 /* If we are modifying the current TB, we must stop
1090 its execution. We could be more precise by checking
1091 that the modification is after the current PC, but it
1092 would require a specialized function to partially
1093 restore the CPU state */
1095 current_tb_modified = 1;
1096 cpu_restore_state(current_tb, env, env->mem_io_pc);
1097 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1100 #endif /* TARGET_HAS_PRECISE_SMC */
1101 /* we need to do that to handle the case where a signal
1102 occurs while doing tb_phys_invalidate() */
1105 saved_tb = env->current_tb;
1106 env->current_tb = NULL;
1108 tb_phys_invalidate(tb, -1);
1110 env->current_tb = saved_tb;
1111 if (env->interrupt_request && env->current_tb)
1112 cpu_interrupt(env, env->interrupt_request);
1117 #if !defined(CONFIG_USER_ONLY)
1118 /* if no code remains, there is no need to keep using slow writes */
1120 invalidate_page_bitmap(p);
1121 if (is_cpu_write_access) {
1122 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1126 #ifdef TARGET_HAS_PRECISE_SMC
1127 if (current_tb_modified) {
1128 /* we generate a block containing just the instruction
1129 modifying the memory. It will ensure that it cannot modify itself. */
1131 env->current_tb = NULL;
1132 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1133 cpu_resume_from_signal(env, NULL);
1138 /* len must be <= 8 and start must be a multiple of len */
1139 static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
1145 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1146 cpu_single_env->mem_io_vaddr, len,
1147 cpu_single_env->eip,
1148 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1151 p = page_find(start >> TARGET_PAGE_BITS);
1154 if (p->code_bitmap) {
1155 offset = start & ~TARGET_PAGE_MASK;
1156 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1157 if (b & ((1 << len) - 1))
1161 tb_invalidate_phys_page_range(start, start + len, 1);
1165 #if !defined(CONFIG_SOFTMMU)
1166 static void tb_invalidate_phys_page(tb_page_addr_t addr,
1167 unsigned long pc, void *puc)
1169 TranslationBlock *tb;
1172 #ifdef TARGET_HAS_PRECISE_SMC
1173 TranslationBlock *current_tb = NULL;
1174 CPUState *env = cpu_single_env;
1175 int current_tb_modified = 0;
1176 target_ulong current_pc = 0;
1177 target_ulong current_cs_base = 0;
1178 int current_flags = 0;
1181 addr &= TARGET_PAGE_MASK;
1182 p = page_find(addr >> TARGET_PAGE_BITS);
1186 #ifdef TARGET_HAS_PRECISE_SMC
1187 if (tb && pc != 0) {
1188 current_tb = tb_find_pc(pc);
1191 while (tb != NULL) {
1193 tb = (TranslationBlock *)((long)tb & ~3);
1194 #ifdef TARGET_HAS_PRECISE_SMC
1195 if (current_tb == tb &&
1196 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1197 /* If we are modifying the current TB, we must stop
1198 its execution. We could be more precise by checking
1199 that the modification is after the current PC, but it
1200 would require a specialized function to partially
1201 restore the CPU state */
1203 current_tb_modified = 1;
1204 cpu_restore_state(current_tb, env, pc);
1205 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1208 #endif /* TARGET_HAS_PRECISE_SMC */
1209 tb_phys_invalidate(tb, addr);
1210 tb = tb->page_next[n];
1213 #ifdef TARGET_HAS_PRECISE_SMC
1214 if (current_tb_modified) {
1215 /* we generate a block containing just the instruction
1216 modifying the memory. It will ensure that it cannot modify itself. */
1218 env->current_tb = NULL;
1219 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1220 cpu_resume_from_signal(env, puc);
1226 /* add the tb to the target page and protect it if necessary */
1227 static inline void tb_alloc_page(TranslationBlock *tb,
1228 unsigned int n, tb_page_addr_t page_addr)
1231 #ifndef CONFIG_USER_ONLY
1232 bool page_already_protected;
1235 tb->page_addr[n] = page_addr;
1236 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
1237 tb->page_next[n] = p->first_tb;
1238 #ifndef CONFIG_USER_ONLY
1239 page_already_protected = p->first_tb != NULL;
1241 p->first_tb = (TranslationBlock *)((long)tb | n);
1242 invalidate_page_bitmap(p);
1244 #if defined(TARGET_HAS_SMC) || 1
1246 #if defined(CONFIG_USER_ONLY)
1247 if (p->flags & PAGE_WRITE) {
1252 /* force the host page as non writable (writes will have a
1253 page fault + mprotect overhead) */
1254 page_addr &= qemu_host_page_mask;
1256 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1257 addr += TARGET_PAGE_SIZE) {
1259 p2 = page_find (addr >> TARGET_PAGE_BITS);
1263 p2->flags &= ~PAGE_WRITE;
1265 mprotect(g2h(page_addr), qemu_host_page_size,
1266 (prot & PAGE_BITS) & ~PAGE_WRITE);
1267 #ifdef DEBUG_TB_INVALIDATE
1268 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1273 /* if some code is already present, then the pages are already
1274 protected. So we handle the case where only the first TB is
1275 allocated in a physical page */
1276 if (!page_already_protected) {
1277 tlb_protect_code(page_addr);
1281 #endif /* TARGET_HAS_SMC */
1284 /* add a new TB and link it to the physical page tables. phys_page2 is
1285 (-1) to indicate that only one page contains the TB. */
1286 void tb_link_page(TranslationBlock *tb,
1287 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
1290 TranslationBlock **ptb;
1292 /* Grab the mmap lock to stop another thread invalidating this TB
1293 before we are done. */
1295 /* add in the physical hash table */
1296 h = tb_phys_hash_func(phys_pc);
1297 ptb = &tb_phys_hash[h];
1298 tb->phys_hash_next = *ptb;
1301 /* add in the page list */
1302 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1303 if (phys_page2 != -1)
1304 tb_alloc_page(tb, 1, phys_page2);
1306 tb->page_addr[1] = -1;
1308 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1309 tb->jmp_next[0] = NULL;
1310 tb->jmp_next[1] = NULL;
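/* Note on the list encoding used here: the low two bits of these
   pointers carry the jump slot (0 or 1) of the referencing TB, which is
   why the traversal code masks them off with ~3 before dereferencing;
   the value (tb | 2) marks a TB with no incoming jumps yet. */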
1312 /* init original jump addresses */
1313 if (tb->tb_next_offset[0] != 0xffff)
1314 tb_reset_jump(tb, 0);
1315 if (tb->tb_next_offset[1] != 0xffff)
1316 tb_reset_jump(tb, 1);
1318 #ifdef DEBUG_TB_CHECK
1324 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1325 tb[1].tc_ptr. Return NULL if not found */
1326 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1328 int m_min, m_max, m;
1330 TranslationBlock *tb;
1334 if (tc_ptr < (unsigned long)code_gen_buffer ||
1335 tc_ptr >= (unsigned long)code_gen_ptr)
1337 /* binary search (cf Knuth) */
1340 while (m_min <= m_max) {
1341 m = (m_min + m_max) >> 1;
1343 v = (unsigned long)tb->tc_ptr;
1346 else if (tc_ptr < v) {
1355 static void tb_reset_jump_recursive(TranslationBlock *tb);
1357 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1359 TranslationBlock *tb1, *tb_next, **ptb;
1362 tb1 = tb->jmp_next[n];
1364 /* find head of list */
1367 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1370 tb1 = tb1->jmp_next[n1];
1372 /* we are now sure that tb jumps to tb1 */
1375 /* remove tb from the jmp_first list */
1376 ptb = &tb_next->jmp_first;
1380 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1381 if (n1 == n && tb1 == tb)
1383 ptb = &tb1->jmp_next[n1];
1385 *ptb = tb->jmp_next[n];
1386 tb->jmp_next[n] = NULL;
1388 /* suppress the jump to next tb in generated code */
1389 tb_reset_jump(tb, n);
1391 /* suppress jumps in the tb on which we could have jumped */
1392 tb_reset_jump_recursive(tb_next);
1396 static void tb_reset_jump_recursive(TranslationBlock *tb)
1398 tb_reset_jump_recursive2(tb, 0);
1399 tb_reset_jump_recursive2(tb, 1);
1402 #if defined(TARGET_HAS_ICE)
1403 #if defined(CONFIG_USER_ONLY)
1404 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1406 tb_invalidate_phys_page_range(pc, pc + 1, 0);
1409 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1411 target_phys_addr_t addr;
1413 ram_addr_t ram_addr;
1416 addr = cpu_get_phys_page_debug(env, pc);
1417 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1419 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1420 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1423 #endif /* TARGET_HAS_ICE */
1425 #if defined(CONFIG_USER_ONLY)
1426 void cpu_watchpoint_remove_all(CPUState *env, int mask)
1431 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1432 int flags, CPUWatchpoint **watchpoint)
1437 /* Add a watchpoint. */
1438 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1439 int flags, CPUWatchpoint **watchpoint)
1441 target_ulong len_mask = ~(len - 1);
1444 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1445 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1446 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1447 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1450 wp = g_malloc(sizeof(*wp));
1453 wp->len_mask = len_mask;
1456 /* keep all GDB-injected watchpoints in front */
1458 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1460 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1462 tlb_flush_page(env, addr);
1469 /* Remove a specific watchpoint. */
1470 int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1473 target_ulong len_mask = ~(len - 1);
1476 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1477 if (addr == wp->vaddr && len_mask == wp->len_mask
1478 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1479 cpu_watchpoint_remove_by_ref(env, wp);
1486 /* Remove a specific watchpoint by reference. */
1487 void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1489 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1491 tlb_flush_page(env, watchpoint->vaddr);
1496 /* Remove all matching watchpoints. */
1497 void cpu_watchpoint_remove_all(CPUState *env, int mask)
1499 CPUWatchpoint *wp, *next;
1501 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1502 if (wp->flags & mask)
1503 cpu_watchpoint_remove_by_ref(env, wp);
1508 /* Add a breakpoint. */
1509 int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1510 CPUBreakpoint **breakpoint)
1512 #if defined(TARGET_HAS_ICE)
1515 bp = g_malloc(sizeof(*bp));
1520 /* keep all GDB-injected breakpoints in front */
1522 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1524 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1526 breakpoint_invalidate(env, pc);
1536 /* Remove a specific breakpoint. */
1537 int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1539 #if defined(TARGET_HAS_ICE)
1542 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1543 if (bp->pc == pc && bp->flags == flags) {
1544 cpu_breakpoint_remove_by_ref(env, bp);
1554 /* Remove a specific breakpoint by reference. */
1555 void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1557 #if defined(TARGET_HAS_ICE)
1558 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1560 breakpoint_invalidate(env, breakpoint->pc);
1566 /* Remove all matching breakpoints. */
1567 void cpu_breakpoint_remove_all(CPUState *env, int mask)
1569 #if defined(TARGET_HAS_ICE)
1570 CPUBreakpoint *bp, *next;
1572 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1573 if (bp->flags & mask)
1574 cpu_breakpoint_remove_by_ref(env, bp);
1579 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1580 CPU loop after each instruction */
1581 void cpu_single_step(CPUState *env, int enabled)
1583 #if defined(TARGET_HAS_ICE)
1584 if (env->singlestep_enabled != enabled) {
1585 env->singlestep_enabled = enabled;
1587 kvm_update_guest_debug(env, 0);
1589 /* must flush all the translated code to avoid inconsistencies */
1590 /* XXX: only flush what is necessary */
1597 /* enable or disable low-level logging */
1598 void cpu_set_log(int log_flags)
1600 loglevel = log_flags;
1601 if (loglevel && !logfile) {
1602 logfile = fopen(logfilename, log_append ? "a" : "w");
1604 perror(logfilename);
1607 #if !defined(CONFIG_SOFTMMU)
1608 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1610 static char logfile_buf[4096];
1611 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1613 #elif defined(_WIN32)
1614 /* Win32 doesn't support line-buffering, so use unbuffered output. */
1615 setvbuf(logfile, NULL, _IONBF, 0);
1617 setvbuf(logfile, NULL, _IOLBF, 0);
1621 if (!loglevel && logfile) {
1627 void cpu_set_log_filename(const char *filename)
1629 logfilename = strdup(filename);
1634 cpu_set_log(loglevel);
1637 static void cpu_unlink_tb(CPUState *env)
1639 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1640 problem and hope the cpu will stop of its own accord. For userspace
1641 emulation this often isn't actually as bad as it sounds. Often
1642 signals are used primarily to interrupt blocking syscalls. */
1643 TranslationBlock *tb;
1644 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1646 spin_lock(&interrupt_lock);
1647 tb = env->current_tb;
1648 /* if the cpu is currently executing code, we must unlink it and
1649 all the potentially executing TBs */
1651 env->current_tb = NULL;
1652 tb_reset_jump_recursive(tb);
1654 spin_unlock(&interrupt_lock);
1657 #ifndef CONFIG_USER_ONLY
1658 /* mask must never be zero, except for A20 change call */
1659 static void tcg_handle_interrupt(CPUState *env, int mask)
1663 old_mask = env->interrupt_request;
1664 env->interrupt_request |= mask;
1667 * If called from iothread context, wake the target cpu in
1670 if (!qemu_cpu_is_self(env)) {
1676 env->icount_decr.u16.high = 0xffff;
1678 && (mask & ~old_mask) != 0) {
1679 cpu_abort(env, "Raised interrupt while not in I/O function");
1686 CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1688 #else /* CONFIG_USER_ONLY */
1690 void cpu_interrupt(CPUState *env, int mask)
1692 env->interrupt_request |= mask;
1695 #endif /* CONFIG_USER_ONLY */
1697 void cpu_reset_interrupt(CPUState *env, int mask)
1699 env->interrupt_request &= ~mask;
1702 void cpu_exit(CPUState *env)
1704 env->exit_request = 1;
1708 const CPULogItem cpu_log_items[] = {
1709 { CPU_LOG_TB_OUT_ASM, "out_asm",
1710 "show generated host assembly code for each compiled TB" },
1711 { CPU_LOG_TB_IN_ASM, "in_asm",
1712 "show target assembly code for each compiled TB" },
1713 { CPU_LOG_TB_OP, "op",
1714 "show micro ops for each compiled TB" },
1715 { CPU_LOG_TB_OP_OPT, "op_opt",
1718 "before eflags optimization and "
1720 "after liveness analysis" },
1721 { CPU_LOG_INT, "int",
1722 "show interrupts/exceptions in short format" },
1723 { CPU_LOG_EXEC, "exec",
1724 "show trace before each executed TB (lots of logs)" },
1725 { CPU_LOG_TB_CPU, "cpu",
1726 "show CPU state before block translation" },
1728 { CPU_LOG_PCALL, "pcall",
1729 "show protected mode far calls/returns/exceptions" },
1730 { CPU_LOG_RESET, "cpu_reset",
1731 "show CPU state before CPU resets" },
1734 { CPU_LOG_IOPORT, "ioport",
1735 "show all i/o ports accesses" },
1740 static int cmp1(const char *s1, int n, const char *s2)
1742 if (strlen(s2) != n)
1744 return memcmp(s1, s2, n) == 0;
1747 /* takes a comma-separated list of log masks. Returns 0 on error. */
1748 int cpu_str_to_log_mask(const char *str)
1750 const CPULogItem *item;
1757 p1 = strchr(p, ',');
1760 if(cmp1(p,p1-p,"all")) {
1761 for(item = cpu_log_items; item->mask != 0; item++) {
1765 for(item = cpu_log_items; item->mask != 0; item++) {
1766 if (cmp1(p, p1 - p, item->name))
1780 void cpu_abort(CPUState *env, const char *fmt, ...)
1787 fprintf(stderr, "qemu: fatal: ");
1788 vfprintf(stderr, fmt, ap);
1789 fprintf(stderr, "\n");
1791 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1793 cpu_dump_state(env, stderr, fprintf, 0);
1795 if (qemu_log_enabled()) {
1796 qemu_log("qemu: fatal: ");
1797 qemu_log_vprintf(fmt, ap2);
1800 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1802 log_cpu_state(env, 0);
1809 #if defined(CONFIG_USER_ONLY)
1811 struct sigaction act;
1812 sigfillset(&act.sa_mask);
1813 act.sa_handler = SIG_DFL;
1814 sigaction(SIGABRT, &act, NULL);
1820 CPUState *cpu_copy(CPUState *env)
1822 CPUState *new_env = cpu_init(env->cpu_model_str);
1823 CPUState *next_cpu = new_env->next_cpu;
1824 int cpu_index = new_env->cpu_index;
1825 #if defined(TARGET_HAS_ICE)
1830 memcpy(new_env, env, sizeof(CPUState));
1832 /* Preserve chaining and index. */
1833 new_env->next_cpu = next_cpu;
1834 new_env->cpu_index = cpu_index;
1836 /* Clone all break/watchpoints.
1837 Note: Once we support ptrace with hw-debug register access, make sure
1838 BP_CPU break/watchpoints are handled correctly on clone. */
1839 QTAILQ_INIT(&env->breakpoints);
1840 QTAILQ_INIT(&env->watchpoints);
1841 #if defined(TARGET_HAS_ICE)
1842 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1843 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1845 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1846 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1854 #if !defined(CONFIG_USER_ONLY)
1856 static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1860 /* Discard jump cache entries for any tb which might potentially
1861 overlap the flushed page. */
1862 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1863 memset (&env->tb_jmp_cache[i], 0,
1864 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1866 i = tb_jmp_cache_hash_page(addr);
1867 memset (&env->tb_jmp_cache[i], 0,
1868 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
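/* Two ranges are cleared above because a TB may start on the page
   preceding 'addr' and spill into the flushed page, so its jump cache
   entry is hashed from that earlier page. */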
1871 static CPUTLBEntry s_cputlb_empty_entry = {
1878 /* NOTE: if flush_global is true, also flush global entries (not implemented yet) */
1880 void tlb_flush(CPUState *env, int flush_global)
1884 #if defined(DEBUG_TLB)
1885 printf("tlb_flush:\n");
1887 /* must reset current TB so that interrupts cannot modify the
1888 links while we are modifying them */
1889 env->current_tb = NULL;
1891 for(i = 0; i < CPU_TLB_SIZE; i++) {
1893 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1894 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
1898 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1900 env->tlb_flush_addr = -1;
1901 env->tlb_flush_mask = 0;
1905 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1907 if (addr == (tlb_entry->addr_read &
1908 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1909 addr == (tlb_entry->addr_write &
1910 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1911 addr == (tlb_entry->addr_code &
1912 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1913 *tlb_entry = s_cputlb_empty_entry;
1917 void tlb_flush_page(CPUState *env, target_ulong addr)
1922 #if defined(DEBUG_TLB)
1923 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1925 /* Check if we need to flush due to large pages. */
1926 if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
1927 #if defined(DEBUG_TLB)
1928 printf("tlb_flush_page: forced full flush ("
1929 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
1930 env->tlb_flush_addr, env->tlb_flush_mask);
1935 /* must reset current TB so that interrupts cannot modify the
1936 links while we are modifying them */
1937 env->current_tb = NULL;
1939 addr &= TARGET_PAGE_MASK;
1940 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1941 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1942 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
1944 tlb_flush_jmp_cache(env, addr);
1948 /* update the TLBs so that writes to code in the virtual page 'addr' can be detected */
1949 static void tlb_protect_code(ram_addr_t ram_addr)
1951 cpu_physical_memory_reset_dirty(ram_addr,
1952 ram_addr + TARGET_PAGE_SIZE,
1956 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1957 tested for self modifying code */
1958 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1961 cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
1964 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1965 unsigned long start, unsigned long length)
1968 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1969 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1970 if ((addr - start) < length) {
1971 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1976 /* Note: start and end must be within the same ram block. */
1977 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1981 unsigned long length, start1;
1984 start &= TARGET_PAGE_MASK;
1985 end = TARGET_PAGE_ALIGN(end);
1987 length = end - start;
1990 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
1992 /* we modify the TLB cache so that the dirty bit will be set again
1993 when accessing the range */
1994 start1 = (unsigned long)qemu_safe_ram_ptr(start);
1995 /* Check that we don't span multiple blocks - this breaks the
1996 address comparisons below. */
1997 if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
1998 != (end - 1) - start) {
2002 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2004 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2005 for(i = 0; i < CPU_TLB_SIZE; i++)
2006 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2012 int cpu_physical_memory_set_dirty_tracking(int enable)
2015 in_migration = enable;
2019 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2021 ram_addr_t ram_addr;
2024 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2025 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2026 + tlb_entry->addend);
2027 ram_addr = qemu_ram_addr_from_host_nofail(p);
2028 if (!cpu_physical_memory_is_dirty(ram_addr)) {
2029 tlb_entry->addr_write |= TLB_NOTDIRTY;
2034 /* update the TLB according to the current state of the dirty bits */
2035 void cpu_tlb_update_dirty(CPUState *env)
2039 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2040 for(i = 0; i < CPU_TLB_SIZE; i++)
2041 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2045 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
2047 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2048 tlb_entry->addr_write = vaddr;
2051 /* update the TLB corresponding to virtual page vaddr
2052 so that it is no longer dirty */
2053 static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
2058 vaddr &= TARGET_PAGE_MASK;
2059 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2060 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2061 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
2064 /* Our TLB does not support large pages, so remember the area covered by
2065 large pages and trigger a full TLB flush if these are invalidated. */
2066 static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
2069 target_ulong mask = ~(size - 1);
2071 if (env->tlb_flush_addr == (target_ulong)-1) {
2072 env->tlb_flush_addr = vaddr & mask;
2073 env->tlb_flush_mask = mask;
2076 /* Extend the existing region to include the new page.
2077 This is a compromise between unnecessary flushes and the cost
2078 of maintaining a full variable size TLB. */
2079 mask &= env->tlb_flush_mask;
2080 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2083 env->tlb_flush_addr &= mask;
2084 env->tlb_flush_mask = mask;
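/* Illustrative numbers (32-bit addresses assumed): with an existing
   region addr=0x100000, mask=0xfff00000, adding a 1MB page at 0x300000
   widens the mask until both addresses agree in all mask bits, giving
   mask=0xffc00000; tlb_flush_addr is then truncated to 0x0, so the
   recorded region covers both large pages. */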
2087 static bool is_ram_rom(ram_addr_t pd)
2089 pd &= ~TARGET_PAGE_MASK;
2090 return pd == IO_MEM_RAM || pd == IO_MEM_ROM;
2093 static bool is_ram_rom_romd(ram_addr_t pd)
2095 return is_ram_rom(pd) || (pd & IO_MEM_ROMD);
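/* Reminder: 'pd' here is a PhysPageDesc.phys_offset value, i.e. a
   page-aligned ram offset with the io index and IO_MEM_* flags packed
   into the low bits, which is why these helpers only inspect
   pd & ~TARGET_PAGE_MASK and the flag bits. */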
2098 /* Add a new TLB entry. At most one entry for a given virtual address
2099 is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
2100 supplied size is only used by tlb_flush_page. */
2101 void tlb_set_page(CPUState *env, target_ulong vaddr,
2102 target_phys_addr_t paddr, int prot,
2103 int mmu_idx, target_ulong size)
2108 target_ulong address;
2109 target_ulong code_address;
2110 unsigned long addend;
2113 target_phys_addr_t iotlb;
2115 assert(size >= TARGET_PAGE_SIZE);
2116 if (size != TARGET_PAGE_SIZE) {
2117 tlb_add_large_page(env, vaddr, size);
2119 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
2121 #if defined(DEBUG_TLB)
2122 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
2123 " prot=%x idx=%d pd=0x%08lx\n",
2124 vaddr, paddr, prot, mmu_idx, pd);
2128 if (!is_ram_rom_romd(pd)) {
2129 /* IO memory case (romd handled later) */
2130 address |= TLB_MMIO;
2132 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
2133 if (is_ram_rom(pd)) {
2135 iotlb = pd & TARGET_PAGE_MASK;
2136 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2137 iotlb |= IO_MEM_NOTDIRTY;
2139 iotlb |= IO_MEM_ROM;
2141 /* IO handlers are currently passed a physical address.
2142 It would be nice to pass an offset from the base address
2143 of that region. This would avoid having to special case RAM,
2144 and avoid full address decoding in every device.
2145 We can't use the high bits of pd for this because
2146 IO_MEM_ROMD uses these as a ram address. */
2147 iotlb = (pd & ~TARGET_PAGE_MASK);
2148 iotlb += p.region_offset;
2151 code_address = address;
2152 /* Make accesses to pages with watchpoints go via the
2153 watchpoint trap routines. */
2154 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
2155 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2156 /* Avoid trapping reads of pages with a write breakpoint. */
2157 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
2158 iotlb = io_mem_watch + paddr;
2159 address |= TLB_MMIO;
2165 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2166 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2167 te = &env->tlb_table[mmu_idx][index];
2168 te->addend = addend - vaddr;
2169 if (prot & PAGE_READ) {
2170 te->addr_read = address;
2175 if (prot & PAGE_EXEC) {
2176 te->addr_code = code_address;
2180 if (prot & PAGE_WRITE) {
2181 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2182 (pd & IO_MEM_ROMD)) {
2183 /* Write access calls the I/O callback. */
2184 te->addr_write = address | TLB_MMIO;
2185 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2186 !cpu_physical_memory_is_dirty(pd)) {
2187 te->addr_write = address | TLB_NOTDIRTY;
2189 te->addr_write = address;
2192 te->addr_write = -1;
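/* Summary of the addr_write cases above: ROM/ROMD pages get TLB_MMIO so
   stores take the I/O path, clean RAM gets TLB_NOTDIRTY so the first
   store can update the dirty bits, and already-dirty RAM is mapped for
   direct writes. */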
2198 void tlb_flush(CPUState *env, int flush_global)
2202 void tlb_flush_page(CPUState *env, target_ulong addr)
2207 * Walks guest process memory "regions" one by one
2208 * and calls callback function 'fn' for each region.
2211 struct walk_memory_regions_data
2213 walk_memory_regions_fn fn;
2215 unsigned long start;
2219 static int walk_memory_regions_end(struct walk_memory_regions_data *data,
2220 abi_ulong end, int new_prot)
2222 if (data->start != -1ul) {
2223 int rc = data->fn(data->priv, data->start, end, data->prot);
2229 data->start = (new_prot ? end : -1ul);
2230 data->prot = new_prot;
2235 static int walk_memory_regions_1(struct walk_memory_regions_data *data,
2236 abi_ulong base, int level, void **lp)
2242 return walk_memory_regions_end(data, base, 0);
2247 for (i = 0; i < L2_SIZE; ++i) {
2248 int prot = pd[i].flags;
2250 pa = base | (i << TARGET_PAGE_BITS);
2251 if (prot != data->prot) {
2252 rc = walk_memory_regions_end(data, pa, prot);
2260 for (i = 0; i < L2_SIZE; ++i) {
2261 pa = base | ((abi_ulong)i <<
2262 (TARGET_PAGE_BITS + L2_BITS * level));
2263 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2273 int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2275 struct walk_memory_regions_data data;
2283 for (i = 0; i < V_L1_SIZE; i++) {
2284 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
2285 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2291 return walk_memory_regions_end(&data, 0, 0);
2294 static int dump_region(void *priv, abi_ulong start,
2295 abi_ulong end, unsigned long prot)
2297 FILE *f = (FILE *)priv;
2299 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2300 " "TARGET_ABI_FMT_lx" %c%c%c\n",
2301 start, end, end - start,
2302 ((prot & PAGE_READ) ? 'r' : '-'),
2303 ((prot & PAGE_WRITE) ? 'w' : '-'),
2304 ((prot & PAGE_EXEC) ? 'x' : '-'));
2309 /* dump memory mappings */
2310 void page_dump(FILE *f)
2312 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2313 "start", "end", "size", "prot");
2314 walk_memory_regions(f, dump_region);
2317 int page_get_flags(target_ulong address)
2321 p = page_find(address >> TARGET_PAGE_BITS);
2327 /* Modify the flags of a page and invalidate the code if necessary.
2328 The flag PAGE_WRITE_ORG is set automatically depending
2329 on PAGE_WRITE. The mmap_lock should already be held. */
2330 void page_set_flags(target_ulong start, target_ulong end, int flags)
2332 target_ulong addr, len;
2334 /* This function should never be called with addresses outside the
2335 guest address space. If this assert fires, it probably indicates
2336 a missing call to h2g_valid. */
2337 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2338 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2340 assert(start < end);
2342 start = start & TARGET_PAGE_MASK;
2343 end = TARGET_PAGE_ALIGN(end);
2345 if (flags & PAGE_WRITE) {
2346 flags |= PAGE_WRITE_ORG;
2349 for (addr = start, len = end - start;
2351 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2352 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2354 /* If the write protection bit is set, then we invalidate
2356 if (!(p->flags & PAGE_WRITE) &&
2357 (flags & PAGE_WRITE) &&
2359 tb_invalidate_phys_page(addr, 0, NULL);
2365 int page_check_range(target_ulong start, target_ulong len, int flags)
2371 /* This function should never be called with addresses outside the
2372 guest address space. If this assert fires, it probably indicates
2373 a missing call to h2g_valid. */
2374 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2375 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2381 if (start + len - 1 < start) {
2382 /* We've wrapped around. */
2386 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2387 start = start & TARGET_PAGE_MASK;
2389 for (addr = start, len = end - start;
2391 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2392 p = page_find(addr >> TARGET_PAGE_BITS);
2395 if( !(p->flags & PAGE_VALID) )
2398 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2400 if (flags & PAGE_WRITE) {
2401 if (!(p->flags & PAGE_WRITE_ORG))
2403 /* unprotect the page if it was put read-only because it
2404 contains translated code */
2405 if (!(p->flags & PAGE_WRITE)) {
2406 if (!page_unprotect(addr, 0, NULL))
2415 /* called from signal handler: invalidate the code and unprotect the
2416 page. Return TRUE if the fault was successfully handled. */
2417 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2421 target_ulong host_start, host_end, addr;
2423 /* Technically this isn't safe inside a signal handler. However we
2424 know this only ever happens in a synchronous SEGV handler, so in
2425 practice it seems to be ok. */
2428 p = page_find(address >> TARGET_PAGE_BITS);
2434 /* if the page was really writable, then we change its
2435 protection back to writable */
2436 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2437 host_start = address & qemu_host_page_mask;
2438 host_end = host_start + qemu_host_page_size;
2441 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2442 p = page_find(addr >> TARGET_PAGE_BITS);
2443 p->flags |= PAGE_WRITE;
2446 /* and since the content will be modified, we must invalidate
2447 the corresponding translated code. */
2448 tb_invalidate_phys_page(addr, pc, puc);
2449 #ifdef DEBUG_TB_CHECK
2450 tb_invalidate_check(addr);
2453 mprotect((void *)g2h(host_start), qemu_host_page_size,
2463 static inline void tlb_set_dirty(CPUState *env,
2464 unsigned long addr, target_ulong vaddr)
2467 #endif /* defined(CONFIG_USER_ONLY) */
2469 #if !defined(CONFIG_USER_ONLY)
2471 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2472 typedef struct subpage_t {
2473 target_phys_addr_t base;
2474 ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
2475 ram_addr_t region_offset[TARGET_PAGE_SIZE];
2478 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2479 ram_addr_t memory, ram_addr_t region_offset);
2480 static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2481 ram_addr_t orig_memory,
2482 ram_addr_t region_offset);
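/* A subpage splits one target page among several handlers: each byte of
   the page gets its own io index and region offset in the arrays above,
   so regions smaller than TARGET_PAGE_SIZE can coexist within one page
   (subpage_register() fills in the per-byte ranges). */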
2483 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2486 if (addr > start_addr) \
2489 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2490 if (start_addr2 > 0) \
2494 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2495 end_addr2 = TARGET_PAGE_SIZE - 1; \
2497 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2498 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2503 /* register physical memory.
2504 For RAM, 'size' must be a multiple of the target page size.
2505 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2506 io memory page. The address used when calling the IO function is
2507 the offset from the start of the region, plus region_offset. Both
2508 start_addr and region_offset are rounded down to a page boundary
2509 before calculating this offset. This should not be a problem unless
2510 the low bits of start_addr and region_offset differ. */
2511 void cpu_register_physical_memory_log(target_phys_addr_t start_addr,
2513 ram_addr_t phys_offset,
2514 ram_addr_t region_offset,
2517 target_phys_addr_t addr, end_addr;
2520 ram_addr_t orig_size = size;
2525 if (phys_offset == IO_MEM_UNASSIGNED) {
2526 region_offset = start_addr;
2528 region_offset &= TARGET_PAGE_MASK;
2529 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2530 end_addr = start_addr + (target_phys_addr_t)size;
2534 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 0);
2535 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2536 ram_addr_t orig_memory = p->phys_offset;
2537 target_phys_addr_t start_addr2, end_addr2;
2538 int need_subpage = 0;
2540 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2543 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2544 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2545 &p->phys_offset, orig_memory,
2548 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2551 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2553 p->region_offset = 0;
2555 p->phys_offset = phys_offset;
2556 p->region_offset = region_offset;
2557 if (is_ram_rom_romd(phys_offset))
2558 phys_offset += TARGET_PAGE_SIZE;
2561 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2562 p->phys_offset = phys_offset;
2563 p->region_offset = region_offset;
2564 if (is_ram_rom_romd(phys_offset)) {
2565 phys_offset += TARGET_PAGE_SIZE;
2567 target_phys_addr_t start_addr2, end_addr2;
2568 int need_subpage = 0;
2570 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2571 end_addr2, need_subpage);
2574 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2575 &p->phys_offset, IO_MEM_UNASSIGNED,
2576 addr & TARGET_PAGE_MASK);
2577 subpage_register(subpage, start_addr2, end_addr2,
2578 phys_offset, region_offset);
2579 p->region_offset = 0;
2583 region_offset += TARGET_PAGE_SIZE;
2584 addr += TARGET_PAGE_SIZE;
2585 } while (addr != end_addr);
2587 /* since each CPU stores ram addresses in its TLB cache, we must
2588 reset the modified entries */
2590 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2595 void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2598 kvm_coalesce_mmio_region(addr, size);
2601 void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2604 kvm_uncoalesce_mmio_region(addr, size);
2607 void qemu_flush_coalesced_mmio_buffer(void)
2610 kvm_flush_coalesced_mmio_buffer();
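/* Illustrative sketch (not part of the original file): the comment above
   cpu_register_physical_memory_log() states that the address passed to an
   IO callback is the page offset of the access plus the page's
   region_offset.  The hypothetical helper below spells out that
   computation; it is guarded out of the build. */
#if 0
static target_phys_addr_t example_io_callback_addr(target_phys_addr_t addr,
                                                   ram_addr_t region_offset)
{
    /* region_offset has already been advanced page by page during
       registration, so only the in-page offset is added here. */
    return (addr & ~TARGET_PAGE_MASK) + region_offset;
}
#endif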
2613 #if defined(__linux__) && !defined(TARGET_S390X)
2615 #include <sys/vfs.h>
2617 #define HUGETLBFS_MAGIC 0x958458f6
2619 static long gethugepagesize(const char *path)
2625 ret = statfs(path, &fs);
2626 } while (ret != 0 && errno == EINTR);
2633 if (fs.f_type != HUGETLBFS_MAGIC)
2634 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
2639 static void *file_ram_alloc(RAMBlock *block,
2649 unsigned long hpagesize;
2651 hpagesize = gethugepagesize(path);
2656 if (memory < hpagesize) {
2660 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2661 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2665 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
2669 fd = mkstemp(filename);
2671 perror("unable to create backing store for hugepages");
2678     memory = (memory + hpagesize - 1) & ~(hpagesize - 1);
2681 * ftruncate is not supported by hugetlbfs in older
2682 * hosts, so don't bother bailing out on errors.
2683 * If anything goes wrong with it under other filesystems,
2686 if (ftruncate(fd, memory))
2687 perror("ftruncate");
2690 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2691 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2692 * to sidestep this quirk.
2694 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2695 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2697 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2699 if (area == MAP_FAILED) {
2700 perror("file_ram_alloc: can't mmap RAM pages");
2709 static ram_addr_t find_ram_offset(ram_addr_t size)
2711 RAMBlock *block, *next_block;
2712 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
2714 if (QLIST_EMPTY(&ram_list.blocks))
2717 QLIST_FOREACH(block, &ram_list.blocks, next) {
2718 ram_addr_t end, next = RAM_ADDR_MAX;
2720 end = block->offset + block->length;
2722 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2723 if (next_block->offset >= end) {
2724 next = MIN(next, next_block->offset);
2727 if (next - end >= size && next - end < mingap) {
2729 mingap = next - end;
2733 if (offset == RAM_ADDR_MAX) {
2734 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
2742 static ram_addr_t last_ram_offset(void)
2745 ram_addr_t last = 0;
2747 QLIST_FOREACH(block, &ram_list.blocks, next)
2748 last = MAX(last, block->offset + block->length);
2753 void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
2755 RAMBlock *new_block, *block;
2758 QLIST_FOREACH(block, &ram_list.blocks, next) {
2759 if (block->offset == addr) {
2765 assert(!new_block->idstr[0]);
2767 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2768 char *id = dev->parent_bus->info->get_dev_path(dev);
2770 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
2774 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2776 QLIST_FOREACH(block, &ram_list.blocks, next) {
2777 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
2778 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2785 ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
2788 RAMBlock *new_block;
2790 size = TARGET_PAGE_ALIGN(size);
2791 new_block = g_malloc0(sizeof(*new_block));
2794 new_block->offset = find_ram_offset(size);
2796 new_block->host = host;
2797 new_block->flags |= RAM_PREALLOC_MASK;
2800 #if defined (__linux__) && !defined(TARGET_S390X)
2801 new_block->host = file_ram_alloc(new_block, size, mem_path);
2802 if (!new_block->host) {
2803 new_block->host = qemu_vmalloc(size);
2804 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
2807 fprintf(stderr, "-mem-path option unsupported\n");
2811 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2812 /* S390 KVM requires the topmost vma of the RAM to be smaller than
2813        a system-defined value, which is at least 256GB. Larger systems
2814        have larger values. We put the guest between the end of the data
2815        segment (system break) and this value. We use 32GB as a base to
2816 have enough room for the system break to grow. */
2817 new_block->host = mmap((void*)0x800000000, size,
2818 PROT_EXEC|PROT_READ|PROT_WRITE,
2819 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
2820 if (new_block->host == MAP_FAILED) {
2821 fprintf(stderr, "Allocating RAM failed\n");
2825 if (xen_enabled()) {
2826 xen_ram_alloc(new_block->offset, size, mr);
2828 new_block->host = qemu_vmalloc(size);
2831 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
2834 new_block->length = size;
2836 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
2838 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
2839 last_ram_offset() >> TARGET_PAGE_BITS);
2840 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
2841 0xff, size >> TARGET_PAGE_BITS);
2844 kvm_setup_guest_memory(new_block->host, size);
2846 return new_block->offset;
2849 ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
2851 return qemu_ram_alloc_from_ptr(size, NULL, mr);
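/* Illustrative sketch (not part of the original file): typical lifetime of
   a RAM block owned by a device model.  'mr' stands for the device's
   MemoryRegion, the size is arbitrary, and the block is guarded out of the
   build. */
#if 0
static void example_ram_block_lifetime(MemoryRegion *mr)
{
    ram_addr_t offset = qemu_ram_alloc(0x2000, mr);  /* 8 KB of guest RAM */
    void *host = qemu_get_ram_ptr(offset);           /* host view of the block */

    memset(host, 0, 0x2000);                         /* device-local initialisation */
    qemu_put_ram_ptr(host);

    qemu_ram_free(offset);                           /* release block and host memory */
}
#endif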
2854 void qemu_ram_free_from_ptr(ram_addr_t addr)
2858 QLIST_FOREACH(block, &ram_list.blocks, next) {
2859 if (addr == block->offset) {
2860 QLIST_REMOVE(block, next);
2867 void qemu_ram_free(ram_addr_t addr)
2871 QLIST_FOREACH(block, &ram_list.blocks, next) {
2872 if (addr == block->offset) {
2873 QLIST_REMOVE(block, next);
2874 if (block->flags & RAM_PREALLOC_MASK) {
2876 } else if (mem_path) {
2877 #if defined (__linux__) && !defined(TARGET_S390X)
2879 munmap(block->host, block->length);
2882 qemu_vfree(block->host);
2888 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2889 munmap(block->host, block->length);
2891 if (xen_enabled()) {
2892 xen_invalidate_map_cache_entry(block->host);
2894 qemu_vfree(block->host);
2906 void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
2913 QLIST_FOREACH(block, &ram_list.blocks, next) {
2914 offset = addr - block->offset;
2915 if (offset < block->length) {
2916 vaddr = block->host + offset;
2917 if (block->flags & RAM_PREALLOC_MASK) {
2921 munmap(vaddr, length);
2923 #if defined(__linux__) && !defined(TARGET_S390X)
2926 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
2929 flags |= MAP_PRIVATE;
2931 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2932 flags, block->fd, offset);
2934 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2935 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2942 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2943 flags |= MAP_SHARED | MAP_ANONYMOUS;
2944 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
2947 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2948 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2952 if (area != vaddr) {
2953 fprintf(stderr, "Could not remap addr: "
2954 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
2958 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
2964 #endif /* !_WIN32 */
2966 /* Return a host pointer to ram allocated with qemu_ram_alloc.
2967 With the exception of the softmmu code in this file, this should
2968 only be used for local memory (e.g. video ram) that the device owns,
2969 and knows it isn't going to access beyond the end of the block.
2971 It should not be used for general purpose DMA.
2972    Use cpu_physical_memory_map/cpu_physical_memory_rw instead. */
2974 void *qemu_get_ram_ptr(ram_addr_t addr)
2978 QLIST_FOREACH(block, &ram_list.blocks, next) {
2979 if (addr - block->offset < block->length) {
2980             /* Move this entry to the start of the list. */
2981 if (block != QLIST_FIRST(&ram_list.blocks)) {
2982 QLIST_REMOVE(block, next);
2983 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
2985 if (xen_enabled()) {
2986 /* We need to check if the requested address is in the RAM
2987 * because we don't want to map the entire memory in QEMU.
2988 * In that case just map until the end of the page.
2990 if (block->offset == 0) {
2991 return xen_map_cache(addr, 0, 0);
2992 } else if (block->host == NULL) {
2994 xen_map_cache(block->offset, block->length, 1);
2997 return block->host + (addr - block->offset);
3001 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3007 /* Return a host pointer to ram allocated with qemu_ram_alloc.
3008  * Same as qemu_get_ram_ptr but avoids reordering ramblocks. */
3010 void *qemu_safe_ram_ptr(ram_addr_t addr)
3014 QLIST_FOREACH(block, &ram_list.blocks, next) {
3015 if (addr - block->offset < block->length) {
3016 if (xen_enabled()) {
3017 /* We need to check if the requested address is in the RAM
3018 * because we don't want to map the entire memory in QEMU.
3019 * In that case just map until the end of the page.
3021 if (block->offset == 0) {
3022 return xen_map_cache(addr, 0, 0);
3023 } else if (block->host == NULL) {
3025 xen_map_cache(block->offset, block->length, 1);
3028 return block->host + (addr - block->offset);
3032 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3038 /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
3039 * but takes a size argument */
3040 void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
3045 if (xen_enabled()) {
3046 return xen_map_cache(addr, *size, 1);
3050 QLIST_FOREACH(block, &ram_list.blocks, next) {
3051 if (addr - block->offset < block->length) {
3052 if (addr - block->offset + *size > block->length)
3053 *size = block->length - addr + block->offset;
3054 return block->host + (addr - block->offset);
3058 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3063 void qemu_put_ram_ptr(void *addr)
3065 trace_qemu_put_ram_ptr(addr);
3068 int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
3071 uint8_t *host = ptr;
3073 if (xen_enabled()) {
3074 *ram_addr = xen_ram_addr_from_mapcache(ptr);
3078 QLIST_FOREACH(block, &ram_list.blocks, next) {
3079         /* This case appears when the block is not mapped. */
3080 if (block->host == NULL) {
3083 if (host - block->host < block->length) {
3084 *ram_addr = block->offset + (host - block->host);
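/* Illustrative sketch (not part of the original file): the round trip
   between a ram_addr_t and a host pointer.  A non-zero return from
   qemu_ram_addr_from_host() means the pointer is not inside any RAM block
   (see the _nofail wrapper below).  Guarded out of the build. */
#if 0
static int example_round_trip_ok(ram_addr_t addr)
{
    void *host = qemu_get_ram_ptr(addr);
    ram_addr_t back;

    if (qemu_ram_addr_from_host(host, &back)) {
        return 0;                    /* pointer did not map back to RAM */
    }
    return back == addr;
}
#endif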
3092 /* Some of the softmmu routines need to translate from a host pointer
3093 (typically a TLB entry) back to a ram offset. */
3094 ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3096 ram_addr_t ram_addr;
3098 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
3099 fprintf(stderr, "Bad ram pointer %p\n", ptr);
3105 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
3107 #ifdef DEBUG_UNASSIGNED
3108 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3110 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3111 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 1);
3116 static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
3118 #ifdef DEBUG_UNASSIGNED
3119 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3121 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3122 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 2);
3127 static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
3129 #ifdef DEBUG_UNASSIGNED
3130 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3132 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3133 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 4);
3138 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
3140 #ifdef DEBUG_UNASSIGNED
3141 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3143 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3144 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 1);
3148 static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
3150 #ifdef DEBUG_UNASSIGNED
3151 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3153 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3154 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 2);
3158 static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
3160 #ifdef DEBUG_UNASSIGNED
3161 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3163 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3164 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 4);
3168 static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
3169 unassigned_mem_readb,
3170 unassigned_mem_readw,
3171 unassigned_mem_readl,
3174 static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
3175 unassigned_mem_writeb,
3176 unassigned_mem_writew,
3177 unassigned_mem_writel,
3180 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
3184 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3185 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3186 #if !defined(CONFIG_USER_ONLY)
3187 tb_invalidate_phys_page_fast(ram_addr, 1);
3188 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3191 stb_p(qemu_get_ram_ptr(ram_addr), val);
3192 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3193 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3194     /* we remove the notdirty callback only if the code has been flushed */
3196 if (dirty_flags == 0xff)
3197 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
3200 static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
3204 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3205 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3206 #if !defined(CONFIG_USER_ONLY)
3207 tb_invalidate_phys_page_fast(ram_addr, 2);
3208 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3211 stw_p(qemu_get_ram_ptr(ram_addr), val);
3212 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3213 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3214     /* we remove the notdirty callback only if the code has been flushed */
3216 if (dirty_flags == 0xff)
3217 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
3220 static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
3224 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3225 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3226 #if !defined(CONFIG_USER_ONLY)
3227 tb_invalidate_phys_page_fast(ram_addr, 4);
3228 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3231 stl_p(qemu_get_ram_ptr(ram_addr), val);
3232 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3233 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3234     /* we remove the notdirty callback only if the code has been flushed */
3236 if (dirty_flags == 0xff)
3237 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
3240 static CPUReadMemoryFunc * const error_mem_read[3] = {
3241 NULL, /* never used */
3242 NULL, /* never used */
3243 NULL, /* never used */
3246 static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
3247 notdirty_mem_writeb,
3248 notdirty_mem_writew,
3249 notdirty_mem_writel,
3252 /* Generate a debug exception if a watchpoint has been hit. */
3253 static void check_watchpoint(int offset, int len_mask, int flags)
3255 CPUState *env = cpu_single_env;
3256 target_ulong pc, cs_base;
3257 TranslationBlock *tb;
3262 if (env->watchpoint_hit) {
3263 /* We re-entered the check after replacing the TB. Now raise
3264          * the debug interrupt so that it will trigger after the
3265 * current instruction. */
3266 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3269 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
3270 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
3271 if ((vaddr == (wp->vaddr & len_mask) ||
3272 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
3273 wp->flags |= BP_WATCHPOINT_HIT;
3274 if (!env->watchpoint_hit) {
3275 env->watchpoint_hit = wp;
3276 tb = tb_find_pc(env->mem_io_pc);
3278 cpu_abort(env, "check_watchpoint: could not find TB for "
3279 "pc=%p", (void *)env->mem_io_pc);
3281 cpu_restore_state(tb, env, env->mem_io_pc);
3282 tb_phys_invalidate(tb, -1);
3283 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3284 env->exception_index = EXCP_DEBUG;
3286 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3287 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3289 cpu_resume_from_signal(env, NULL);
3292 wp->flags &= ~BP_WATCHPOINT_HIT;
3297 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3298    so these check for a hit then pass through to the normal out-of-line phys routines. */
3300 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
3302 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
3303 return ldub_phys(addr);
3306 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
3308 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
3309 return lduw_phys(addr);
3312 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
3314 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
3315 return ldl_phys(addr);
3318 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
3321 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
3322 stb_phys(addr, val);
3325 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
3328 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
3329 stw_phys(addr, val);
3332 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
3335 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
3336 stl_phys(addr, val);
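/* Illustrative sketch (not part of the original file): how a debugger
   front end might arm one of the watchpoints that the handlers above
   check.  cpu_watchpoint_insert() is assumed to be the usual watchpoint
   API of this code base; treat its exact signature as an assumption.
   Guarded out of the build. */
#if 0
static void example_arm_write_watchpoint(CPUState *env, target_ulong vaddr)
{
    CPUWatchpoint *wp;

    /* stop before a 4-byte write to vaddr reaches memory; the TLB trick
       then routes the access through watch_mem_write above. */
    cpu_watchpoint_insert(env, vaddr, 4,
                          BP_MEM_WRITE | BP_STOP_BEFORE_ACCESS, &wp);
}
#endif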
3339 static CPUReadMemoryFunc * const watch_mem_read[3] = {
3345 static CPUWriteMemoryFunc * const watch_mem_write[3] = {
3351 static inline uint32_t subpage_readlen (subpage_t *mmio,
3352 target_phys_addr_t addr,
3355 unsigned int idx = SUBPAGE_IDX(addr);
3356 #if defined(DEBUG_SUBPAGE)
3357 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3358 mmio, len, addr, idx);
3361 addr += mmio->region_offset[idx];
3362 idx = mmio->sub_io_index[idx];
3363     return io_mem_read(idx, addr, 1 << len);
3366 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
3367 uint32_t value, unsigned int len)
3369 unsigned int idx = SUBPAGE_IDX(addr);
3370 #if defined(DEBUG_SUBPAGE)
3371 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
3372 __func__, mmio, len, addr, idx, value);
3375 addr += mmio->region_offset[idx];
3376 idx = mmio->sub_io_index[idx];
3377 io_mem_write(idx, addr, value, 1 << len);
3380 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
3382 return subpage_readlen(opaque, addr, 0);
3385 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
3388 subpage_writelen(opaque, addr, value, 0);
3391 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
3393 return subpage_readlen(opaque, addr, 1);
3396 static void subpage_writew (void *opaque, target_phys_addr_t addr,
3399 subpage_writelen(opaque, addr, value, 1);
3402 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
3404 return subpage_readlen(opaque, addr, 2);
3407 static void subpage_writel (void *opaque, target_phys_addr_t addr,
3410 subpage_writelen(opaque, addr, value, 2);
3413 static CPUReadMemoryFunc * const subpage_read[] = {
3419 static CPUWriteMemoryFunc * const subpage_write[] = {
3425 static uint32_t subpage_ram_readb(void *opaque, target_phys_addr_t addr)
3427 ram_addr_t raddr = addr;
3428 void *ptr = qemu_get_ram_ptr(raddr);
3432 static void subpage_ram_writeb(void *opaque, target_phys_addr_t addr,
3435 ram_addr_t raddr = addr;
3436 void *ptr = qemu_get_ram_ptr(raddr);
3440 static uint32_t subpage_ram_readw(void *opaque, target_phys_addr_t addr)
3442 ram_addr_t raddr = addr;
3443 void *ptr = qemu_get_ram_ptr(raddr);
3447 static void subpage_ram_writew(void *opaque, target_phys_addr_t addr,
3450 ram_addr_t raddr = addr;
3451 void *ptr = qemu_get_ram_ptr(raddr);
3455 static uint32_t subpage_ram_readl(void *opaque, target_phys_addr_t addr)
3457 ram_addr_t raddr = addr;
3458 void *ptr = qemu_get_ram_ptr(raddr);
3462 static void subpage_ram_writel(void *opaque, target_phys_addr_t addr,
3465 ram_addr_t raddr = addr;
3466 void *ptr = qemu_get_ram_ptr(raddr);
3470 static CPUReadMemoryFunc * const subpage_ram_read[] = {
3476 static CPUWriteMemoryFunc * const subpage_ram_write[] = {
3477 &subpage_ram_writeb,
3478 &subpage_ram_writew,
3479 &subpage_ram_writel,
3482 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3483 ram_addr_t memory, ram_addr_t region_offset)
3487 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3489 idx = SUBPAGE_IDX(start);
3490 eidx = SUBPAGE_IDX(end);
3491 #if defined(DEBUG_SUBPAGE)
3492 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
3493 mmio, start, end, idx, eidx, memory);
3495 if ((memory & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
3496 memory = IO_MEM_SUBPAGE_RAM;
3498 memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3499 for (; idx <= eidx; idx++) {
3500 mmio->sub_io_index[idx] = memory;
3501 mmio->region_offset[idx] = region_offset;
3507 static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3508 ram_addr_t orig_memory,
3509 ram_addr_t region_offset)
3514 mmio = g_malloc0(sizeof(subpage_t));
3517 subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
3518 #if defined(DEBUG_SUBPAGE)
3519 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3520 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
3522 *phys = subpage_memory | IO_MEM_SUBPAGE;
3523 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);
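/* Illustrative sketch (not part of the original file): splitting one guest
   page between two IO handlers with the helpers above.  io_a and io_b are
   hypothetical tokens returned by cpu_register_io_memory(); guarded out of
   the build. */
#if 0
static void example_split_page(target_phys_addr_t page_base, ram_addr_t *phys,
                               ram_addr_t io_a, ram_addr_t io_b)
{
    /* back the whole page with io_a, then overlay its second half with io_b */
    subpage_t *sp = subpage_init(page_base, phys, io_a, page_base);

    subpage_register(sp, TARGET_PAGE_SIZE / 2, TARGET_PAGE_SIZE - 1,
                     io_b, page_base + TARGET_PAGE_SIZE / 2);
}
#endif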
3528 static int get_free_io_mem_idx(void)
3532 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3533 if (!io_mem_used[i]) {
3537     fprintf(stderr, "Ran out of io_mem_idx, max %d!\n", IO_MEM_NB_ENTRIES);
3541 /* mem_read and mem_write are arrays of functions containing the
3542 function to access byte (index 0), word (index 1) and dword (index
3543 2). Functions can be omitted with a NULL function pointer.
3544    If io_index is non-zero, the corresponding io zone is
3545 modified. If it is zero, a new io zone is allocated. The return
3546 value can be used with cpu_register_physical_memory(). (-1) is
3547    returned on error. */
3548 static int cpu_register_io_memory_fixed(int io_index,
3549 CPUReadMemoryFunc * const *mem_read,
3550 CPUWriteMemoryFunc * const *mem_write,
3555 if (io_index <= 0) {
3556 io_index = get_free_io_mem_idx();
3560 io_index >>= IO_MEM_SHIFT;
3561 if (io_index >= IO_MEM_NB_ENTRIES)
3565 for (i = 0; i < 3; ++i) {
3566 _io_mem_read[io_index][i]
3567 = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
3569 for (i = 0; i < 3; ++i) {
3570 _io_mem_write[io_index][i]
3571 = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
3573 io_mem_opaque[io_index] = opaque;
3575 return (io_index << IO_MEM_SHIFT);
3578 int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
3579 CPUWriteMemoryFunc * const *mem_write,
3582 return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
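/* Illustrative sketch (not part of the original file): the read/write
   table convention documented above, for a hypothetical device that only
   implements 32-bit accesses.  NULL slots fall back to the unassigned
   handlers.  Guarded out of the build. */
#if 0
static uint32_t example_dev_readl(void *opaque, target_phys_addr_t addr)
{
    return 0x12345678;                       /* hypothetical register value */
}

static void example_dev_writel(void *opaque, target_phys_addr_t addr,
                               uint32_t val)
{
    /* hypothetical register write */
}

static CPUReadMemoryFunc * const example_dev_read[3] = {
    NULL, NULL, example_dev_readl,           /* index 2 = 32-bit access */
};

static CPUWriteMemoryFunc * const example_dev_write[3] = {
    NULL, NULL, example_dev_writel,
};

static int example_register_dev(void *opaque)
{
    /* the returned token is suitable for cpu_register_physical_memory() */
    return cpu_register_io_memory(example_dev_read, example_dev_write, opaque);
}
#endif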
3585 void cpu_unregister_io_memory(int io_table_address)
3588 int io_index = io_table_address >> IO_MEM_SHIFT;
3590     for (i = 0; i < 3; i++) {
3591 _io_mem_read[io_index][i] = unassigned_mem_read[i];
3592 _io_mem_write[io_index][i] = unassigned_mem_write[i];
3594 io_mem_opaque[io_index] = NULL;
3595 io_mem_used[io_index] = 0;
3598 static void io_mem_init(void)
3602 cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read,
3603 unassigned_mem_write, NULL);
3604 cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read,
3605 unassigned_mem_write, NULL);
3606 cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read,
3607 notdirty_mem_write, NULL);
3608 cpu_register_io_memory_fixed(IO_MEM_SUBPAGE_RAM, subpage_ram_read,
3609 subpage_ram_write, NULL);
3613 io_mem_watch = cpu_register_io_memory(watch_mem_read,
3614 watch_mem_write, NULL);
3617 static void memory_map_init(void)
3619 system_memory = g_malloc(sizeof(*system_memory));
3620 memory_region_init(system_memory, "system", INT64_MAX);
3621 set_system_memory_map(system_memory);
3623 system_io = g_malloc(sizeof(*system_io));
3624 memory_region_init(system_io, "io", 65536);
3625 set_system_io_map(system_io);
3628 MemoryRegion *get_system_memory(void)
3630 return system_memory;
3633 MemoryRegion *get_system_io(void)
3638 #endif /* !defined(CONFIG_USER_ONLY) */
3640 /* physical memory access (slow version, mainly for debug) */
3641 #if defined(CONFIG_USER_ONLY)
3642 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3643 uint8_t *buf, int len, int is_write)
3650 page = addr & TARGET_PAGE_MASK;
3651 l = (page + TARGET_PAGE_SIZE) - addr;
3654 flags = page_get_flags(page);
3655 if (!(flags & PAGE_VALID))
3658 if (!(flags & PAGE_WRITE))
3660 /* XXX: this code should not depend on lock_user */
3661 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3664 unlock_user(p, addr, l);
3666 if (!(flags & PAGE_READ))
3668 /* XXX: this code should not depend on lock_user */
3669 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3672 unlock_user(p, addr, 0);
3682 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3683 int len, int is_write)
3688 target_phys_addr_t page;
3693 page = addr & TARGET_PAGE_MASK;
3694 l = (page + TARGET_PAGE_SIZE) - addr;
3697 p = phys_page_find(page >> TARGET_PAGE_BITS);
3701 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3702 target_phys_addr_t addr1;
3703 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3704 addr1 = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
3705             /* XXX: could force cpu_single_env to NULL to avoid potential bugs */
3707 if (l >= 4 && ((addr1 & 3) == 0)) {
3708 /* 32 bit write access */
3710 io_mem_write(io_index, addr1, val, 4);
3712 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3713 /* 16 bit write access */
3715 io_mem_write(io_index, addr1, val, 2);
3718 /* 8 bit write access */
3720 io_mem_write(io_index, addr1, val, 1);
3725 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3727 ptr = qemu_get_ram_ptr(addr1);
3728 memcpy(ptr, buf, l);
3729 if (!cpu_physical_memory_is_dirty(addr1)) {
3730 /* invalidate code */
3731 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3733 cpu_physical_memory_set_dirty_flags(
3734 addr1, (0xff & ~CODE_DIRTY_FLAG));
3736 qemu_put_ram_ptr(ptr);
3739 if (!is_ram_rom_romd(pd)) {
3740 target_phys_addr_t addr1;
3742 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3743 addr1 = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
3744 if (l >= 4 && ((addr1 & 3) == 0)) {
3745 /* 32 bit read access */
3746 val = io_mem_read(io_index, addr1, 4);
3749 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3750 /* 16 bit read access */
3751 val = io_mem_read(io_index, addr1, 2);
3755 /* 8 bit read access */
3756 val = io_mem_read(io_index, addr1, 1);
3762 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
3763 memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
3764 qemu_put_ram_ptr(ptr);
3773 /* used for ROM loading : can write in RAM and ROM */
3774 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3775 const uint8_t *buf, int len)
3779 target_phys_addr_t page;
3784 page = addr & TARGET_PAGE_MASK;
3785 l = (page + TARGET_PAGE_SIZE) - addr;
3788 p = phys_page_find(page >> TARGET_PAGE_BITS);
3791 if (!is_ram_rom_romd(pd)) {
3794 unsigned long addr1;
3795 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3797 ptr = qemu_get_ram_ptr(addr1);
3798 memcpy(ptr, buf, l);
3799 qemu_put_ram_ptr(ptr);
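/* Illustrative sketch (not part of the original file): loading a firmware
   blob into a ROM region, which an ordinary cpu_physical_memory_rw() write
   would not modify.  Names are hypothetical; guarded out of the build. */
#if 0
static void example_load_firmware(target_phys_addr_t rom_base,
                                  const uint8_t *blob, int blob_len)
{
    cpu_physical_memory_write_rom(rom_base, blob, blob_len);
}
#endif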
3809 target_phys_addr_t addr;
3810 target_phys_addr_t len;
3813 static BounceBuffer bounce;
3815 typedef struct MapClient {
3817 void (*callback)(void *opaque);
3818 QLIST_ENTRY(MapClient) link;
3821 static QLIST_HEAD(map_client_list, MapClient) map_client_list
3822 = QLIST_HEAD_INITIALIZER(map_client_list);
3824 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3826 MapClient *client = g_malloc(sizeof(*client));
3828 client->opaque = opaque;
3829 client->callback = callback;
3830 QLIST_INSERT_HEAD(&map_client_list, client, link);
3834 void cpu_unregister_map_client(void *_client)
3836 MapClient *client = (MapClient *)_client;
3838 QLIST_REMOVE(client, link);
3842 static void cpu_notify_map_clients(void)
3846 while (!QLIST_EMPTY(&map_client_list)) {
3847 client = QLIST_FIRST(&map_client_list);
3848 client->callback(client->opaque);
3849 cpu_unregister_map_client(client);
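/* Illustrative sketch (not part of the original file): a DMA helper that
   maps guest memory, falls back to the map-client callback when mapping
   resources (e.g. the single bounce buffer) are exhausted, and unmaps with
   the actual access length.  The example_* names and the is_write
   parameter position are assumptions; guarded out of the build. */
#if 0
static void example_dma_retry(void *opaque);        /* hypothetical retry hook */

static void example_start_dma(void *dma_state, target_phys_addr_t addr,
                              target_phys_addr_t len, int is_write)
{
    target_phys_addr_t plen = len;
    void *buf = cpu_physical_memory_map(addr, &plen, is_write);

    if (!buf) {
        /* resources exhausted: ask to be notified and try again later */
        cpu_register_map_client(dma_state, example_dma_retry);
        return;
    }
    /* ... transfer up to plen bytes through buf ... */
    cpu_physical_memory_unmap(buf, plen, is_write, plen);
}
#endif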
3853 /* Map a physical memory region into a host virtual address.
3854 * May map a subset of the requested range, given by and returned in *plen.
3855 * May return NULL if resources needed to perform the mapping are exhausted.
3856 * Use only for reads OR writes - not for read-modify-write operations.
3857 * Use cpu_register_map_client() to know when retrying the map operation is
3858  * likely to succeed. */
3860 void *cpu_physical_memory_map(target_phys_addr_t addr,
3861 target_phys_addr_t *plen,
3864 target_phys_addr_t len = *plen;
3865 target_phys_addr_t todo = 0;
3867 target_phys_addr_t page;
3870 ram_addr_t raddr = RAM_ADDR_MAX;
3875 page = addr & TARGET_PAGE_MASK;
3876 l = (page + TARGET_PAGE_SIZE) - addr;
3879 p = phys_page_find(page >> TARGET_PAGE_BITS);
3882 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3883 if (todo || bounce.buffer) {
3886 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3890 cpu_physical_memory_read(addr, bounce.buffer, l);
3894 return bounce.buffer;
3897 raddr = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3905 ret = qemu_ram_ptr_length(raddr, &rlen);
3910 /* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3911 * Will also mark the memory as dirty if is_write == 1. access_len gives
3912  * the amount of memory that was actually read or written by the caller. */
3914 void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3915 int is_write, target_phys_addr_t access_len)
3917 if (buffer != bounce.buffer) {
3919 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
3920 while (access_len) {
3922 l = TARGET_PAGE_SIZE;
3925 if (!cpu_physical_memory_is_dirty(addr1)) {
3926 /* invalidate code */
3927 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3929 cpu_physical_memory_set_dirty_flags(
3930 addr1, (0xff & ~CODE_DIRTY_FLAG));
3936 if (xen_enabled()) {
3937 xen_invalidate_map_cache_entry(buffer);
3942 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3944 qemu_vfree(bounce.buffer);
3945 bounce.buffer = NULL;
3946 cpu_notify_map_clients();
3949 /* warning: addr must be aligned */
3950 static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
3951 enum device_endian endian)
3959 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3962 if (!is_ram_rom_romd(pd)) {
3964 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3965 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
3966 val = io_mem_read(io_index, addr, 4);
3967 #if defined(TARGET_WORDS_BIGENDIAN)
3968 if (endian == DEVICE_LITTLE_ENDIAN) {
3972 if (endian == DEVICE_BIG_ENDIAN) {
3978 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3979 (addr & ~TARGET_PAGE_MASK);
3981 case DEVICE_LITTLE_ENDIAN:
3982 val = ldl_le_p(ptr);
3984 case DEVICE_BIG_ENDIAN:
3985 val = ldl_be_p(ptr);
3995 uint32_t ldl_phys(target_phys_addr_t addr)
3997 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4000 uint32_t ldl_le_phys(target_phys_addr_t addr)
4002 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4005 uint32_t ldl_be_phys(target_phys_addr_t addr)
4007 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
4010 /* warning: addr must be aligned */
4011 static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
4012 enum device_endian endian)
4020 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4023 if (!is_ram_rom_romd(pd)) {
4025 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4026 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
4028 /* XXX This is broken when device endian != cpu endian.
4029 Fix and add "endian" variable check */
4030 #ifdef TARGET_WORDS_BIGENDIAN
4031 val = io_mem_read(io_index, addr, 4) << 32;
4032 val |= io_mem_read(io_index, addr + 4, 4);
4034 val = io_mem_read(io_index, addr, 4);
4035 val |= io_mem_read(io_index, addr + 4, 4) << 32;
4039 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4040 (addr & ~TARGET_PAGE_MASK);
4042 case DEVICE_LITTLE_ENDIAN:
4043 val = ldq_le_p(ptr);
4045 case DEVICE_BIG_ENDIAN:
4046 val = ldq_be_p(ptr);
4056 uint64_t ldq_phys(target_phys_addr_t addr)
4058 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4061 uint64_t ldq_le_phys(target_phys_addr_t addr)
4063 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4066 uint64_t ldq_be_phys(target_phys_addr_t addr)
4068 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
4072 uint32_t ldub_phys(target_phys_addr_t addr)
4075 cpu_physical_memory_read(addr, &val, 1);
4079 /* warning: addr must be aligned */
4080 static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
4081 enum device_endian endian)
4089 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4092 if (!is_ram_rom_romd(pd)) {
4094 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4095 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
4096 val = io_mem_read(io_index, addr, 2);
4097 #if defined(TARGET_WORDS_BIGENDIAN)
4098 if (endian == DEVICE_LITTLE_ENDIAN) {
4102 if (endian == DEVICE_BIG_ENDIAN) {
4108 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4109 (addr & ~TARGET_PAGE_MASK);
4111 case DEVICE_LITTLE_ENDIAN:
4112 val = lduw_le_p(ptr);
4114 case DEVICE_BIG_ENDIAN:
4115 val = lduw_be_p(ptr);
4125 uint32_t lduw_phys(target_phys_addr_t addr)
4127 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4130 uint32_t lduw_le_phys(target_phys_addr_t addr)
4132 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4135 uint32_t lduw_be_phys(target_phys_addr_t addr)
4137 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
4140 /* warning: addr must be aligned. The ram page is not masked as dirty
4141 and the code inside is not invalidated. It is useful if the dirty
4142 bits are used to track modified PTEs */
4143 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
4150 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4153 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4154 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4155 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
4156 io_mem_write(io_index, addr, val, 4);
4158 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4159 ptr = qemu_get_ram_ptr(addr1);
4162 if (unlikely(in_migration)) {
4163 if (!cpu_physical_memory_is_dirty(addr1)) {
4164 /* invalidate code */
4165 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4167 cpu_physical_memory_set_dirty_flags(
4168 addr1, (0xff & ~CODE_DIRTY_FLAG));
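/* Illustrative sketch (not part of the original file): the typical caller
   of stl_phys_notdirty() is a target MMU helper that sets accessed/dirty
   bits in a page table entry without marking the page dirty or discarding
   translated code on it.  The PTE bit below is hypothetical; guarded out
   of the build. */
#if 0
#define EXAMPLE_PTE_ACCESSED 0x20            /* hypothetical "accessed" bit */

static void example_mark_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    if (!(pte & EXAMPLE_PTE_ACCESSED)) {
        stl_phys_notdirty(pte_addr, pte | EXAMPLE_PTE_ACCESSED);
    }
}
#endif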
4174 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
4181 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4184 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4185 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4186 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
4187 #ifdef TARGET_WORDS_BIGENDIAN
4188 io_mem_write(io_index, addr, val >> 32, 4);
4189 io_mem_write(io_index, addr + 4, (uint32_t)val, 4);
4191 io_mem_write(io_index, addr, (uint32_t)val, 4);
4192 io_mem_write(io_index, addr + 4, val >> 32, 4);
4195 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4196 (addr & ~TARGET_PAGE_MASK);
4201 /* warning: addr must be aligned */
4202 static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
4203 enum device_endian endian)
4210 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4213 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4214 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4215 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
4216 #if defined(TARGET_WORDS_BIGENDIAN)
4217 if (endian == DEVICE_LITTLE_ENDIAN) {
4221 if (endian == DEVICE_BIG_ENDIAN) {
4225 io_mem_write(io_index, addr, val, 4);
4227 unsigned long addr1;
4228 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4230 ptr = qemu_get_ram_ptr(addr1);
4232 case DEVICE_LITTLE_ENDIAN:
4235 case DEVICE_BIG_ENDIAN:
4242 if (!cpu_physical_memory_is_dirty(addr1)) {
4243 /* invalidate code */
4244 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4246 cpu_physical_memory_set_dirty_flags(addr1,
4247 (0xff & ~CODE_DIRTY_FLAG));
4252 void stl_phys(target_phys_addr_t addr, uint32_t val)
4254 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4257 void stl_le_phys(target_phys_addr_t addr, uint32_t val)
4259 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4262 void stl_be_phys(target_phys_addr_t addr, uint32_t val)
4264 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
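/* Illustrative sketch (not part of the original file): a device whose
   in-memory descriptors are defined as little-endian regardless of the
   target can use the _le_ accessors above and avoid explicit byte
   swapping.  The descriptor layout is hypothetical; guarded out of the
   build. */
#if 0
static void example_complete_descriptor(target_phys_addr_t desc)
{
    uint32_t flags = ldl_le_phys(desc + 0);  /* 32-bit LE flags word */

    stl_le_phys(desc + 4, 0);                /* clear the LE status field */
    stl_le_phys(desc + 0, flags | 1);        /* mark the descriptor done */
}
#endif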
4268 void stb_phys(target_phys_addr_t addr, uint32_t val)
4271 cpu_physical_memory_write(addr, &v, 1);
4274 /* warning: addr must be aligned */
4275 static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
4276 enum device_endian endian)
4283 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4286 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4287 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4288 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
4289 #if defined(TARGET_WORDS_BIGENDIAN)
4290 if (endian == DEVICE_LITTLE_ENDIAN) {
4294 if (endian == DEVICE_BIG_ENDIAN) {
4298 io_mem_write(io_index, addr, val, 2);
4300 unsigned long addr1;
4301 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4303 ptr = qemu_get_ram_ptr(addr1);
4305 case DEVICE_LITTLE_ENDIAN:
4308 case DEVICE_BIG_ENDIAN:
4315 if (!cpu_physical_memory_is_dirty(addr1)) {
4316 /* invalidate code */
4317 tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
4319 cpu_physical_memory_set_dirty_flags(addr1,
4320 (0xff & ~CODE_DIRTY_FLAG));
4325 void stw_phys(target_phys_addr_t addr, uint32_t val)
4327 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4330 void stw_le_phys(target_phys_addr_t addr, uint32_t val)
4332 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4335 void stw_be_phys(target_phys_addr_t addr, uint32_t val)
4337 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4341 void stq_phys(target_phys_addr_t addr, uint64_t val)
4344 cpu_physical_memory_write(addr, &val, 8);
4347 void stq_le_phys(target_phys_addr_t addr, uint64_t val)
4349 val = cpu_to_le64(val);
4350 cpu_physical_memory_write(addr, &val, 8);
4353 void stq_be_phys(target_phys_addr_t addr, uint64_t val)
4355 val = cpu_to_be64(val);
4356 cpu_physical_memory_write(addr, &val, 8);
4359 /* virtual memory access for debug (includes writing to ROM) */
4360 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
4361 uint8_t *buf, int len, int is_write)
4364 target_phys_addr_t phys_addr;
4368 page = addr & TARGET_PAGE_MASK;
4369 phys_addr = cpu_get_phys_page_debug(env, page);
4370 /* if no physical page mapped, return an error */
4371 if (phys_addr == -1)
4373 l = (page + TARGET_PAGE_SIZE) - addr;
4376 phys_addr += (addr & ~TARGET_PAGE_MASK);
4378 cpu_physical_memory_write_rom(phys_addr, buf, l);
4380 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
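/* Illustrative sketch (not part of the original file): how a debugger stub
   might read guest virtual memory through the helper above; it walks the
   guest page tables via cpu_get_phys_page_debug() and fails if any page in
   the range is unmapped.  Guarded out of the build. */
#if 0
static int example_debug_read(CPUState *env, target_ulong vaddr,
                              uint8_t *buf, int len)
{
    return cpu_memory_rw_debug(env, vaddr, buf, len, 0);
}
#endif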
4389 /* in deterministic execution mode, instructions doing device I/Os
4390 must be at the end of the TB */
4391 void cpu_io_recompile(CPUState *env, void *retaddr)
4393 TranslationBlock *tb;
4395 target_ulong pc, cs_base;
4398 tb = tb_find_pc((unsigned long)retaddr);
4400 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
4403 n = env->icount_decr.u16.low + tb->icount;
4404 cpu_restore_state(tb, env, (unsigned long)retaddr);
4405     /* Calculate how many instructions had been executed before the fault occurred. */
4407 n = n - env->icount_decr.u16.low;
4408 /* Generate a new TB ending on the I/O insn. */
4410 /* On MIPS and SH, delay slot instructions can only be restarted if
4411 they were already the first instruction in the TB. If this is not
4412        the first instruction in a TB then re-execute the preceding branch. */
4414 #if defined(TARGET_MIPS)
4415 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4416 env->active_tc.PC -= 4;
4417 env->icount_decr.u16.low++;
4418 env->hflags &= ~MIPS_HFLAG_BMASK;
4420 #elif defined(TARGET_SH4)
4421 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4424 env->icount_decr.u16.low++;
4425 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4428 /* This should never happen. */
4429 if (n > CF_COUNT_MASK)
4430 cpu_abort(env, "TB too big during recompile");
4432 cflags = n | CF_LAST_IO;
4434 cs_base = tb->cs_base;
4436 tb_phys_invalidate(tb, -1);
4437 /* FIXME: In theory this could raise an exception. In practice
4438 we have already translated the block once so it's probably ok. */
4439 tb_gen_code(env, pc, cs_base, flags, cflags);
4440 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
4441 the first in the TB) then we end up generating a whole new TB and
4442 repeating the fault, which is horribly inefficient.
4443        Better would be to execute just this insn uncached, or generate a second new TB. */
4445 cpu_resume_from_signal(env, NULL);
4448 #if !defined(CONFIG_USER_ONLY)
4450 void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
4452 int i, target_code_size, max_target_code_size;
4453 int direct_jmp_count, direct_jmp2_count, cross_page;
4454 TranslationBlock *tb;
4456 target_code_size = 0;
4457 max_target_code_size = 0;
4459 direct_jmp_count = 0;
4460 direct_jmp2_count = 0;
4461 for(i = 0; i < nb_tbs; i++) {
4463 target_code_size += tb->size;
4464 if (tb->size > max_target_code_size)
4465 max_target_code_size = tb->size;
4466 if (tb->page_addr[1] != -1)
4468 if (tb->tb_next_offset[0] != 0xffff) {
4470 if (tb->tb_next_offset[1] != 0xffff) {
4471 direct_jmp2_count++;
4475 /* XXX: avoid using doubles ? */
4476 cpu_fprintf(f, "Translation buffer state:\n");
4477 cpu_fprintf(f, "gen code size %td/%ld\n",
4478 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4479 cpu_fprintf(f, "TB count %d/%d\n",
4480 nb_tbs, code_gen_max_blocks);
4481 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
4482 nb_tbs ? target_code_size / nb_tbs : 0,
4483 max_target_code_size);
4484 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
4485 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4486 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
4487 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4489 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4490 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
4492 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4494 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
4495 cpu_fprintf(f, "\nStatistics:\n");
4496 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4497 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4498 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
4499 tcg_dump_info(f, cpu_fprintf);
4502 /* NOTE: this function can trigger an exception */
4503 /* NOTE2: the returned address is not exactly the physical address: it
4504 is the offset relative to phys_ram_base */
4505 tb_page_addr_t get_page_addr_code(CPUState *env1, target_ulong addr)
4507 int mmu_idx, page_index, pd;
4510 page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
4511 mmu_idx = cpu_mmu_index(env1);
4512 if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
4513 (addr & TARGET_PAGE_MASK))) {
4516 pd = env1->tlb_table[mmu_idx][page_index].addr_code & ~TARGET_PAGE_MASK;
4517 if (pd != IO_MEM_RAM && pd != IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
4518 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_SPARC)
4519 cpu_unassigned_access(env1, addr, 0, 1, 0, 4);
4521 cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
4524 p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
4525 return qemu_ram_addr_from_host_nofail(p);
4528 #define MMUSUFFIX _cmmu
4530 #define GETPC() NULL
4531 #define env cpu_single_env
4532 #define SOFTMMU_CODE_ACCESS
4535 #include "softmmu_template.h"
4538 #include "softmmu_template.h"
4541 #include "softmmu_template.h"
4544 #include "softmmu_template.h"