4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
22 #include "qemu/osdep.h"
25 #include "qemu-common.h"
26 #define NO_CPU_IO_DEFS
29 #include "disas/disas.h"
30 #include "exec/exec-all.h"
32 #if defined(CONFIG_USER_ONLY)
34 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
35 #include <sys/param.h>
36 #if __FreeBSD_version >= 700104
37 #define HAVE_KINFO_GETVMMAP
38 #define sigqueue sigqueue_freebsd /* avoid redefinition */
40 #include <machine/profile.h>
49 #include "exec/address-spaces.h"
52 #include "exec/cputlb.h"
53 #include "exec/tb-hash.h"
54 #include "translate-all.h"
55 #include "qemu/bitmap.h"
56 #include "qemu/error-report.h"
57 #include "qemu/timer.h"
58 #include "qemu/main-loop.h"
60 #include "sysemu/cpus.h"
62 /* #define DEBUG_TB_INVALIDATE */
63 /* #define DEBUG_TB_FLUSH */
64 /* make various TB consistency checks */
65 /* #define DEBUG_TB_CHECK */
67 #ifdef DEBUG_TB_INVALIDATE
68 #define DEBUG_TB_INVALIDATE_GATE 1
70 #define DEBUG_TB_INVALIDATE_GATE 0
74 #define DEBUG_TB_FLUSH_GATE 1
76 #define DEBUG_TB_FLUSH_GATE 0
79 #if !defined(CONFIG_USER_ONLY)
80 /* TB consistency checks only implemented for usermode emulation. */
85 #define DEBUG_TB_CHECK_GATE 1
87 #define DEBUG_TB_CHECK_GATE 0
90 /* Access to the various translation structures needs to be serialised via locks
91 * for consistency. This is automatic for SoftMMU based system
92 * emulation due to its single-threaded nature. In user-mode emulation,
93 * access to the memory-related structures is protected with the
94 * mmap_lock.
95 */
97 #define assert_memory_lock() tcg_debug_assert(have_tb_lock)
99 #define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
102 #define SMC_BITMAP_USE_THRESHOLD 10
104 typedef struct PageDesc {
105 /* list of TBs intersecting this ram page */
107 #ifdef CONFIG_SOFTMMU
108 /* in order to optimize self-modifying code, we count the number of write
109 accesses to a given page and switch to a bitmap once SMC_BITMAP_USE_THRESHOLD is reached */
110 unsigned long *code_bitmap;
111 unsigned int code_write_count;
115 #ifndef CONFIG_USER_ONLY
121 * struct page_entry - page descriptor entry
122 * @pd: pointer to the &struct PageDesc of the page this entry represents
123 * @index: page index of the page
124 * @locked: whether the page is locked
126 * This struct helps us keep track of the locked state of a page, without
127 * bloating &struct PageDesc.
129 * A page lock protects accesses to all fields of &struct PageDesc.
131 * See also: &struct page_collection.
135 tb_page_addr_t index;
140 * struct page_collection - tracks a set of pages (i.e. &struct page_entry's)
141 * @tree: Binary search tree (BST) of the pages, with key == page index
142 * @max: Pointer to the page in @tree with the highest page index
144 * To avoid deadlock we lock pages in ascending order of page index.
145 * When operating on a set of pages, we need to keep track of them so that
146 * we can lock them in order and also unlock them later. For this we collect
147 * pages (i.e. &struct page_entry's) in a binary search @tree. Given that the
148 * @tree implementation we use does not provide an O(1) operation to obtain the
149 * highest-ranked element, we use @max to keep track of the inserted page
150 * with the highest index. This is valuable because if a page is not in
151 * the tree and its index is higher than @max's, then we can lock it
152 * without breaking the locking order rule.
154 * Note on naming: 'struct page_set' would be shorter, but we already have a few
155 * page_set_*() helpers, so page_collection is used instead to avoid confusion.
157 * See also: page_collection_lock().
159 struct page_collection {
161 struct page_entry *max;
164 /* list iterators for lists of tagged pointers in TranslationBlock */
165 #define TB_FOR_EACH_TAGGED(head, tb, n, field) \
166 for (n = (head) & 1, tb = (TranslationBlock *)((head) & ~1); \
167 tb; tb = (TranslationBlock *)tb->field[n], n = (uintptr_t)tb & 1, \
168 tb = (TranslationBlock *)((uintptr_t)tb & ~1))
170 #define PAGE_FOR_EACH_TB(pagedesc, tb, n) \
171 TB_FOR_EACH_TAGGED((pagedesc)->first_tb, tb, n, page_next)
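/* A TB can be linked into at most two page lists (one per physical page it
 * covers), so its links live in tb->page_next[2]. Every pointer stored in a
 * PageDesc's first_tb or in page_next[] is tagged: bit 0 records which of the
 * two slots ('n' in the iterator above) continues the chain for the
 * pointed-to TB, and the remaining bits hold the TranslationBlock pointer.
 * The iterator therefore peels the tag into 'n' and masks it off before
 * dereferencing. */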
173 /* In system mode we want L1_MAP to be based on ram offsets,
174 while in user mode we want it to be based on virtual addresses. */
175 #if !defined(CONFIG_USER_ONLY)
176 #if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
177 # define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
179 # define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
182 # define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
185 /* Size of the L2 (and L3, etc) page tables. */
187 #define V_L2_SIZE (1 << V_L2_BITS)
189 /* Make sure all possible CPU event bits fit in tb->trace_vcpu_dstate */
190 QEMU_BUILD_BUG_ON(CPU_TRACE_DSTATE_MAX_EVENTS >
191 sizeof(((TranslationBlock *)0)->trace_vcpu_dstate)
195 * L1 Mapping properties
197 static int v_l1_size;
198 static int v_l1_shift;
199 static int v_l2_levels;
201 /* The bottom level has pointers to PageDesc, and is indexed by
202 * anything from 4 to (V_L2_BITS + 3) bits, depending on target page size.
204 #define V_L1_MIN_BITS 4
205 #define V_L1_MAX_BITS (V_L2_BITS + 3)
206 #define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS)
208 static void *l1_map[V_L1_MAX_SIZE];
210 /* code generation context */
211 TCGContext tcg_init_ctx;
212 __thread TCGContext *tcg_ctx;
216 /* translation block context */
217 static __thread int have_tb_lock;
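/* As a worked example for page_table_config_init() below (the concrete
 * values are target-dependent; V_L2_BITS is assumed to be 10 here): with
 * L1_MAP_ADDR_SPACE_BITS == 32 and TARGET_PAGE_BITS == 12 there are 20 bits
 * of page index to map, giving v_l1_bits = 10 (so v_l1_size = 1024),
 * v_l1_shift = 10 and v_l2_levels = 0, i.e. each l1_map entry points
 * directly at a leaf array of V_L2_SIZE PageDescs. */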
219 static void page_table_config_init(void)
223 assert(TARGET_PAGE_BITS);
224 /* The bits remaining after N lower levels of page tables. */
225 v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS;
226 if (v_l1_bits < V_L1_MIN_BITS) {
227 v_l1_bits += V_L2_BITS;
230 v_l1_size = 1 << v_l1_bits;
231 v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits;
232 v_l2_levels = v_l1_shift / V_L2_BITS - 1;
234 assert(v_l1_bits <= V_L1_MAX_BITS);
235 assert(v_l1_shift % V_L2_BITS == 0);
236 assert(v_l2_levels >= 0);
239 #define assert_tb_locked() tcg_debug_assert(have_tb_lock)
240 #define assert_tb_unlocked() tcg_debug_assert(!have_tb_lock)
244 assert_tb_unlocked();
245 qemu_mutex_lock(&tb_ctx.tb_lock);
253 qemu_mutex_unlock(&tb_ctx.tb_lock);
256 void tb_lock_reset(void)
259 qemu_mutex_unlock(&tb_ctx.tb_lock);
264 void cpu_gen_init(void)
266 tcg_context_init(&tcg_init_ctx);
269 /* Encode VAL as a signed leb128 sequence at P.
270 Return P incremented past the encoded value. */
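/* For example, with 7-bit groups and 0x80 as the continuation bit, 300
 * encodes as the two bytes 0xac 0x02, while -1 encodes as the single byte
 * 0x7f. Bit 0x40 of the final group carries the sign, which is why
 * decode_sleb128() below sign-extends when that bit is set. */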
271 static uint8_t *encode_sleb128(uint8_t *p, target_long val)
278 more = !((val == 0 && (byte & 0x40) == 0)
279 || (val == -1 && (byte & 0x40) != 0));
289 /* Decode a signed leb128 sequence at *PP; increment *PP past the
290 decoded value. Return the decoded value. */
291 static target_long decode_sleb128(uint8_t **pp)
299 val |= (target_ulong)(byte & 0x7f) << shift;
301 } while (byte & 0x80);
302 if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
303 val |= -(target_ulong)1 << shift;
310 /* Encode the data collected about the instructions while compiling TB.
311 Place the data at BLOCK, and return the number of bytes consumed.
313 The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
314 which come from the target's insn_start data, followed by a uintptr_t
315 which comes from the host pc of the end of the code implementing the insn.
317 Each line of the table is encoded as sleb128 deltas from the previous
318 line. The seed for the first line is { tb->pc, 0..., tb->tc.ptr }.
319 That is, the first column is seeded with the guest pc, the last column
320 with the host pc, and the middle columns with zeros. */
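/* For instance, assuming TARGET_INSN_START_WORDS == 2 purely for the sake of
 * illustration, row i would encode the three deltas
 * { pc[i] - pc[i-1], data[i] - data[i-1], insn_end_off[i] - insn_end_off[i-1] },
 * with the seeds described above, each emitted via encode_sleb128(). */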
322 static int encode_search(TranslationBlock *tb, uint8_t *block)
324 uint8_t *highwater = tcg_ctx->code_gen_highwater;
328 for (i = 0, n = tb->icount; i < n; ++i) {
331 for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
333 prev = (j == 0 ? tb->pc : 0);
335 prev = tcg_ctx->gen_insn_data[i - 1][j];
337 p = encode_sleb128(p, tcg_ctx->gen_insn_data[i][j] - prev);
339 prev = (i == 0 ? 0 : tcg_ctx->gen_insn_end_off[i - 1]);
340 p = encode_sleb128(p, tcg_ctx->gen_insn_end_off[i] - prev);
342 /* Test for (pending) buffer overflow. The assumption is that any
343 one row beginning below the high water mark cannot overrun
344 the buffer completely. Thus we can test for overflow after
345 encoding a row without having to check during encoding. */
346 if (unlikely(p > highwater)) {
354 /* The cpu state corresponding to 'searched_pc' is restored.
355 * Called with tb_lock held.
356 * When reset_icount is true, the current TB will be interrupted and
357 * icount should be recalculated.
359 static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
360 uintptr_t searched_pc, bool reset_icount)
362 target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
363 uintptr_t host_pc = (uintptr_t)tb->tc.ptr;
364 CPUArchState *env = cpu->env_ptr;
365 uint8_t *p = tb->tc.ptr + tb->tc.size;
366 int i, j, num_insns = tb->icount;
367 #ifdef CONFIG_PROFILER
368 TCGProfile *prof = &tcg_ctx->prof;
369 int64_t ti = profile_getclock();
372 searched_pc -= GETPC_ADJ;
374 if (searched_pc < host_pc) {
378 /* Reconstruct the stored insn data while looking for the point at
379 which the end of the insn exceeds the searched_pc. */
380 for (i = 0; i < num_insns; ++i) {
381 for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
382 data[j] += decode_sleb128(&p);
384 host_pc += decode_sleb128(&p);
385 if (host_pc > searched_pc) {
392 if (reset_icount && (tb->cflags & CF_USE_ICOUNT)) {
394 /* Reset the cycle counter to the start of the block
395 and shift it by the number of instructions actually executed */
396 cpu->icount_decr.u16.low += num_insns - i;
398 restore_state_to_opc(env, tb, data);
400 #ifdef CONFIG_PROFILER
401 atomic_set(&prof->restore_time,
402 prof->restore_time + profile_getclock() - ti);
403 atomic_set(&prof->restore_count, prof->restore_count + 1);
408 bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc, bool will_exit)
410 TranslationBlock *tb;
412 uintptr_t check_offset;
414 /* The host_pc has to be in the region of the current code buffer. If
415 * it is not, we will not be able to resolve it here. The two cases
416 * where host_pc will not be correct are:
418 * - fault during translation (instruction fetch)
419 * - fault from helper (not using GETPC() macro)
421 * Either way we need to return early to avoid blowing up on a
422 * recursive tb_lock() as we can't resolve it here.
424 * We are using unsigned arithmetic, so if host_pc <
425 * tcg_init_ctx.code_gen_buffer, check_offset will wrap to well
426 * above code_gen_buffer_size.
428 check_offset = host_pc - (uintptr_t) tcg_init_ctx.code_gen_buffer;
430 if (check_offset < tcg_init_ctx.code_gen_buffer_size) {
432 tb = tcg_tb_lookup(host_pc);
434 cpu_restore_state_from_tb(cpu, tb, host_pc, will_exit);
435 if (tb->cflags & CF_NOCACHE) {
436 /* one-shot translation, invalidate it immediately */
437 tb_phys_invalidate(tb, -1);
448 static void page_init(void)
451 page_table_config_init();
453 #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
455 #ifdef HAVE_KINFO_GETVMMAP
456 struct kinfo_vmentry *freep;
459 freep = kinfo_getvmmap(getpid(), &cnt);
462 for (i = 0; i < cnt; i++) {
463 unsigned long startaddr, endaddr;
465 startaddr = freep[i].kve_start;
466 endaddr = freep[i].kve_end;
467 if (h2g_valid(startaddr)) {
468 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
470 if (h2g_valid(endaddr)) {
471 endaddr = h2g(endaddr);
472 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
474 #if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
476 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
487 last_brk = (unsigned long)sbrk(0);
489 f = fopen("/compat/linux/proc/self/maps", "r");
494 unsigned long startaddr, endaddr;
497 n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
499 if (n == 2 && h2g_valid(startaddr)) {
500 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
502 if (h2g_valid(endaddr)) {
503 endaddr = h2g(endaddr);
507 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
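/* Look up, and optionally allocate, the PageDesc for a given page index.
 * The l1_map radix tree is traversed lock-free: each level is read with
 * atomic_rcu_read(), and missing levels are installed with atomic_cmpxchg()
 * so that concurrent allocators race benignly (the loser discards its
 * freshly allocated level and continues with the one that won the race). */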
519 static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
525 /* Level 1. Always allocated. */
526 lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));
529 for (i = v_l2_levels; i > 0; i--) {
530 void **p = atomic_rcu_read(lp);
538 p = g_new0(void *, V_L2_SIZE);
539 existing = atomic_cmpxchg(lp, NULL, p);
540 if (unlikely(existing)) {
546 lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
549 pd = atomic_rcu_read(lp);
556 pd = g_new0(PageDesc, V_L2_SIZE);
557 #ifndef CONFIG_USER_ONLY
561 for (i = 0; i < V_L2_SIZE; i++) {
562 qemu_spin_init(&pd[i].lock);
566 existing = atomic_cmpxchg(lp, NULL, pd);
567 if (unlikely(existing)) {
573 return pd + (index & (V_L2_SIZE - 1));
576 static inline PageDesc *page_find(tb_page_addr_t index)
578 return page_find_alloc(index, 0);
581 static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
582 PageDesc **ret_p2, tb_page_addr_t phys2, int alloc);
584 /* In user-mode page locks aren't used; mmap_lock is enough */
585 #ifdef CONFIG_USER_ONLY
587 #define assert_page_locked(pd) tcg_debug_assert(have_mmap_lock())
589 static inline void page_lock(PageDesc *pd)
592 static inline void page_unlock(PageDesc *pd)
595 static inline void page_lock_tb(const TranslationBlock *tb)
598 static inline void page_unlock_tb(const TranslationBlock *tb)
601 struct page_collection *
602 page_collection_lock(tb_page_addr_t start, tb_page_addr_t end)
607 void page_collection_unlock(struct page_collection *set)
609 #else /* !CONFIG_USER_ONLY */
611 #ifdef CONFIG_DEBUG_TCG
613 static __thread GHashTable *ht_pages_locked_debug;
615 static void ht_pages_locked_debug_init(void)
617 if (ht_pages_locked_debug) {
620 ht_pages_locked_debug = g_hash_table_new(NULL, NULL);
623 static bool page_is_locked(const PageDesc *pd)
627 ht_pages_locked_debug_init();
628 found = g_hash_table_lookup(ht_pages_locked_debug, pd);
632 static void page_lock__debug(PageDesc *pd)
634 ht_pages_locked_debug_init();
635 g_assert(!page_is_locked(pd));
636 g_hash_table_insert(ht_pages_locked_debug, pd, pd);
639 static void page_unlock__debug(const PageDesc *pd)
643 ht_pages_locked_debug_init();
644 g_assert(page_is_locked(pd));
645 removed = g_hash_table_remove(ht_pages_locked_debug, pd);
650 do_assert_page_locked(const PageDesc *pd, const char *file, int line)
652 if (unlikely(!page_is_locked(pd))) {
653 error_report("assert_page_lock: PageDesc %p not locked @ %s:%d",
659 #define assert_page_locked(pd) do_assert_page_locked(pd, __FILE__, __LINE__)
661 void assert_no_pages_locked(void)
663 ht_pages_locked_debug_init();
664 g_assert(g_hash_table_size(ht_pages_locked_debug) == 0);
667 #else /* !CONFIG_DEBUG_TCG */
669 #define assert_page_locked(pd)
671 static inline void page_lock__debug(const PageDesc *pd)
675 static inline void page_unlock__debug(const PageDesc *pd)
679 #endif /* CONFIG_DEBUG_TCG */
681 static inline void page_lock(PageDesc *pd)
683 page_lock__debug(pd);
684 qemu_spin_lock(&pd->lock);
687 static inline void page_unlock(PageDesc *pd)
689 qemu_spin_unlock(&pd->lock);
690 page_unlock__debug(pd);
693 /* lock the page(s) of a TB in the correct acquisition order */
694 static inline void page_lock_tb(const TranslationBlock *tb)
696 page_lock_pair(NULL, tb->page_addr[0], NULL, tb->page_addr[1], 0);
699 static inline void page_unlock_tb(const TranslationBlock *tb)
701 page_unlock(page_find(tb->page_addr[0] >> TARGET_PAGE_BITS));
702 if (unlikely(tb->page_addr[1] != -1)) {
703 page_unlock(page_find(tb->page_addr[1] >> TARGET_PAGE_BITS));
707 static inline struct page_entry *
708 page_entry_new(PageDesc *pd, tb_page_addr_t index)
710 struct page_entry *pe = g_malloc(sizeof(*pe));
718 static void page_entry_destroy(gpointer p)
720 struct page_entry *pe = p;
722 g_assert(pe->locked);
727 /* returns false on success */
728 static bool page_entry_trylock(struct page_entry *pe)
732 busy = qemu_spin_trylock(&pe->pd->lock);
734 g_assert(!pe->locked);
736 page_lock__debug(pe->pd);
741 static void do_page_entry_lock(struct page_entry *pe)
744 g_assert(!pe->locked);
748 static gboolean page_entry_lock(gpointer key, gpointer value, gpointer data)
750 struct page_entry *pe = value;
752 do_page_entry_lock(pe);
756 static gboolean page_entry_unlock(gpointer key, gpointer value, gpointer data)
758 struct page_entry *pe = value;
768 * Trylock a page, and if successful, add the page to a collection.
769 * Returns true ("busy") if the page could not be locked; false otherwise.
771 static bool page_trylock_add(struct page_collection *set, tb_page_addr_t addr)
773 tb_page_addr_t index = addr >> TARGET_PAGE_BITS;
774 struct page_entry *pe;
777 pe = g_tree_lookup(set->tree, &index);
782 pd = page_find(index);
787 pe = page_entry_new(pd, index);
788 g_tree_insert(set->tree, &pe->index, pe);
791 * If this is either (1) the first insertion or (2) a page whose index
792 * is higher than any other so far, just lock the page and move on.
794 if (set->max == NULL || pe->index > set->max->index) {
796 do_page_entry_lock(pe);
800 * Try to acquire out-of-order lock; if busy, return busy so that we acquire
803 return page_entry_trylock(pe);
806 static gint tb_page_addr_cmp(gconstpointer ap, gconstpointer bp, gpointer udata)
808 tb_page_addr_t a = *(const tb_page_addr_t *)ap;
809 tb_page_addr_t b = *(const tb_page_addr_t *)bp;
820 * Lock a range of pages ([@start,@end[) as well as the pages of all
821 * intersecting TBs.
822 * Locking order: acquire locks in ascending order of page index.
824 struct page_collection *
825 page_collection_lock(tb_page_addr_t start, tb_page_addr_t end)
827 struct page_collection *set = g_malloc(sizeof(*set));
828 tb_page_addr_t index;
831 start >>= TARGET_PAGE_BITS;
832 end >>= TARGET_PAGE_BITS;
833 g_assert(start <= end);
835 set->tree = g_tree_new_full(tb_page_addr_cmp, NULL, NULL,
838 assert_no_pages_locked();
841 g_tree_foreach(set->tree, page_entry_lock, NULL);
843 for (index = start; index <= end; index++) {
844 TranslationBlock *tb;
847 pd = page_find(index);
851 if (page_trylock_add(set, index << TARGET_PAGE_BITS)) {
852 g_tree_foreach(set->tree, page_entry_unlock, NULL);
855 assert_page_locked(pd);
856 PAGE_FOR_EACH_TB(pd, tb, n) {
857 if (page_trylock_add(set, tb->page_addr[0]) ||
858 (tb->page_addr[1] != -1 &&
859 page_trylock_add(set, tb->page_addr[1]))) {
860 /* drop all locks, and reacquire in order */
861 g_tree_foreach(set->tree, page_entry_unlock, NULL);
869 void page_collection_unlock(struct page_collection *set)
871 /* entries are unlocked and freed via page_entry_destroy */
872 g_tree_destroy(set->tree);
876 #endif /* !CONFIG_USER_ONLY */
878 static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
879 PageDesc **ret_p2, tb_page_addr_t phys2, int alloc)
883 assert_memory_lock();
884 g_assert(phys1 != -1 && phys1 != phys2);
885 p1 = page_find_alloc(phys1 >> TARGET_PAGE_BITS, alloc);
889 if (likely(phys2 == -1)) {
893 p2 = page_find_alloc(phys2 >> TARGET_PAGE_BITS, alloc);
906 #if defined(CONFIG_USER_ONLY)
907 /* Currently it is not recommended to allocate big chunks of data in
908 user mode. This will change when a dedicated libc is used. */
909 /* ??? 64-bit hosts ought to have no problem mmaping data outside the
910 region in which the guest needs to run. Revisit this. */
911 #define USE_STATIC_CODE_GEN_BUFFER
914 /* Minimum size of the code gen buffer. This number is arbitrarily chosen,
915 but not so small that we can't have a fair number of TBs live. */
916 #define MIN_CODE_GEN_BUFFER_SIZE (1024u * 1024)
918 /* Maximum size of the code gen buffer we'd like to use. Unless otherwise
919 indicated, this is constrained by the range of direct branches on the
920 host cpu, as used by the TCG implementation of goto_tb. */
921 #if defined(__x86_64__)
922 # define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
923 #elif defined(__sparc__)
924 # define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
925 #elif defined(__powerpc64__)
926 # define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
927 #elif defined(__powerpc__)
928 # define MAX_CODE_GEN_BUFFER_SIZE (32u * 1024 * 1024)
929 #elif defined(__aarch64__)
930 # define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
931 #elif defined(__s390x__)
932 /* We have a +- 4GB range on the branches; leave some slop. */
933 # define MAX_CODE_GEN_BUFFER_SIZE (3ul * 1024 * 1024 * 1024)
934 #elif defined(__mips__)
935 /* We have a 256MB branch region, but leave room to make sure the
936 main executable is also within that region. */
937 # define MAX_CODE_GEN_BUFFER_SIZE (128ul * 1024 * 1024)
939 # define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1)
942 #define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)
944 #define DEFAULT_CODE_GEN_BUFFER_SIZE \
945 (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
946 ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
948 static inline size_t size_code_gen_buffer(size_t tb_size)
950 /* Size the buffer. */
952 #ifdef USE_STATIC_CODE_GEN_BUFFER
953 tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
955 /* ??? Needs adjustments. */
956 /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
957 static buffer, we could size this on RESERVED_VA, on the text
958 segment size of the executable, or continue to use the default. */
959 tb_size = (unsigned long)(ram_size / 4);
962 if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
963 tb_size = MIN_CODE_GEN_BUFFER_SIZE;
965 if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
966 tb_size = MAX_CODE_GEN_BUFFER_SIZE;
972 /* In order to use J and JAL within the code_gen_buffer, we require
973 that the buffer not cross a 256MB boundary. */
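/* For example, addr = 0x0ff00000 with size = 0x00200000 crosses: the start
 * and end of the buffer differ in bits above the low 28 bits, so the XOR
 * below is non-zero after masking. */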
974 static inline bool cross_256mb(void *addr, size_t size)
976 return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful;
979 /* We weren't able to allocate a buffer without crossing that boundary,
980 so make do with the larger portion of the buffer that doesn't cross.
981 Returns the new base of the buffer, and adjusts code_gen_buffer_size. */
982 static inline void *split_cross_256mb(void *buf1, size_t size1)
984 void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
985 size_t size2 = buf1 + size1 - buf2;
993 tcg_ctx->code_gen_buffer_size = size1;
998 #ifdef USE_STATIC_CODE_GEN_BUFFER
999 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
1000 __attribute__((aligned(CODE_GEN_ALIGN)));
1002 static inline void *alloc_code_gen_buffer(void)
1004 void *buf = static_code_gen_buffer;
1005 void *end = static_code_gen_buffer + sizeof(static_code_gen_buffer);
1008 /* page-align the beginning and end of the buffer */
1009 buf = QEMU_ALIGN_PTR_UP(buf, qemu_real_host_page_size);
1010 end = QEMU_ALIGN_PTR_DOWN(end, qemu_real_host_page_size);
1014 /* Honor a command-line option limiting the size of the buffer. */
1015 if (size > tcg_ctx->code_gen_buffer_size) {
1016 size = QEMU_ALIGN_DOWN(tcg_ctx->code_gen_buffer_size,
1017 qemu_real_host_page_size);
1019 tcg_ctx->code_gen_buffer_size = size;
1022 if (cross_256mb(buf, size)) {
1023 buf = split_cross_256mb(buf, size);
1024 size = tcg_ctx->code_gen_buffer_size;
1028 if (qemu_mprotect_rwx(buf, size)) {
1031 qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
1035 #elif defined(_WIN32)
1036 static inline void *alloc_code_gen_buffer(void)
1038 size_t size = tcg_ctx->code_gen_buffer_size;
1039 return VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT,
1040 PAGE_EXECUTE_READWRITE);
1043 static inline void *alloc_code_gen_buffer(void)
1045 int prot = PROT_WRITE | PROT_READ | PROT_EXEC;
1046 int flags = MAP_PRIVATE | MAP_ANONYMOUS;
1047 uintptr_t start = 0;
1048 size_t size = tcg_ctx->code_gen_buffer_size;
1051 /* Constrain the position of the buffer based on the host cpu.
1052 Note that these addresses are chosen in concert with the
1053 addresses assigned in the relevant linker script file. */
1054 # if defined(__PIE__) || defined(__PIC__)
1055 /* Don't bother setting a preferred location if we're building
1056 a position-independent executable. We're more likely to get
1057 an address near the main executable if we let the kernel
1058 choose the address. */
1059 # elif defined(__x86_64__) && defined(MAP_32BIT)
1060 /* Force the memory down into low memory with the executable.
1061 Leave the choice of exact location with the kernel. */
1063 /* Cannot expect to map more than 800MB in low memory. */
1064 if (size > 800u * 1024 * 1024) {
1065 tcg_ctx->code_gen_buffer_size = size = 800u * 1024 * 1024;
1067 # elif defined(__sparc__)
1068 start = 0x40000000ul;
1069 # elif defined(__s390x__)
1070 start = 0x90000000ul;
1071 # elif defined(__mips__)
1072 # if _MIPS_SIM == _ABI64
1073 start = 0x128000000ul;
1075 start = 0x08000000ul;
1079 buf = mmap((void *)start, size, prot, flags, -1, 0);
1080 if (buf == MAP_FAILED) {
1085 if (cross_256mb(buf, size)) {
1086 /* Try again, with the original still mapped, to avoid re-acquiring
1087 that 256mb crossing. This time don't specify an address. */
1089 void *buf2 = mmap(NULL, size, prot, flags, -1, 0);
1090 switch ((int)(buf2 != MAP_FAILED)) {
1092 if (!cross_256mb(buf2, size)) {
1093 /* Success! Use the new buffer. */
1097 /* Failure. Work with what we had. */
1101 /* Split the original buffer. Free the smaller half. */
1102 buf2 = split_cross_256mb(buf, size);
1103 size2 = tcg_ctx->code_gen_buffer_size;
1105 munmap(buf + size2, size - size2);
1107 munmap(buf, size - size2);
1116 /* Request large pages for the buffer. */
1117 qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
1121 #endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */
1123 static inline void code_gen_alloc(size_t tb_size)
1125 tcg_ctx->code_gen_buffer_size = size_code_gen_buffer(tb_size);
1126 tcg_ctx->code_gen_buffer = alloc_code_gen_buffer();
1127 if (tcg_ctx->code_gen_buffer == NULL) {
1128 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
1131 qemu_mutex_init(&tb_ctx.tb_lock);
1134 static bool tb_cmp(const void *ap, const void *bp)
1136 const TranslationBlock *a = ap;
1137 const TranslationBlock *b = bp;
1139 return a->pc == b->pc &&
1140 a->cs_base == b->cs_base &&
1141 a->flags == b->flags &&
1142 (tb_cflags(a) & CF_HASH_MASK) == (tb_cflags(b) & CF_HASH_MASK) &&
1143 a->trace_vcpu_dstate == b->trace_vcpu_dstate &&
1144 a->page_addr[0] == b->page_addr[0] &&
1145 a->page_addr[1] == b->page_addr[1];
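/* The hash used to place a TB in the QHT (see the tb_hash_func() calls
 * elsewhere in this file) is derived from phys_pc, pc, flags, the
 * CF_HASH_MASK subset of cflags and trace_vcpu_dstate; tb_cmp() above then
 * resolves collisions by comparing the full key, including both page
 * addresses. */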
1148 static void tb_htable_init(void)
1150 unsigned int mode = QHT_MODE_AUTO_RESIZE;
1152 qht_init(&tb_ctx.htable, tb_cmp, CODE_GEN_HTABLE_SIZE, mode);
1155 /* Must be called before using the QEMU cpus. 'tb_size' is the size
1156 (in bytes) allocated to the translation buffer. Zero means default
1158 void tcg_exec_init(unsigned long tb_size)
1164 code_gen_alloc(tb_size);
1165 #if defined(CONFIG_SOFTMMU)
1166 /* There's no guest base to take into account, so go ahead and
1167 initialize the prologue now. */
1168 tcg_prologue_init(tcg_ctx);
1173 * Allocate a new translation block. Flush the translation buffer if
1174 * there are too many translation blocks or too much generated code.
1176 * Called with tb_lock held.
1178 static TranslationBlock *tb_alloc(target_ulong pc)
1180 TranslationBlock *tb;
1184 tb = tcg_tb_alloc(tcg_ctx);
1185 if (unlikely(tb == NULL)) {
1191 /* call with @p->lock held */
1192 static inline void invalidate_page_bitmap(PageDesc *p)
1194 assert_page_locked(p);
1195 #ifdef CONFIG_SOFTMMU
1196 g_free(p->code_bitmap);
1197 p->code_bitmap = NULL;
1198 p->code_write_count = 0;
1202 /* Set to NULL all the 'first_tb' fields in all PageDescs. */
1203 static void page_flush_tb_1(int level, void **lp)
1213 for (i = 0; i < V_L2_SIZE; ++i) {
1215 pd[i].first_tb = (uintptr_t)NULL;
1216 invalidate_page_bitmap(pd + i);
1217 page_unlock(&pd[i]);
1222 for (i = 0; i < V_L2_SIZE; ++i) {
1223 page_flush_tb_1(level - 1, pp + i);
1228 static void page_flush_tb(void)
1230 int i, l1_sz = v_l1_size;
1232 for (i = 0; i < l1_sz; i++) {
1233 page_flush_tb_1(v_l2_levels, l1_map + i);
1237 static gboolean tb_host_size_iter(gpointer key, gpointer value, gpointer data)
1239 const TranslationBlock *tb = value;
1240 size_t *size = data;
1242 *size += tb->tc.size;
1246 /* flush all the translation blocks */
1247 static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
1251 /* If it has already been done at the request of another CPU,
1254 if (tb_ctx.tb_flush_count != tb_flush_count.host_int) {
1258 if (DEBUG_TB_FLUSH_GATE) {
1259 size_t nb_tbs = tcg_nb_tbs();
1260 size_t host_size = 0;
1262 tcg_tb_foreach(tb_host_size_iter, &host_size);
1263 printf("qemu: flush code_size=%zu nb_tbs=%zu avg_tb_size=%zu\n",
1264 tcg_code_size(), nb_tbs, nb_tbs > 0 ? host_size / nb_tbs : 0);
1268 cpu_tb_jmp_cache_clear(cpu);
1271 qht_reset_size(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
1274 tcg_region_reset_all();
1275 /* XXX: flush processor icache at this point if cache flush is
1277 atomic_mb_set(&tb_ctx.tb_flush_count, tb_ctx.tb_flush_count + 1);
1283 void tb_flush(CPUState *cpu)
1285 if (tcg_enabled()) {
1286 unsigned tb_flush_count = atomic_mb_read(&tb_ctx.tb_flush_count);
1287 async_safe_run_on_cpu(cpu, do_tb_flush,
1288 RUN_ON_CPU_HOST_INT(tb_flush_count));
1293 * Formerly ifdef DEBUG_TB_CHECK. These debug functions are user-mode-only,
1294 * so in order to prevent bit rot we compile them unconditionally in user-mode,
1295 * and let the optimizer get rid of them by wrapping their user-only callers
1296 * with if (DEBUG_TB_CHECK_GATE).
1298 #ifdef CONFIG_USER_ONLY
1301 do_tb_invalidate_check(struct qht *ht, void *p, uint32_t hash, void *userp)
1303 TranslationBlock *tb = p;
1304 target_ulong addr = *(target_ulong *)userp;
1306 if (!(addr + TARGET_PAGE_SIZE <= tb->pc || addr >= tb->pc + tb->size)) {
1307 printf("ERROR invalidate: address=" TARGET_FMT_lx
1308 " PC=%08lx size=%04x\n", addr, (long)tb->pc, tb->size);
1312 /* verify that all the pages have correct rights for code
1314 * Called with tb_lock held.
1316 static void tb_invalidate_check(target_ulong address)
1318 address &= TARGET_PAGE_MASK;
1319 qht_iter(&tb_ctx.htable, do_tb_invalidate_check, &address);
1323 do_tb_page_check(struct qht *ht, void *p, uint32_t hash, void *userp)
1325 TranslationBlock *tb = p;
1328 flags1 = page_get_flags(tb->pc);
1329 flags2 = page_get_flags(tb->pc + tb->size - 1);
1330 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
1331 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
1332 (long)tb->pc, tb->size, flags1, flags2);
1336 /* verify that all the pages have correct rights for code */
1337 static void tb_page_check(void)
1339 qht_iter(&tb_ctx.htable, do_tb_page_check, NULL);
1342 #endif /* CONFIG_USER_ONLY */
1344 /* call with @pd->lock held */
1345 static inline void tb_page_remove(PageDesc *pd, TranslationBlock *tb)
1347 TranslationBlock *tb1;
1351 assert_page_locked(pd);
1352 pprev = &pd->first_tb;
1353 PAGE_FOR_EACH_TB(pd, tb1, n1) {
1355 *pprev = tb1->page_next[n1];
1358 pprev = &tb1->page_next[n1];
1360 g_assert_not_reached();
1363 /* remove the TB from a list of TBs jumping to the n-th jump target of the TB */
1364 static inline void tb_remove_from_jmp_list(TranslationBlock *tb, int n)
1366 TranslationBlock *tb1;
1367 uintptr_t *ptb, ntb;
1370 ptb = &tb->jmp_list_next[n];
1372 /* find tb(n) in circular list */
1376 tb1 = (TranslationBlock *)(ntb & ~3);
1377 if (n1 == n && tb1 == tb) {
1381 ptb = &tb1->jmp_list_first;
1383 ptb = &tb1->jmp_list_next[n1];
1386 /* now we can suppress tb(n) from the list */
1387 *ptb = tb->jmp_list_next[n];
1389 tb->jmp_list_next[n] = (uintptr_t)NULL;
1393 /* reset the jump entry 'n' of a TB so that it is not chained to
1395 static inline void tb_reset_jump(TranslationBlock *tb, int n)
1397 uintptr_t addr = (uintptr_t)(tb->tc.ptr + tb->jmp_reset_offset[n]);
1398 tb_set_jmp_target(tb, n, addr);
1401 /* remove any jumps to the TB */
1402 static inline void tb_jmp_unlink(TranslationBlock *tb)
1404 TranslationBlock *tb1;
1405 uintptr_t *ptb, ntb;
1408 ptb = &tb->jmp_list_first;
1412 tb1 = (TranslationBlock *)(ntb & ~3);
1416 tb_reset_jump(tb1, n1);
1417 *ptb = tb1->jmp_list_next[n1];
1418 tb1->jmp_list_next[n1] = (uintptr_t)NULL;
1422 /* If @rm_from_page_list is set, call with the TB's pages' locks held */
1423 static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
1428 tb_page_addr_t phys_pc;
1432 atomic_set(&tb->cflags, tb->cflags | CF_INVALID);
1434 /* remove the TB from the hash list */
1435 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1436 h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags & CF_HASH_MASK,
1437 tb->trace_vcpu_dstate);
1438 if (!qht_remove(&tb_ctx.htable, tb, h)) {
1442 /* remove the TB from the page list */
1443 if (rm_from_page_list) {
1444 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
1445 tb_page_remove(p, tb);
1446 invalidate_page_bitmap(p);
1447 if (tb->page_addr[1] != -1) {
1448 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
1449 tb_page_remove(p, tb);
1450 invalidate_page_bitmap(p);
1454 /* remove the TB from the per-CPU jump cache */
1455 h = tb_jmp_cache_hash_func(tb->pc);
1457 if (atomic_read(&cpu->tb_jmp_cache[h]) == tb) {
1458 atomic_set(&cpu->tb_jmp_cache[h], NULL);
1462 /* suppress this TB from the two jump lists */
1463 tb_remove_from_jmp_list(tb, 0);
1464 tb_remove_from_jmp_list(tb, 1);
1466 /* suppress any remaining jumps to this TB */
1469 atomic_set(&tcg_ctx->tb_phys_invalidate_count,
1470 tcg_ctx->tb_phys_invalidate_count + 1);
1473 static void tb_phys_invalidate__locked(TranslationBlock *tb)
1475 do_tb_phys_invalidate(tb, true);
1478 /* invalidate one TB
1480 * Called with tb_lock held.
1482 void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
1484 if (page_addr == -1) {
1486 do_tb_phys_invalidate(tb, true);
1489 do_tb_phys_invalidate(tb, false);
1493 #ifdef CONFIG_SOFTMMU
1494 /* call with @p->lock held */
1495 static void build_page_bitmap(PageDesc *p)
1497 int n, tb_start, tb_end;
1498 TranslationBlock *tb;
1500 assert_page_locked(p);
1501 p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);
1503 PAGE_FOR_EACH_TB(p, tb, n) {
1504 /* NOTE: this is subtle as a TB may span two physical pages */
1506 /* NOTE: tb_end may be after the end of the page, but
1507 it is not a problem */
1508 tb_start = tb->pc & ~TARGET_PAGE_MASK;
1509 tb_end = tb_start + tb->size;
1510 if (tb_end > TARGET_PAGE_SIZE) {
1511 tb_end = TARGET_PAGE_SIZE;
1515 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1517 bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
1522 /* add the tb in the target page and protect it if necessary
1524 * Called with mmap_lock held for user-mode emulation.
1525 * Called with @p->lock held.
1527 static inline void tb_page_add(PageDesc *p, TranslationBlock *tb,
1528 unsigned int n, tb_page_addr_t page_addr)
1530 #ifndef CONFIG_USER_ONLY
1531 bool page_already_protected;
1534 assert_page_locked(p);
1536 tb->page_addr[n] = page_addr;
1537 tb->page_next[n] = p->first_tb;
1538 #ifndef CONFIG_USER_ONLY
1539 page_already_protected = p->first_tb != (uintptr_t)NULL;
1541 p->first_tb = (uintptr_t)tb | n;
1542 invalidate_page_bitmap(p);
1544 #if defined(CONFIG_USER_ONLY)
1545 if (p->flags & PAGE_WRITE) {
1550 /* force the host page to be non-writable (writes will have a
1551 page fault + mprotect overhead) */
1552 page_addr &= qemu_host_page_mask;
1554 for (addr = page_addr; addr < page_addr + qemu_host_page_size;
1555 addr += TARGET_PAGE_SIZE) {
1557 p2 = page_find(addr >> TARGET_PAGE_BITS);
1562 p2->flags &= ~PAGE_WRITE;
1564 mprotect(g2h(page_addr), qemu_host_page_size,
1565 (prot & PAGE_BITS) & ~PAGE_WRITE);
1566 if (DEBUG_TB_INVALIDATE_GATE) {
1567 printf("protecting code page: 0x" TB_PAGE_ADDR_FMT "\n", page_addr);
1571 /* if some code is already present, then the pages are already
1572 protected. So we handle the case where only the first TB is
1573 allocated in a physical page */
1574 if (!page_already_protected) {
1575 tlb_protect_code(page_addr);
1580 /* add a new TB and link it to the physical page tables. phys_page2 is
1581 * (-1) to indicate that only one page contains the TB.
1583 * Called with mmap_lock held for user-mode emulation.
1585 * Returns a pointer @tb, or a pointer to an existing TB that matches @tb.
1586 * Note that in !user-mode, another thread might have already added a TB
1587 * for the same block of guest code that @tb corresponds to. In that case,
1588 * the caller should discard the original @tb, and use instead the returned TB.
1590 static TranslationBlock *
1591 tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
1592 tb_page_addr_t phys_page2)
1595 PageDesc *p2 = NULL;
1596 void *existing_tb = NULL;
1599 assert_memory_lock();
1602 * Add the TB to the page list, acquiring first the pages's locks.
1603 * We keep the locks held until after inserting the TB in the hash table,
1604 * so that if the insertion fails we know for sure that the TBs are still
1605 * in the page descriptors.
1606 * Note that inserting into the hash table first isn't an option, since
1607 * we can only insert TBs that are fully initialized.
1609 page_lock_pair(&p, phys_pc, &p2, phys_page2, 1);
1610 tb_page_add(p, tb, 0, phys_pc & TARGET_PAGE_MASK);
1612 tb_page_add(p2, tb, 1, phys_page2);
1614 tb->page_addr[1] = -1;
1617 /* add in the hash table */
1618 h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags & CF_HASH_MASK,
1619 tb->trace_vcpu_dstate);
1620 qht_insert(&tb_ctx.htable, tb, h, &existing_tb);
1622 /* remove TB from the page(s) if we couldn't insert it */
1623 if (unlikely(existing_tb)) {
1624 tb_page_remove(p, tb);
1625 invalidate_page_bitmap(p);
1627 tb_page_remove(p2, tb);
1628 invalidate_page_bitmap(p2);
1638 #ifdef CONFIG_USER_ONLY
1639 if (DEBUG_TB_CHECK_GATE) {
1646 /* Called with mmap_lock held for user mode emulation. */
1647 TranslationBlock *tb_gen_code(CPUState *cpu,
1648 target_ulong pc, target_ulong cs_base,
1649 uint32_t flags, int cflags)
1651 CPUArchState *env = cpu->env_ptr;
1652 TranslationBlock *tb, *existing_tb;
1653 tb_page_addr_t phys_pc, phys_page2;
1654 target_ulong virt_page2;
1655 tcg_insn_unit *gen_code_buf;
1656 int gen_code_size, search_size;
1657 #ifdef CONFIG_PROFILER
1658 TCGProfile *prof = &tcg_ctx->prof;
1661 assert_memory_lock();
1663 phys_pc = get_page_addr_code(env, pc);
1667 if (unlikely(!tb)) {
1668 /* flush must be done */
1671 /* Make the execution loop process the flush as soon as possible. */
1672 cpu->exception_index = EXCP_INTERRUPT;
1676 gen_code_buf = tcg_ctx->code_gen_ptr;
1677 tb->tc.ptr = gen_code_buf;
1679 tb->cs_base = cs_base;
1681 tb->cflags = cflags;
1682 tb->trace_vcpu_dstate = *cpu->trace_dstate;
1683 tcg_ctx->tb_cflags = cflags;
1685 #ifdef CONFIG_PROFILER
1686 /* includes aborted translations because of exceptions */
1687 atomic_set(&prof->tb_count1, prof->tb_count1 + 1);
1688 ti = profile_getclock();
1691 tcg_func_start(tcg_ctx);
1693 tcg_ctx->cpu = ENV_GET_CPU(env);
1694 gen_intermediate_code(cpu, tb);
1695 tcg_ctx->cpu = NULL;
1697 trace_translate_block(tb, tb->pc, tb->tc.ptr);
1699 /* generate machine code */
1700 tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
1701 tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
1702 tcg_ctx->tb_jmp_reset_offset = tb->jmp_reset_offset;
1703 if (TCG_TARGET_HAS_direct_jump) {
1704 tcg_ctx->tb_jmp_insn_offset = tb->jmp_target_arg;
1705 tcg_ctx->tb_jmp_target_addr = NULL;
1707 tcg_ctx->tb_jmp_insn_offset = NULL;
1708 tcg_ctx->tb_jmp_target_addr = tb->jmp_target_arg;
1711 #ifdef CONFIG_PROFILER
1712 atomic_set(&prof->tb_count, prof->tb_count + 1);
1713 atomic_set(&prof->interm_time, prof->interm_time + profile_getclock() - ti);
1714 ti = profile_getclock();
1717 /* ??? Overflow could be handled better here. In particular, we
1718 don't need to re-do gen_intermediate_code, nor should we re-do
1719 the tcg optimization currently hidden inside tcg_gen_code. All
1720 that should be required is to flush the TBs, allocate a new TB,
1721 re-initialize it per above, and re-do the actual code generation. */
1722 gen_code_size = tcg_gen_code(tcg_ctx, tb);
1723 if (unlikely(gen_code_size < 0)) {
1724 goto buffer_overflow;
1726 search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
1727 if (unlikely(search_size < 0)) {
1728 goto buffer_overflow;
1730 tb->tc.size = gen_code_size;
1732 #ifdef CONFIG_PROFILER
1733 atomic_set(&prof->code_time, prof->code_time + profile_getclock() - ti);
1734 atomic_set(&prof->code_in_len, prof->code_in_len + tb->size);
1735 atomic_set(&prof->code_out_len, prof->code_out_len + gen_code_size);
1736 atomic_set(&prof->search_out_len, prof->search_out_len + search_size);
1740 if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
1741 qemu_log_in_addr_range(tb->pc)) {
1743 qemu_log("OUT: [size=%d]\n", gen_code_size);
1744 if (tcg_ctx->data_gen_ptr) {
1745 size_t code_size = tcg_ctx->data_gen_ptr - tb->tc.ptr;
1746 size_t data_size = gen_code_size - code_size;
1749 log_disas(tb->tc.ptr, code_size);
1751 for (i = 0; i < data_size; i += sizeof(tcg_target_ulong)) {
1752 if (sizeof(tcg_target_ulong) == 8) {
1753 qemu_log("0x%08" PRIxPTR ": .quad 0x%016" PRIx64 "\n",
1754 (uintptr_t)tcg_ctx->data_gen_ptr + i,
1755 *(uint64_t *)(tcg_ctx->data_gen_ptr + i));
1757 qemu_log("0x%08" PRIxPTR ": .long 0x%08x\n",
1758 (uintptr_t)tcg_ctx->data_gen_ptr + i,
1759 *(uint32_t *)(tcg_ctx->data_gen_ptr + i));
1763 log_disas(tb->tc.ptr, gen_code_size);
1771 atomic_set(&tcg_ctx->code_gen_ptr, (void *)
1772 ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
1775 /* init jump list */
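/* The jump lists appear to use tagged pointers as well: the low two bits of
 * each link select which jmp_list_next[] slot of the pointed-to TB continues
 * the chain, with the value 2 reserved for the list head (the TB itself),
 * hence the alignment assertion below. */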
1776 assert(((uintptr_t)tb & 3) == 0);
1777 tb->jmp_list_first = (uintptr_t)tb | 2;
1778 tb->jmp_list_next[0] = (uintptr_t)NULL;
1779 tb->jmp_list_next[1] = (uintptr_t)NULL;
1781 /* init original jump addresses which have been set during tcg_gen_code() */
1782 if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
1783 tb_reset_jump(tb, 0);
1785 if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
1786 tb_reset_jump(tb, 1);
1789 /* check next page if needed */
1790 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
1792 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
1793 phys_page2 = get_page_addr_code(env, virt_page2);
1795 /* As long as consistency of the TB stuff is provided by tb_lock in user
1796 * mode and is implicit in single-threaded softmmu emulation, no explicit
1797 * memory barrier is required before tb_link_page() makes the TB visible
1798 * through the physical hash table and physical page list.
1800 existing_tb = tb_link_page(tb, phys_pc, phys_page2);
1801 /* if the TB already exists, discard what we just translated */
1802 if (unlikely(existing_tb != tb)) {
1803 uintptr_t orig_aligned = (uintptr_t)gen_code_buf;
1805 orig_aligned -= ROUND_UP(sizeof(*tb), qemu_icache_linesize);
1806 atomic_set(&tcg_ctx->code_gen_ptr, (void *)orig_aligned);
1814 * Call with all @pages locked.
1815 * @p must be non-NULL.
1818 tb_invalidate_phys_page_range__locked(struct page_collection *pages,
1819 PageDesc *p, tb_page_addr_t start,
1821 int is_cpu_write_access)
1823 TranslationBlock *tb;
1824 tb_page_addr_t tb_start, tb_end;
1826 #ifdef TARGET_HAS_PRECISE_SMC
1827 CPUState *cpu = current_cpu;
1828 CPUArchState *env = NULL;
1829 int current_tb_not_found = is_cpu_write_access;
1830 TranslationBlock *current_tb = NULL;
1831 int current_tb_modified = 0;
1832 target_ulong current_pc = 0;
1833 target_ulong current_cs_base = 0;
1834 uint32_t current_flags = 0;
1835 #endif /* TARGET_HAS_PRECISE_SMC */
1837 assert_page_locked(p);
1839 #if defined(TARGET_HAS_PRECISE_SMC)
1845 /* we remove all the TBs in the range [start, end[ */
1846 /* XXX: see if in some cases it could be faster to invalidate all
1848 PAGE_FOR_EACH_TB(p, tb, n) {
1849 assert_page_locked(p);
1850 /* NOTE: this is subtle as a TB may span two physical pages */
1852 /* NOTE: tb_end may be after the end of the page, but
1853 it is not a problem */
1854 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1855 tb_end = tb_start + tb->size;
1857 tb_start = tb->page_addr[1];
1858 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1860 if (!(tb_end <= start || tb_start >= end)) {
1861 #ifdef TARGET_HAS_PRECISE_SMC
1862 if (current_tb_not_found) {
1863 current_tb_not_found = 0;
1865 if (cpu->mem_io_pc) {
1866 /* now we have a real cpu fault */
1867 current_tb = tcg_tb_lookup(cpu->mem_io_pc);
1870 if (current_tb == tb &&
1871 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1872 /* If we are modifying the current TB, we must stop
1873 its execution. We could be more precise by checking
1874 that the modification is after the current PC, but it
1875 would require a specialized function to partially
1876 restore the CPU state */
1878 current_tb_modified = 1;
1879 cpu_restore_state_from_tb(cpu, current_tb,
1880 cpu->mem_io_pc, true);
1881 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1884 #endif /* TARGET_HAS_PRECISE_SMC */
1885 tb_phys_invalidate__locked(tb);
1888 #if !defined(CONFIG_USER_ONLY)
1889 /* if no code remaining, no need to continue to use slow writes */
1891 invalidate_page_bitmap(p);
1892 tlb_unprotect_code(start);
1895 #ifdef TARGET_HAS_PRECISE_SMC
1896 if (current_tb_modified) {
1897 page_collection_unlock(pages);
1898 /* Force execution of one insn next time. */
1899 cpu->cflags_next_tb = 1 | curr_cflags();
1900 cpu_loop_exit_noexc(cpu);
1906 * Invalidate all TBs which intersect with the target physical address range
1907 * [start;end[. NOTE: start and end must refer to the *same* physical page.
1908 * 'is_cpu_write_access' should be true if called from a real cpu write
1909 * access: the virtual CPU will exit the current TB if code is modified inside
1912 * Called with tb_lock/mmap_lock held for user-mode emulation
1913 * Called with tb_lock held for system-mode emulation
1915 void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
1916 int is_cpu_write_access)
1918 struct page_collection *pages;
1921 assert_memory_lock();
1924 p = page_find(start >> TARGET_PAGE_BITS);
1928 pages = page_collection_lock(start, end);
1929 tb_invalidate_phys_page_range__locked(pages, p, start, end,
1930 is_cpu_write_access);
1931 page_collection_unlock(pages);
1935 * Invalidate all TBs which intersect with the target physical address range
1936 * [start;end[. NOTE: start and end may refer to *different* physical pages.
1937 * 'is_cpu_write_access' should be true if called from a real cpu write
1938 * access: the virtual CPU will exit the current TB if code is modified inside
1941 * Called with mmap_lock held for user-mode emulation, grabs tb_lock
1942 * Called with tb_lock held for system-mode emulation
1944 static void tb_invalidate_phys_range_1(tb_page_addr_t start, tb_page_addr_t end)
1946 struct page_collection *pages;
1947 tb_page_addr_t next;
1949 pages = page_collection_lock(start, end);
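/* tb_invalidate_phys_page_range__locked() requires start and end to lie
 * within one page, so walk the range one target page at a time and clamp
 * each chunk to MIN(next, end). */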
1950 for (next = (start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
1952 start = next, next += TARGET_PAGE_SIZE) {
1953 PageDesc *pd = page_find(start >> TARGET_PAGE_BITS);
1954 tb_page_addr_t bound = MIN(next, end);
1959 tb_invalidate_phys_page_range__locked(pages, pd, start, bound, 0);
1961 page_collection_unlock(pages);
1964 #ifdef CONFIG_SOFTMMU
1965 void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
1968 tb_invalidate_phys_range_1(start, end);
1971 void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
1973 assert_memory_lock();
1975 tb_invalidate_phys_range_1(start, end);
1980 #ifdef CONFIG_SOFTMMU
1981 /* len must be <= 8 and start must be a multiple of len.
1982 * Called via softmmu_template.h when code areas are written to with
1983 * iothread mutex not held.
1985 void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
1987 struct page_collection *pages;
1992 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1993 cpu_single_env->mem_io_vaddr, len,
1994 cpu_single_env->eip,
1995 cpu_single_env->eip +
1996 (intptr_t)cpu_single_env->segs[R_CS].base);
1999 assert_memory_lock();
2001 p = page_find(start >> TARGET_PAGE_BITS);
2006 pages = page_collection_lock(start, start + len);
2007 assert_page_locked(p);
2008 if (!p->code_bitmap &&
2009 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
2010 build_page_bitmap(p);
2012 if (p->code_bitmap) {
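/* 'nr' is the offset of the write within the page and 'b' is the bitmap word
 * shifted so that bit 0 lines up with 'nr'; if any of the next 'len' bits is
 * set, the written bytes overlap translated code and the range is
 * invalidated below, otherwise the write can be ignored. */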
2016 nr = start & ~TARGET_PAGE_MASK;
2017 b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
2018 if (b & ((1 << len) - 1)) {
2023 tb_invalidate_phys_page_range__locked(pages, p, start, start + len, 1);
2025 page_collection_unlock(pages);
2028 /* Called with mmap_lock held. If pc is not 0 then it indicates the
2029 * host PC of the faulting store instruction that caused this invalidate.
2030 * Returns true if the caller needs to abort execution of the current
2031 * TB (because it was modified by this store and the guest CPU has
2032 * precise-SMC semantics).
2034 static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
2036 TranslationBlock *tb;
2039 #ifdef TARGET_HAS_PRECISE_SMC
2040 TranslationBlock *current_tb = NULL;
2041 CPUState *cpu = current_cpu;
2042 CPUArchState *env = NULL;
2043 int current_tb_modified = 0;
2044 target_ulong current_pc = 0;
2045 target_ulong current_cs_base = 0;
2046 uint32_t current_flags = 0;
2049 assert_memory_lock();
2051 addr &= TARGET_PAGE_MASK;
2052 p = page_find(addr >> TARGET_PAGE_BITS);
2058 #ifdef TARGET_HAS_PRECISE_SMC
2059 if (p->first_tb && pc != 0) {
2060 current_tb = tcg_tb_lookup(pc);
2066 assert_page_locked(p);
2067 PAGE_FOR_EACH_TB(p, tb, n) {
2068 #ifdef TARGET_HAS_PRECISE_SMC
2069 if (current_tb == tb &&
2070 (current_tb->cflags & CF_COUNT_MASK) != 1) {
2071 /* If we are modifying the current TB, we must stop
2072 its execution. We could be more precise by checking
2073 that the modification is after the current PC, but it
2074 would require a specialized function to partially
2075 restore the CPU state */
2077 current_tb_modified = 1;
2078 cpu_restore_state_from_tb(cpu, current_tb, pc, true);
2079 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
2082 #endif /* TARGET_HAS_PRECISE_SMC */
2083 tb_phys_invalidate(tb, addr);
2085 p->first_tb = (uintptr_t)NULL;
2086 #ifdef TARGET_HAS_PRECISE_SMC
2087 if (current_tb_modified) {
2088 /* Force execution of one insn next time. */
2089 cpu->cflags_next_tb = 1 | curr_cflags();
2090 /* tb_lock will be reset after cpu_loop_exit_noexc longjmps
2091 * back into the cpu_exec loop. */
2101 #if !defined(CONFIG_USER_ONLY)
2102 void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs)
2104 ram_addr_t ram_addr;
2109 mr = address_space_translate(as, addr, &addr, &l, false, attrs);
2110 if (!(memory_region_is_ram(mr)
2111 || memory_region_is_romd(mr))) {
2115 ram_addr = memory_region_get_ram_addr(mr) + addr;
2117 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
2121 #endif /* !defined(CONFIG_USER_ONLY) */
2123 /* Called with tb_lock held. */
2124 void tb_check_watchpoint(CPUState *cpu)
2126 TranslationBlock *tb;
2128 tb = tcg_tb_lookup(cpu->mem_io_pc);
2130 /* We can use retranslation to find the PC. */
2131 cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc, true);
2132 tb_phys_invalidate(tb, -1);
2134 /* The exception probably happened in a helper. The CPU state should
2135 have been saved before calling it. Fetch the PC from there. */
2136 CPUArchState *env = cpu->env_ptr;
2137 target_ulong pc, cs_base;
2138 tb_page_addr_t addr;
2141 cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
2142 addr = get_page_addr_code(env, pc);
2143 tb_invalidate_phys_range(addr, addr + 1);
2147 #ifndef CONFIG_USER_ONLY
2148 /* in deterministic execution mode, instructions doing device I/Os
2149 * must be at the end of the TB.
2151 * Called by softmmu_template.h, with iothread mutex not held.
2153 void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
2155 #if defined(TARGET_MIPS) || defined(TARGET_SH4)
2156 CPUArchState *env = cpu->env_ptr;
2158 TranslationBlock *tb;
2162 tb = tcg_tb_lookup(retaddr);
2164 cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
2167 cpu_restore_state_from_tb(cpu, tb, retaddr, true);
2169 /* On MIPS and SH, delay slot instructions can only be restarted if
2170 they were already the first instruction in the TB. If this is not
2171 the first instruction in a TB then re-execute the preceding
2174 #if defined(TARGET_MIPS)
2175 if ((env->hflags & MIPS_HFLAG_BMASK) != 0
2176 && env->active_tc.PC != tb->pc) {
2177 env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
2178 cpu->icount_decr.u16.low++;
2179 env->hflags &= ~MIPS_HFLAG_BMASK;
2182 #elif defined(TARGET_SH4)
2183 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
2184 && env->pc != tb->pc) {
2186 cpu->icount_decr.u16.low++;
2187 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
2192 /* Generate a new TB executing the I/O insn. */
2193 cpu->cflags_next_tb = curr_cflags() | CF_LAST_IO | n;
2195 if (tb->cflags & CF_NOCACHE) {
2197 /* Invalidate original TB if this TB was generated in
2198 * cpu_exec_nocache() */
2199 tb_phys_invalidate(tb->orig_tb, -1);
2204 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
2205 * the first in the TB) then we end up generating a whole new TB and
2206 * repeating the fault, which is horribly inefficient.
2207 * Better would be to execute just this insn uncached, or generate a
2210 * cpu_loop_exit_noexc will longjmp back to cpu_exec where the
2211 * tb_lock gets reset.
2213 cpu_loop_exit_noexc(cpu);
2216 static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
2218 unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr);
2220 for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
2221 atomic_set(&cpu->tb_jmp_cache[i0 + i], NULL);
2225 void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
2227 /* Discard jump cache entries for any tb which might potentially
2228 overlap the flushed page. */
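/* A TB spans at most two target pages, so any TB that can overlap the
 * flushed page starts either on that page or on the preceding one; clearing
 * the cache entries hashed from both start pages is therefore enough. */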
2229 tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
2230 tb_jmp_cache_clear_page(cpu, addr);
2233 static void print_qht_statistics(FILE *f, fprintf_function cpu_fprintf,
2234 struct qht_stats hst)
2236 uint32_t hgram_opts;
2240 if (!hst.head_buckets) {
2243 cpu_fprintf(f, "TB hash buckets %zu/%zu (%0.2f%% head buckets used)\n",
2244 hst.used_head_buckets, hst.head_buckets,
2245 (double)hst.used_head_buckets / hst.head_buckets * 100);
2247 hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
2248 hgram_opts |= QDIST_PR_100X | QDIST_PR_PERCENT;
2249 if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) {
2250 hgram_opts |= QDIST_PR_NODECIMAL;
2252 hgram = qdist_pr(&hst.occupancy, 10, hgram_opts);
2253 cpu_fprintf(f, "TB hash occupancy %0.2f%% avg chain occ. Histogram: %s\n",
2254 qdist_avg(&hst.occupancy) * 100, hgram);
2257 hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
2258 hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain);
2259 if (hgram_bins > 10) {
2263 hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE;
2265 hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts);
2266 cpu_fprintf(f, "TB hash avg chain %0.3f buckets. Histogram: %s\n",
2267 qdist_avg(&hst.chain), hgram);
2271 struct tb_tree_stats {
2275 size_t max_target_size;
2276 size_t direct_jmp_count;
2277 size_t direct_jmp2_count;
2281 static gboolean tb_tree_stats_iter(gpointer key, gpointer value, gpointer data)
2283 const TranslationBlock *tb = value;
2284 struct tb_tree_stats *tst = data;
2287 tst->host_size += tb->tc.size;
2288 tst->target_size += tb->size;
2289 if (tb->size > tst->max_target_size) {
2290 tst->max_target_size = tb->size;
2292 if (tb->page_addr[1] != -1) {
2295 if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
2296 tst->direct_jmp_count++;
2297 if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
2298 tst->direct_jmp2_count++;
void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    struct tb_tree_stats tst = {};
    struct qht_stats hst;
    size_t nb_tbs;

    tcg_tb_foreach(tb_tree_stats_iter, &tst);
    nb_tbs = tst.nb_tbs;
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    /*
     * Report total code size including the padding and TB structs;
     * otherwise users might think "-tb-size" is not honoured.
     * For avg host size we use the precise numbers from tb_tree_stats though.
     */
    cpu_fprintf(f, "gen code size %zu/%zu\n",
                tcg_code_size(), tcg_code_capacity());
    cpu_fprintf(f, "TB count %zu\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size %zu max=%zu bytes\n",
                nb_tbs ? tst.target_size / nb_tbs : 0,
                tst.max_target_size);
    cpu_fprintf(f, "TB avg host size %zu bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? tst.host_size / nb_tbs : 0,
                tst.target_size ? (double)tst.host_size / tst.target_size : 0);
    cpu_fprintf(f, "cross page TB count %zu (%zu%%)\n", tst.cross_page,
                nb_tbs ? (tst.cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %zu (%zu%%) (2 jumps=%zu %zu%%)\n",
                tst.direct_jmp_count,
                nb_tbs ? (tst.direct_jmp_count * 100) / nb_tbs : 0,
                tst.direct_jmp2_count,
                nb_tbs ? (tst.direct_jmp2_count * 100) / nb_tbs : 0);

    qht_statistics_init(&tb_ctx.htable, &hst);
    print_qht_statistics(f, cpu_fprintf, hst);
    qht_statistics_destroy(&hst);

    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count %u\n",
                atomic_read(&tb_ctx.tb_flush_count));
    cpu_fprintf(f, "TB invalidate count %zu\n", tcg_tb_phys_invalidate_count());
    cpu_fprintf(f, "TLB flush count %zu\n", tlb_flush_count());
    tcg_dump_info(f, cpu_fprintf);
}
void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf)
{
    tcg_dump_op_count(f, cpu_fprintf);
}
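
/*
 * Illustrative note (not part of the original source): dump_exec_info() and
 * dump_opcount_info() back the HMP monitor commands "info jit" and
 * "info opcount" respectively; the monitor passes its fprintf-like callback
 * in as 'cpu_fprintf', e.g.
 *
 *     (qemu) info jit
 *     (qemu) info opcount
 */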
#else /* CONFIG_USER_ONLY */
void cpu_interrupt(CPUState *cpu, int mask)
{
    g_assert(qemu_mutex_iothread_locked());
    cpu->interrupt_request |= mask;
    cpu->icount_decr.u16.high = -1;
}
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */
struct walk_memory_regions_data {
    walk_memory_regions_fn fn;
    void *priv;
    target_ulong start;
    int prot;
};
static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   target_ulong end, int new_prot)
{
    if (data->start != -1u) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1u);
    data->prot = new_prot;

    return 0;
}
static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 target_ulong base, int level, void **lp)
{
    target_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pa = base | ((target_ulong)i <<
                (TARGET_PAGE_BITS + V_L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}
int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    uintptr_t i, l1_sz = v_l1_size;

    data.fn = fn;
    data.priv = priv;
    data.start = -1u;
    data.prot = 0;

    for (i = 0; i < l1_sz; i++) {
        target_ulong base = i << (v_l1_shift + TARGET_PAGE_BITS);
        int rc = walk_memory_regions_1(&data, base, v_l2_levels, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}
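
/*
 * Usage sketch (illustrative, not from the original source): a callback of
 * type walk_memory_regions_fn is invoked once per maximal run of pages that
 * share the same protection bits, and a non-zero return value aborts the
 * walk.  A hypothetical counter of executable regions could look like:
 *
 *     static int count_exec_region(void *priv, target_ulong start,
 *                                  target_ulong end, unsigned long prot)
 *     {
 *         if (prot & PAGE_EXEC) {
 *             (*(size_t *)priv)++;
 *         }
 *         return 0;
 *     }
 *
 *     size_t n = 0;
 *     walk_memory_regions(&n, count_exec_region);
 *
 * dump_region()/page_dump() below are the in-tree users of this interface.
 */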
static int dump_region(void *priv, target_ulong start,
    target_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
        " "TARGET_FMT_lx" %c%c%c\n",
        start, end, end - start,
        ((prot & PAGE_READ) ? 'r' : '-'),
        ((prot & PAGE_WRITE) ? 'w' : '-'),
        ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}
/* dump memory mappings */
void page_dump(FILE *f)
{
    const int length = sizeof(target_ulong) * 2;

    (void) fprintf(f, "%-*s %-*s %-*s %s\n",
            length, "start", length, "end", length, "size", "prot");
    walk_memory_regions(f, dump_region);
}
int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        return 0;
    }
    return p->flags;
}
/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held. */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid. */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end <= ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);
    assert_memory_lock();

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside. */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0);
        }
        p->flags = flags;
    }
}
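
/*
 * Usage sketch (illustrative, not from the original source): the user-mode
 * mmap/mprotect emulation is the typical caller.  After a successful host
 * mmap of a guest range it records the guest-visible protection roughly as:
 *
 *     page_set_flags(start, start + len, prot | PAGE_VALID);
 *
 * with the mmap_lock held.  PAGE_WRITE_ORG is added automatically whenever
 * PAGE_WRITE is present, so page_unprotect() can later restore writability
 * after a page has been write-protected to guard translated code.
 */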
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid. */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around. */
        return -1;
    }

    /* must do before we lose bits in the next step */
    end = TARGET_PAGE_ALIGN(start + len);
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0)) {
                    return -1;
                }
            }
        }
    }
    return 0;
}
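
/*
 * Usage sketch (illustrative, not from the original source): syscall
 * emulation uses this to validate a guest buffer before touching it,
 * along the lines of:
 *
 *     if (page_check_range(guest_addr, size, PAGE_READ) != 0) {
 *         return -TARGET_EFAULT;    // hypothetical error path
 *     }
 *
 * A zero return means every page in the range is valid and allows the
 * requested access; write-protected pages that only guard translated code
 * are transparently unprotected along the way.
 */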
/* called from signal handler: invalidate the code and unprotect the
 * page. Return 0 if the fault was not handled, 1 if it was handled,
 * and 2 if it was handled but the caller must cause the TB to be
 * immediately exited. (We can only return 2 if the 'pc' argument is
 * non-zero.)
 */
int page_unprotect(target_ulong address, uintptr_t pc)
{
    unsigned int prot;
    bool current_tb_invalidated;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok. */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if (p->flags & PAGE_WRITE_ORG) {
        current_tb_invalidated = false;
        if (p->flags & PAGE_WRITE) {
            /* If the page is actually marked WRITE then assume this is because
             * this thread raced with another one which got here first and
             * set the page to PAGE_WRITE and did the TB invalidate for us.
             */
#ifdef TARGET_HAS_PRECISE_SMC
            TranslationBlock *current_tb = tcg_tb_lookup(pc);
            if (current_tb) {
                current_tb_invalidated = tb_cflags(current_tb) & CF_INVALID;
            }
#endif
        } else {
            host_start = address & qemu_host_page_mask;
            host_end = host_start + qemu_host_page_size;

            prot = 0;
            for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
                p = page_find(addr >> TARGET_PAGE_BITS);
                p->flags |= PAGE_WRITE;
                prot |= p->flags;

                /* and since the content will be modified, we must invalidate
                   the corresponding translated code. */
                current_tb_invalidated |= tb_invalidate_phys_page(addr, pc);
#ifdef CONFIG_USER_ONLY
                if (DEBUG_TB_CHECK_GATE) {
                    tb_invalidate_check(addr);
                }
#endif
            }
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     prot & PAGE_BITS);
        }
        mmap_unlock();
        /* If current TB was invalidated return to main loop */
        return current_tb_invalidated ? 2 : 1;
    }
    mmap_unlock();
    return 0;
}
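
/*
 * Usage sketch (illustrative, not from the original source): the user-mode
 * SEGV handler is the expected caller when a guest write faults on a page
 * that was write-protected because it contains translated code, roughly:
 *
 *     switch (page_unprotect(h2g(host_fault_addr), pc)) {
 *     case 0:    // not our write-protection: deliver the signal to the guest
 *         break;
 *     case 1:    // handled: simply restart the faulting instruction
 *         return 1;
 *     case 2:    // handled, but the currently executing TB was invalidated,
 *                // so unwind to the main loop (via a signal-safe wrapper
 *                // around cpu_loop_exit_noexc) instead of resuming the TB
 *         break;
 *     }
 */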
#endif /* CONFIG_USER_ONLY */
/* This is a wrapper for common code that cannot use CONFIG_SOFTMMU */
void tcg_flush_softmmu_tlb(CPUState *cs)
{
#ifdef CONFIG_SOFTMMU