Git Repo - qemu.git/blame - accel/tcg/translate-all.c
tcg: call qemu_spin_destroy for tb->jmp_lock
d19893da
FB
1/*
2 * Host code generation
5fafdf24 3 *
d19893da
FB
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
fb0343d5 9 * version 2.1 of the License, or (at your option) any later version.
d19893da
FB
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
d19893da 18 */
14a48c1d 19
7b31bbc2 20#include "qemu/osdep.h"
a2fa63a8 21#include "qemu/units.h"
a8d25326 22#include "qemu-common.h"
d19893da 23
af5ad107 24#define NO_CPU_IO_DEFS
d3eead2e 25#include "cpu.h"
244f1441 26#include "trace.h"
76cad711 27#include "disas/disas.h"
63c91552 28#include "exec/exec-all.h"
dcb32f1d 29#include "tcg/tcg.h"
5b6dd868
BS
30#if defined(CONFIG_USER_ONLY)
31#include "qemu.h"
32#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
33#include <sys/param.h>
34#if __FreeBSD_version >= 700104
35#define HAVE_KINFO_GETVMMAP
36#define sigqueue sigqueue_freebsd /* avoid redefinition */
5b6dd868
BS
37#include <sys/proc.h>
38#include <machine/profile.h>
39#define _KERNEL
40#include <sys/user.h>
41#undef _KERNEL
42#undef sigqueue
43#include <libutil.h>
44#endif
45#endif
0bc3cd62 46#else
8bca9a03 47#include "exec/ram_addr.h"
5b6dd868
BS
48#endif
49
022c62cb 50#include "exec/cputlb.h"
e1b89321 51#include "exec/tb-hash.h"
5b6dd868 52#include "translate-all.h"
510a647f 53#include "qemu/bitmap.h"
61a67f71 54#include "qemu/error-report.h"
3de2faa9 55#include "qemu/qemu-print.h"
0aa09897 56#include "qemu/timer.h"
8d04fb55 57#include "qemu/main-loop.h"
508127e2 58#include "exec/log.h"
d2528bdc 59#include "sysemu/cpus.h"
14a48c1d 60#include "sysemu/tcg.h"
5b6dd868 61
955939a2
AB
62/* #define DEBUG_TB_INVALIDATE */
63/* #define DEBUG_TB_FLUSH */
5b6dd868 64/* make various TB consistency checks */
955939a2 65/* #define DEBUG_TB_CHECK */
5b6dd868 66
dae9e03a
EC
67#ifdef DEBUG_TB_INVALIDATE
68#define DEBUG_TB_INVALIDATE_GATE 1
69#else
70#define DEBUG_TB_INVALIDATE_GATE 0
71#endif
72
424079c1
EC
73#ifdef DEBUG_TB_FLUSH
74#define DEBUG_TB_FLUSH_GATE 1
75#else
76#define DEBUG_TB_FLUSH_GATE 0
77#endif
78
5b6dd868
BS
79#if !defined(CONFIG_USER_ONLY)
80/* TB consistency checks only implemented for usermode emulation. */
81#undef DEBUG_TB_CHECK
82#endif
83
6eb062ab
EC
84#ifdef DEBUG_TB_CHECK
85#define DEBUG_TB_CHECK_GATE 1
86#else
87#define DEBUG_TB_CHECK_GATE 0
88#endif
89
301e40ed 90/* Access to the various translation structures needs to be serialised via locks
0ac20318
EC
91 * for consistency.
92 * In user-mode emulation, access to the memory related structures is protected
93 * with mmap_lock.
94 * In !user-mode we use per-page locks.
301e40ed 95 */
301e40ed 96#ifdef CONFIG_SOFTMMU
0ac20318 97#define assert_memory_lock()
301e40ed 98#else
6ac3d7e8 99#define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
301e40ed
AB
100#endif
101
5b6dd868
BS
102#define SMC_BITMAP_USE_THRESHOLD 10
103
5b6dd868
BS
104typedef struct PageDesc {
105 /* list of TBs intersecting this ram page */
1e05197f 106 uintptr_t first_tb;
6fad459c 107#ifdef CONFIG_SOFTMMU
5b6dd868
BS
108 /* in order to optimize self modifying code, we count the number
109 of lookups we do to a given page to use a bitmap */
510a647f 110 unsigned long *code_bitmap;
94da9aec 111 unsigned int code_write_count;
6fad459c 112#else
5b6dd868
BS
113 unsigned long flags;
114#endif
0b5c91f7
EC
115#ifndef CONFIG_USER_ONLY
116 QemuSpin lock;
117#endif
5b6dd868
BS
118} PageDesc;
119
0b5c91f7
EC
120/**
121 * struct page_entry - page descriptor entry
122 * @pd: pointer to the &struct PageDesc of the page this entry represents
123 * @index: page index of the page
124 * @locked: whether the page is locked
125 *
126 * This struct helps us keep track of the locked state of a page, without
127 * bloating &struct PageDesc.
128 *
129 * A page lock protects accesses to all fields of &struct PageDesc.
130 *
131 * See also: &struct page_collection.
132 */
133struct page_entry {
134 PageDesc *pd;
135 tb_page_addr_t index;
136 bool locked;
137};
138
139/**
140 * struct page_collection - tracks a set of pages (i.e. &struct page_entry's)
141 * @tree: Binary search tree (BST) of the pages, with key == page index
142 * @max: Pointer to the page in @tree with the highest page index
143 *
144 * To avoid deadlock we lock pages in ascending order of page index.
145 * When operating on a set of pages, we need to keep track of them so that
146 * we can lock them in order and also unlock them later. For this we collect
147 * pages (i.e. &struct page_entry's) in a binary search @tree. Given that the
148 * @tree implementation we use does not provide an O(1) operation to obtain the
149 * highest-ranked element, we use @max to keep track of the inserted page
150 * with the highest index. This is valuable because if a page is not in
151 * the tree and its index is higher than @max's, then we can lock it
152 * without breaking the locking order rule.
153 *
154 * Note on naming: 'struct page_set' would be shorter, but we already have a few
155 * page_set_*() helpers, so page_collection is used instead to avoid confusion.
156 *
157 * See also: page_collection_lock().
158 */
159struct page_collection {
160 GTree *tree;
161 struct page_entry *max;
162};
163
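/*
 * Illustrative usage sketch: a caller that needs to modify the TBs
 * intersecting a physical range typically brackets the work with the
 * collection lock helpers defined later in this file (the range used
 * here is hypothetical):
 *
 *     struct page_collection *pages;
 *
 *     pages = page_collection_lock(start, start + len);
 *     ... invalidate or patch the TBs intersecting [start, start + len) ...
 *     page_collection_unlock(pages);
 *
 * All page locks held by the set are dropped when the collection is
 * unlocked, via page_entry_destroy().
 */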
1e05197f
EC
164/* list iterators for lists of tagged pointers in TranslationBlock */
165#define TB_FOR_EACH_TAGGED(head, tb, n, field) \
166 for (n = (head) & 1, tb = (TranslationBlock *)((head) & ~1); \
167 tb; tb = (TranslationBlock *)tb->field[n], n = (uintptr_t)tb & 1, \
168 tb = (TranslationBlock *)((uintptr_t)tb & ~1))
169
170#define PAGE_FOR_EACH_TB(pagedesc, tb, n) \
171 TB_FOR_EACH_TAGGED((pagedesc)->first_tb, tb, n, page_next)
172
194125e3
EC
173#define TB_FOR_EACH_JMP(head_tb, tb, n) \
174 TB_FOR_EACH_TAGGED((head_tb)->jmp_list_head, tb, n, jmp_list_next)
175
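/*
 * Note on the tag bit: each link in these lists is stored as
 * (uintptr_t)tb | n, where n (0 or 1) selects which of the TB's two
 * entries (page_next[] or jmp_list_next[]) continues the chain.  The
 * iterators above recover n from the low bit and mask it off to get
 * the TranslationBlock pointer itself.
 */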
7d8cbbab
RH
176/*
177 * In system mode we want L1_MAP to be based on ram offsets,
178 * while in user mode we want it to be based on virtual addresses.
179 *
180 * TODO: For user mode, see the caveat re host vs guest virtual
181 * address spaces near GUEST_ADDR_MAX.
182 */
5b6dd868
BS
183#if !defined(CONFIG_USER_ONLY)
184#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
185# define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
186#else
187# define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
188#endif
189#else
7d8cbbab 190# define L1_MAP_ADDR_SPACE_BITS MIN(HOST_LONG_BITS, TARGET_ABI_BITS)
5b6dd868
BS
191#endif
192
03f49957
PB
193/* Size of the L2 (and L3, etc) page tables. */
194#define V_L2_BITS 10
195#define V_L2_SIZE (1 << V_L2_BITS)
196
61a67f71
LV
197/* Make sure all possible CPU event bits fit in tb->trace_vcpu_dstate */
198QEMU_BUILD_BUG_ON(CPU_TRACE_DSTATE_MAX_EVENTS >
f18793b0 199 sizeof_field(TranslationBlock, trace_vcpu_dstate)
61a67f71
LV
200 * BITS_PER_BYTE);
201
66ec9f49
VK
202/*
203 * L1 Mapping properties
204 */
205static int v_l1_size;
206static int v_l1_shift;
207static int v_l2_levels;
208
209/* The bottom level has pointers to PageDesc, and is indexed by
210 * anything from 4 to (V_L2_BITS + 3) bits, depending on target page size.
211 */
212#define V_L1_MIN_BITS 4
213#define V_L1_MAX_BITS (V_L2_BITS + 3)
214#define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS)
215
216static void *l1_map[V_L1_MAX_SIZE];
5b6dd868 217
57fec1fe 218/* code generation context */
b1311c4a 219TCGContext tcg_init_ctx;
3468b59e 220__thread TCGContext *tcg_ctx;
44ded3d0 221TBContext tb_ctx;
fdbc2b57 222bool parallel_cpus;
d19893da 223
66ec9f49
VK
224static void page_table_config_init(void)
225{
226 uint32_t v_l1_bits;
227
228 assert(TARGET_PAGE_BITS);
229 /* The bits remaining after N lower levels of page tables. */
230 v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS;
231 if (v_l1_bits < V_L1_MIN_BITS) {
232 v_l1_bits += V_L2_BITS;
233 }
234
235 v_l1_size = 1 << v_l1_bits;
236 v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits;
237 v_l2_levels = v_l1_shift / V_L2_BITS - 1;
238
239 assert(v_l1_bits <= V_L1_MAX_BITS);
240 assert(v_l1_shift % V_L2_BITS == 0);
241 assert(v_l2_levels >= 0);
242}
243
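/*
 * Worked example (numbers are illustrative only): with a 32-bit
 * L1_MAP_ADDR_SPACE_BITS and 4 KiB target pages (TARGET_PAGE_BITS == 12),
 * 20 bits of page index remain.  (32 - 12) % 10 == 0, which is below
 * V_L1_MIN_BITS, so v_l1_bits becomes 10: the L1 table has 1024 entries,
 * v_l1_shift is 10 and v_l2_levels is 0, i.e. each L1 slot points
 * directly at an array of V_L2_SIZE PageDescs covering the low 10 bits.
 */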
57fec1fe
FB
244void cpu_gen_init(void)
245{
b1311c4a 246 tcg_context_init(&tcg_init_ctx);
57fec1fe
FB
247}
248
fca8a500
RH
249/* Encode VAL as a signed leb128 sequence at P.
250 Return P incremented past the encoded value. */
251static uint8_t *encode_sleb128(uint8_t *p, target_long val)
252{
253 int more, byte;
254
255 do {
256 byte = val & 0x7f;
257 val >>= 7;
258 more = !((val == 0 && (byte & 0x40) == 0)
259 || (val == -1 && (byte & 0x40) != 0));
260 if (more) {
261 byte |= 0x80;
262 }
263 *p++ = byte;
264 } while (more);
265
266 return p;
267}
268
269/* Decode a signed leb128 sequence at *PP; increment *PP past the
270 decoded value. Return the decoded value. */
271static target_long decode_sleb128(uint8_t **pp)
272{
273 uint8_t *p = *pp;
274 target_long val = 0;
275 int byte, shift = 0;
276
277 do {
278 byte = *p++;
279 val |= (target_ulong)(byte & 0x7f) << shift;
280 shift += 7;
281 } while (byte & 0x80);
282 if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
283 val |= -(target_ulong)1 << shift;
284 }
285
286 *pp = p;
287 return val;
288}
289
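/* For illustration, a hypothetical round-trip through the two helpers
   above: -3 encodes to the single byte 0x7d (the low 7 bits of the value,
   with no continuation bit since the sign bit 0x40 already matches), and
   decoding sign-extends it back:

       uint8_t buf[16], *end, *cur = buf;
       end = encode_sleb128(buf, -3);
       assert(decode_sleb128(&cur) == -3 && cur == end);
 */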
290/* Encode the data collected about the instructions while compiling TB.
291 Place the data at BLOCK, and return the number of bytes consumed.
292
55bbc861 293 The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
fca8a500
RH
294 which come from the target's insn_start data, followed by a uintptr_t
295 which comes from the host pc of the end of the code implementing the insn.
296
297 Each line of the table is encoded as sleb128 deltas from the previous
e7e168f4 298 line. The seed for the first line is { tb->pc, 0..., tb->tc.ptr }.
fca8a500
RH
299 That is, the first column is seeded with the guest pc, the last column
300 with the host pc, and the middle columns with zeros. */
301
302static int encode_search(TranslationBlock *tb, uint8_t *block)
303{
b1311c4a 304 uint8_t *highwater = tcg_ctx->code_gen_highwater;
fca8a500
RH
305 uint8_t *p = block;
306 int i, j, n;
307
fca8a500
RH
308 for (i = 0, n = tb->icount; i < n; ++i) {
309 target_ulong prev;
310
311 for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
312 if (i == 0) {
313 prev = (j == 0 ? tb->pc : 0);
314 } else {
b1311c4a 315 prev = tcg_ctx->gen_insn_data[i - 1][j];
fca8a500 316 }
b1311c4a 317 p = encode_sleb128(p, tcg_ctx->gen_insn_data[i][j] - prev);
fca8a500 318 }
b1311c4a
EC
319 prev = (i == 0 ? 0 : tcg_ctx->gen_insn_end_off[i - 1]);
320 p = encode_sleb128(p, tcg_ctx->gen_insn_end_off[i] - prev);
b125f9dc
RH
321
322 /* Test for (pending) buffer overflow. The assumption is that any
323 one row beginning below the high water mark cannot overrun
324 the buffer completely. Thus we can test for overflow after
325 encoding a row without having to check during encoding. */
326 if (unlikely(p > highwater)) {
327 return -1;
328 }
fca8a500
RH
329 }
330
331 return p - block;
332}
333
7d7500d9 334/* The cpu state corresponding to 'searched_pc' is restored.
afd46fca
PD
335 * When reset_icount is true, the current TB will be interrupted and
336 * icount should be recalculated.
7d7500d9 337 */
74f10515 338static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
afd46fca 339 uintptr_t searched_pc, bool reset_icount)
d19893da 340{
fca8a500 341 target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
e7e168f4 342 uintptr_t host_pc = (uintptr_t)tb->tc.ptr;
74f10515 343 CPUArchState *env = cpu->env_ptr;
2ac01d6d 344 uint8_t *p = tb->tc.ptr + tb->tc.size;
fca8a500 345 int i, j, num_insns = tb->icount;
57fec1fe 346#ifdef CONFIG_PROFILER
c3fac113 347 TCGProfile *prof = &tcg_ctx->prof;
fca8a500 348 int64_t ti = profile_getclock();
57fec1fe
FB
349#endif
350
01ecaf43
RH
351 searched_pc -= GETPC_ADJ;
352
fca8a500
RH
353 if (searched_pc < host_pc) {
354 return -1;
355 }
d19893da 356
fca8a500
RH
357 /* Reconstruct the stored insn data while looking for the point at
358 which the end of the insn exceeds the searched_pc. */
359 for (i = 0; i < num_insns; ++i) {
360 for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
361 data[j] += decode_sleb128(&p);
362 }
363 host_pc += decode_sleb128(&p);
364 if (host_pc > searched_pc) {
365 goto found;
366 }
367 }
368 return -1;
3b46e624 369
fca8a500 370 found:
194125e3 371 if (reset_icount && (tb_cflags(tb) & CF_USE_ICOUNT)) {
414b15c9 372 assert(use_icount);
afd46fca
PD
373 /* Reset the cycle counter to the start of the block
374 and shift it to the number of actually executed instructions */
5e140196 375 cpu_neg(cpu)->icount_decr.u16.low += num_insns - i;
2e70f6ef 376 }
fca8a500 377 restore_state_to_opc(env, tb, data);
57fec1fe
FB
378
379#ifdef CONFIG_PROFILER
c3fac113
EC
380 atomic_set(&prof->restore_time,
381 prof->restore_time + profile_getclock() - ti);
382 atomic_set(&prof->restore_count, prof->restore_count + 1);
57fec1fe 383#endif
d19893da
FB
384 return 0;
385}
5b6dd868 386
938e897a
EC
387void tb_destroy(TranslationBlock *tb)
388{
389 qemu_spin_destroy(&tb->jmp_lock);
390}
391
afd46fca 392bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc, bool will_exit)
a8a826a3
BS
393{
394 TranslationBlock *tb;
a5e99826 395 bool r = false;
d25f2a72
AB
396 uintptr_t check_offset;
397
398 /* The host_pc has to be in the region of the current code buffer. If
399 * it is not we will not be able to resolve it here. The two cases
400 * where host_pc will not be correct are:
401 *
402 * - fault during translation (instruction fetch)
403 * - fault from helper (not using GETPC() macro)
404 *
0ac20318 405 * Either way we need to return early as we can't resolve it here.
d25f2a72
AB
406 *
407 * We are using unsigned arithmetic so if host_pc <
408 * tcg_init_ctx.code_gen_buffer check_offset will wrap to way
409 * above the code_gen_buffer_size
d8b2239b 410 */
d25f2a72
AB
411 check_offset = host_pc - (uintptr_t) tcg_init_ctx.code_gen_buffer;
412
413 if (check_offset < tcg_init_ctx.code_gen_buffer_size) {
be2cdc5e 414 tb = tcg_tb_lookup(host_pc);
d25f2a72 415 if (tb) {
afd46fca 416 cpu_restore_state_from_tb(cpu, tb, host_pc, will_exit);
194125e3 417 if (tb_cflags(tb) & CF_NOCACHE) {
d25f2a72
AB
418 /* one-shot translation, invalidate it immediately */
419 tb_phys_invalidate(tb, -1);
be2cdc5e 420 tcg_tb_remove(tb);
938e897a 421 tb_destroy(tb);
d25f2a72
AB
422 }
423 r = true;
d8a499f1 424 }
a8a826a3 425 }
a5e99826
FK
426
427 return r;
a8a826a3
BS
428}
429
47c16ed5
AK
430static void page_init(void)
431{
432 page_size_init();
66ec9f49
VK
433 page_table_config_init();
434
5b6dd868
BS
435#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
436 {
437#ifdef HAVE_KINFO_GETVMMAP
438 struct kinfo_vmentry *freep;
439 int i, cnt;
440
441 freep = kinfo_getvmmap(getpid(), &cnt);
442 if (freep) {
443 mmap_lock();
444 for (i = 0; i < cnt; i++) {
445 unsigned long startaddr, endaddr;
446
447 startaddr = freep[i].kve_start;
448 endaddr = freep[i].kve_end;
449 if (h2g_valid(startaddr)) {
450 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
451
452 if (h2g_valid(endaddr)) {
453 endaddr = h2g(endaddr);
454 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
455 } else {
456#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
457 endaddr = ~0ul;
458 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
459#endif
460 }
461 }
462 }
463 free(freep);
464 mmap_unlock();
465 }
466#else
467 FILE *f;
468
469 last_brk = (unsigned long)sbrk(0);
470
471 f = fopen("/compat/linux/proc/self/maps", "r");
472 if (f) {
473 mmap_lock();
474
475 do {
476 unsigned long startaddr, endaddr;
477 int n;
478
479 n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
480
481 if (n == 2 && h2g_valid(startaddr)) {
482 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
483
484 if (h2g_valid(endaddr)) {
485 endaddr = h2g(endaddr);
486 } else {
487 endaddr = ~0ul;
488 }
489 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
490 }
491 } while (!feof(f));
492
493 fclose(f);
494 mmap_unlock();
495 }
496#endif
497 }
498#endif
499}
500
501static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
502{
503 PageDesc *pd;
504 void **lp;
505 int i;
506
5b6dd868 507 /* Level 1. Always allocated. */
66ec9f49 508 lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));
5b6dd868
BS
509
510 /* Level 2..N-1. */
66ec9f49 511 for (i = v_l2_levels; i > 0; i--) {
6940fab8 512 void **p = atomic_rcu_read(lp);
5b6dd868
BS
513
514 if (p == NULL) {
78722ed0
EC
515 void *existing;
516
5b6dd868
BS
517 if (!alloc) {
518 return NULL;
519 }
e3a0abfd 520 p = g_new0(void *, V_L2_SIZE);
78722ed0
EC
521 existing = atomic_cmpxchg(lp, NULL, p);
522 if (unlikely(existing)) {
523 g_free(p);
524 p = existing;
525 }
5b6dd868
BS
526 }
527
03f49957 528 lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
5b6dd868
BS
529 }
530
6940fab8 531 pd = atomic_rcu_read(lp);
5b6dd868 532 if (pd == NULL) {
78722ed0
EC
533 void *existing;
534
5b6dd868
BS
535 if (!alloc) {
536 return NULL;
537 }
e3a0abfd 538 pd = g_new0(PageDesc, V_L2_SIZE);
0b5c91f7
EC
539#ifndef CONFIG_USER_ONLY
540 {
541 int i;
542
543 for (i = 0; i < V_L2_SIZE; i++) {
544 qemu_spin_init(&pd[i].lock);
545 }
546 }
547#endif
78722ed0
EC
548 existing = atomic_cmpxchg(lp, NULL, pd);
549 if (unlikely(existing)) {
550 g_free(pd);
551 pd = existing;
552 }
5b6dd868
BS
553 }
554
03f49957 555 return pd + (index & (V_L2_SIZE - 1));
5b6dd868
BS
556}
557
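/*
 * Note on concurrency: the radix tree above is populated locklessly.
 * Two threads may both allocate a missing level; atomic_cmpxchg()
 * installs only the first one, and the loser frees its allocation and
 * continues with the existing pointer.  Readers use atomic_rcu_read(),
 * so they see either NULL or a fully initialised level.
 */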
558static inline PageDesc *page_find(tb_page_addr_t index)
559{
560 return page_find_alloc(index, 0);
561}
562
0b5c91f7
EC
563static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
564 PageDesc **ret_p2, tb_page_addr_t phys2, int alloc);
565
566/* In user-mode page locks aren't used; mmap_lock is enough */
567#ifdef CONFIG_USER_ONLY
6d9abf85
EC
568
569#define assert_page_locked(pd) tcg_debug_assert(have_mmap_lock())
570
0b5c91f7
EC
571static inline void page_lock(PageDesc *pd)
572{ }
573
574static inline void page_unlock(PageDesc *pd)
575{ }
576
577static inline void page_lock_tb(const TranslationBlock *tb)
578{ }
579
580static inline void page_unlock_tb(const TranslationBlock *tb)
581{ }
582
583struct page_collection *
584page_collection_lock(tb_page_addr_t start, tb_page_addr_t end)
585{
586 return NULL;
587}
588
589void page_collection_unlock(struct page_collection *set)
590{ }
591#else /* !CONFIG_USER_ONLY */
592
6d9abf85
EC
593#ifdef CONFIG_DEBUG_TCG
594
595static __thread GHashTable *ht_pages_locked_debug;
596
597static void ht_pages_locked_debug_init(void)
598{
599 if (ht_pages_locked_debug) {
600 return;
601 }
602 ht_pages_locked_debug = g_hash_table_new(NULL, NULL);
603}
604
605static bool page_is_locked(const PageDesc *pd)
606{
607 PageDesc *found;
608
609 ht_pages_locked_debug_init();
610 found = g_hash_table_lookup(ht_pages_locked_debug, pd);
611 return !!found;
612}
613
614static void page_lock__debug(PageDesc *pd)
615{
616 ht_pages_locked_debug_init();
617 g_assert(!page_is_locked(pd));
618 g_hash_table_insert(ht_pages_locked_debug, pd, pd);
619}
620
621static void page_unlock__debug(const PageDesc *pd)
622{
623 bool removed;
624
625 ht_pages_locked_debug_init();
626 g_assert(page_is_locked(pd));
627 removed = g_hash_table_remove(ht_pages_locked_debug, pd);
628 g_assert(removed);
629}
630
631static void
632do_assert_page_locked(const PageDesc *pd, const char *file, int line)
633{
634 if (unlikely(!page_is_locked(pd))) {
635 error_report("assert_page_lock: PageDesc %p not locked @ %s:%d",
636 pd, file, line);
637 abort();
638 }
639}
640
641#define assert_page_locked(pd) do_assert_page_locked(pd, __FILE__, __LINE__)
642
faa9372c
EC
643void assert_no_pages_locked(void)
644{
645 ht_pages_locked_debug_init();
646 g_assert(g_hash_table_size(ht_pages_locked_debug) == 0);
647}
648
6d9abf85
EC
649#else /* !CONFIG_DEBUG_TCG */
650
651#define assert_page_locked(pd)
652
653static inline void page_lock__debug(const PageDesc *pd)
654{
655}
656
657static inline void page_unlock__debug(const PageDesc *pd)
658{
659}
660
661#endif /* CONFIG_DEBUG_TCG */
662
0b5c91f7
EC
663static inline void page_lock(PageDesc *pd)
664{
6d9abf85 665 page_lock__debug(pd);
0b5c91f7
EC
666 qemu_spin_lock(&pd->lock);
667}
668
669static inline void page_unlock(PageDesc *pd)
670{
671 qemu_spin_unlock(&pd->lock);
6d9abf85 672 page_unlock__debug(pd);
0b5c91f7
EC
673}
674
675/* lock the page(s) of a TB in the correct acquisition order */
676static inline void page_lock_tb(const TranslationBlock *tb)
677{
678 page_lock_pair(NULL, tb->page_addr[0], NULL, tb->page_addr[1], 0);
679}
680
681static inline void page_unlock_tb(const TranslationBlock *tb)
682{
a688e73b
EC
683 PageDesc *p1 = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
684
685 page_unlock(p1);
0b5c91f7 686 if (unlikely(tb->page_addr[1] != -1)) {
a688e73b
EC
687 PageDesc *p2 = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
688
689 if (p2 != p1) {
690 page_unlock(p2);
691 }
0b5c91f7
EC
692 }
693}
694
695static inline struct page_entry *
696page_entry_new(PageDesc *pd, tb_page_addr_t index)
697{
698 struct page_entry *pe = g_malloc(sizeof(*pe));
699
700 pe->index = index;
701 pe->pd = pd;
702 pe->locked = false;
703 return pe;
704}
705
706static void page_entry_destroy(gpointer p)
707{
708 struct page_entry *pe = p;
709
710 g_assert(pe->locked);
711 page_unlock(pe->pd);
712 g_free(pe);
713}
714
715/* returns false on success */
716static bool page_entry_trylock(struct page_entry *pe)
717{
718 bool busy;
719
720 busy = qemu_spin_trylock(&pe->pd->lock);
721 if (!busy) {
722 g_assert(!pe->locked);
723 pe->locked = true;
6d9abf85 724 page_lock__debug(pe->pd);
0b5c91f7
EC
725 }
726 return busy;
727}
728
729static void do_page_entry_lock(struct page_entry *pe)
730{
731 page_lock(pe->pd);
732 g_assert(!pe->locked);
733 pe->locked = true;
734}
735
736static gboolean page_entry_lock(gpointer key, gpointer value, gpointer data)
737{
738 struct page_entry *pe = value;
739
740 do_page_entry_lock(pe);
741 return FALSE;
742}
743
744static gboolean page_entry_unlock(gpointer key, gpointer value, gpointer data)
745{
746 struct page_entry *pe = value;
747
748 if (pe->locked) {
749 pe->locked = false;
750 page_unlock(pe->pd);
751 }
752 return FALSE;
753}
754
755/*
756 * Trylock a page, and if successful, add the page to a collection.
757 * Returns true ("busy") if the page could not be locked; false otherwise.
758 */
759static bool page_trylock_add(struct page_collection *set, tb_page_addr_t addr)
760{
761 tb_page_addr_t index = addr >> TARGET_PAGE_BITS;
762 struct page_entry *pe;
763 PageDesc *pd;
764
765 pe = g_tree_lookup(set->tree, &index);
766 if (pe) {
767 return false;
768 }
769
770 pd = page_find(index);
771 if (pd == NULL) {
772 return false;
773 }
774
775 pe = page_entry_new(pd, index);
776 g_tree_insert(set->tree, &pe->index, pe);
777
778 /*
779 * If this is either (1) the first insertion or (2) a page whose index
780 * is higher than any other so far, just lock the page and move on.
781 */
782 if (set->max == NULL || pe->index > set->max->index) {
783 set->max = pe;
784 do_page_entry_lock(pe);
785 return false;
786 }
787 /*
788 * Try to acquire out-of-order lock; if busy, return busy so that we acquire
789 * locks in order.
790 */
791 return page_entry_trylock(pe);
792}
793
794static gint tb_page_addr_cmp(gconstpointer ap, gconstpointer bp, gpointer udata)
795{
796 tb_page_addr_t a = *(const tb_page_addr_t *)ap;
797 tb_page_addr_t b = *(const tb_page_addr_t *)bp;
798
799 if (a == b) {
800 return 0;
801 } else if (a < b) {
802 return -1;
803 }
804 return 1;
805}
806
807/*
808 * Lock a range of pages ([@start,@end[) as well as the pages of all
809 * intersecting TBs.
810 * Locking order: acquire locks in ascending order of page index.
811 */
812struct page_collection *
813page_collection_lock(tb_page_addr_t start, tb_page_addr_t end)
814{
815 struct page_collection *set = g_malloc(sizeof(*set));
816 tb_page_addr_t index;
817 PageDesc *pd;
818
819 start >>= TARGET_PAGE_BITS;
820 end >>= TARGET_PAGE_BITS;
821 g_assert(start <= end);
822
823 set->tree = g_tree_new_full(tb_page_addr_cmp, NULL, NULL,
824 page_entry_destroy);
825 set->max = NULL;
faa9372c 826 assert_no_pages_locked();
0b5c91f7
EC
827
828 retry:
829 g_tree_foreach(set->tree, page_entry_lock, NULL);
830
831 for (index = start; index <= end; index++) {
832 TranslationBlock *tb;
833 int n;
834
835 pd = page_find(index);
836 if (pd == NULL) {
837 continue;
838 }
839 if (page_trylock_add(set, index << TARGET_PAGE_BITS)) {
840 g_tree_foreach(set->tree, page_entry_unlock, NULL);
841 goto retry;
842 }
6d9abf85 843 assert_page_locked(pd);
0b5c91f7
EC
844 PAGE_FOR_EACH_TB(pd, tb, n) {
845 if (page_trylock_add(set, tb->page_addr[0]) ||
846 (tb->page_addr[1] != -1 &&
847 page_trylock_add(set, tb->page_addr[1]))) {
848 /* drop all locks, and reacquire in order */
849 g_tree_foreach(set->tree, page_entry_unlock, NULL);
850 goto retry;
851 }
852 }
853 }
854 return set;
855}
856
857void page_collection_unlock(struct page_collection *set)
858{
859 /* entries are unlocked and freed via page_entry_destroy */
860 g_tree_destroy(set->tree);
861 g_free(set);
862}
863
864#endif /* !CONFIG_USER_ONLY */
865
866static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
867 PageDesc **ret_p2, tb_page_addr_t phys2, int alloc)
868{
869 PageDesc *p1, *p2;
a688e73b
EC
870 tb_page_addr_t page1;
871 tb_page_addr_t page2;
0b5c91f7
EC
872
873 assert_memory_lock();
a688e73b
EC
874 g_assert(phys1 != -1);
875
876 page1 = phys1 >> TARGET_PAGE_BITS;
877 page2 = phys2 >> TARGET_PAGE_BITS;
878
879 p1 = page_find_alloc(page1, alloc);
0b5c91f7
EC
880 if (ret_p1) {
881 *ret_p1 = p1;
882 }
883 if (likely(phys2 == -1)) {
884 page_lock(p1);
885 return;
a688e73b
EC
886 } else if (page1 == page2) {
887 page_lock(p1);
888 if (ret_p2) {
889 *ret_p2 = p1;
890 }
891 return;
0b5c91f7 892 }
a688e73b 893 p2 = page_find_alloc(page2, alloc);
0b5c91f7
EC
894 if (ret_p2) {
895 *ret_p2 = p2;
896 }
a688e73b 897 if (page1 < page2) {
0b5c91f7
EC
898 page_lock(p1);
899 page_lock(p2);
900 } else {
901 page_lock(p2);
902 page_lock(p1);
903 }
904}
905
5b6dd868
BS
906/* Minimum size of the code gen buffer. This number is randomly chosen,
907 but not so small that we can't have a fair number of TBs live. */
a2fa63a8 908#define MIN_CODE_GEN_BUFFER_SIZE (1 * MiB)
5b6dd868
BS
909
910/* Maximum size of the code gen buffer we'd like to use. Unless otherwise
911 indicated, this is constrained by the range of direct branches on the
912 host cpu, as used by the TCG implementation of goto_tb. */
913#if defined(__x86_64__)
a2fa63a8 914# define MAX_CODE_GEN_BUFFER_SIZE (2 * GiB)
5b6dd868 915#elif defined(__sparc__)
a2fa63a8 916# define MAX_CODE_GEN_BUFFER_SIZE (2 * GiB)
5bfd75a3 917#elif defined(__powerpc64__)
a2fa63a8 918# define MAX_CODE_GEN_BUFFER_SIZE (2 * GiB)
399f1648 919#elif defined(__powerpc__)
a2fa63a8 920# define MAX_CODE_GEN_BUFFER_SIZE (32 * MiB)
4a136e0a 921#elif defined(__aarch64__)
a2fa63a8 922# define MAX_CODE_GEN_BUFFER_SIZE (2 * GiB)
5b6dd868
BS
923#elif defined(__s390x__)
924 /* We have a +- 4GB range on the branches; leave some slop. */
a2fa63a8 925# define MAX_CODE_GEN_BUFFER_SIZE (3 * GiB)
479eb121
RH
926#elif defined(__mips__)
927 /* We have a 256MB branch region, but leave room to make sure the
928 main executable is also within that region. */
a2fa63a8 929# define MAX_CODE_GEN_BUFFER_SIZE (128 * MiB)
5b6dd868
BS
930#else
931# define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1)
932#endif
933
600e17b2 934#if TCG_TARGET_REG_BITS == 32
a2fa63a8 935#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32 * MiB)
600e17b2
AB
936#ifdef CONFIG_USER_ONLY
937/*
938 * For user mode on smaller 32 bit systems we may run into trouble
939 * allocating big chunks of data in the right place. On these systems
940 * we utilise a static code generation buffer directly in the binary.
941 */
942#define USE_STATIC_CODE_GEN_BUFFER
943#endif
944#else /* TCG_TARGET_REG_BITS == 64 */
945#ifdef CONFIG_USER_ONLY
946/*
947 * As user-mode emulation typically means running multiple instances
948 * of the translator don't go too nuts with our default code gen
949 * buffer lest we make things too hard for the OS.
950 */
951#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (128 * MiB)
952#else
953/*
954 * We expect most system emulation to run one or two guests per host.
955 * Users running large scale system emulation may want to tweak their
956 * runtime setup via the tb-size control on the command line.
957 */
958#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (1 * GiB)
959#endif
960#endif
5b6dd868
BS
961
962#define DEFAULT_CODE_GEN_BUFFER_SIZE \
963 (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
964 ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
965
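/*
 * For example (illustrative numbers): a 64-bit x86_64 system-mode build
 * has DEFAULT_CODE_GEN_BUFFER_SIZE_1 == 1 GiB and
 * MAX_CODE_GEN_BUFFER_SIZE == 2 GiB, so the default resolves to 1 GiB;
 * a 32-bit host instead starts from 32 MiB.  Either way the value is
 * then clamped by size_code_gen_buffer() below.
 */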
966static inline size_t size_code_gen_buffer(size_t tb_size)
967{
968 /* Size the buffer. */
969 if (tb_size == 0) {
5b6dd868 970 tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
5b6dd868
BS
971 }
972 if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
973 tb_size = MIN_CODE_GEN_BUFFER_SIZE;
974 }
975 if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
976 tb_size = MAX_CODE_GEN_BUFFER_SIZE;
977 }
5b6dd868
BS
978 return tb_size;
979}
980
483c76e1
RH
981#ifdef __mips__
982/* In order to use J and JAL within the code_gen_buffer, we require
983 that the buffer not cross a 256MB boundary. */
984static inline bool cross_256mb(void *addr, size_t size)
985{
7ba6a512 986 return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful;
483c76e1
RH
987}
988
989/* We weren't able to allocate a buffer without crossing that boundary,
990 so make do with the larger portion of the buffer that doesn't cross.
991 Returns the new base of the buffer, and adjusts code_gen_buffer_size. */
992static inline void *split_cross_256mb(void *buf1, size_t size1)
993{
7ba6a512 994 void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
483c76e1
RH
995 size_t size2 = buf1 + size1 - buf2;
996
997 size1 = buf2 - buf1;
998 if (size1 < size2) {
999 size1 = size2;
1000 buf1 = buf2;
1001 }
1002
b1311c4a 1003 tcg_ctx->code_gen_buffer_size = size1;
483c76e1
RH
1004 return buf1;
1005}
1006#endif
1007
5b6dd868
BS
1008#ifdef USE_STATIC_CODE_GEN_BUFFER
1009static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
1010 __attribute__((aligned(CODE_GEN_ALIGN)));
1011
1012static inline void *alloc_code_gen_buffer(void)
1013{
483c76e1 1014 void *buf = static_code_gen_buffer;
f51f315a 1015 void *end = static_code_gen_buffer + sizeof(static_code_gen_buffer);
e8feb96f 1016 size_t size;
f293709c 1017
f51f315a
EC
1018 /* page-align the beginning and end of the buffer */
1019 buf = QEMU_ALIGN_PTR_UP(buf, qemu_real_host_page_size);
1020 end = QEMU_ALIGN_PTR_DOWN(end, qemu_real_host_page_size);
f293709c 1021
e8feb96f 1022 size = end - buf;
f293709c
RH
1023
1024 /* Honor a command-line option limiting the size of the buffer. */
b1311c4a 1025 if (size > tcg_ctx->code_gen_buffer_size) {
f51f315a
EC
1026 size = QEMU_ALIGN_DOWN(tcg_ctx->code_gen_buffer_size,
1027 qemu_real_host_page_size);
f293709c 1028 }
b1311c4a 1029 tcg_ctx->code_gen_buffer_size = size;
f293709c 1030
483c76e1 1031#ifdef __mips__
f293709c
RH
1032 if (cross_256mb(buf, size)) {
1033 buf = split_cross_256mb(buf, size);
b1311c4a 1034 size = tcg_ctx->code_gen_buffer_size;
483c76e1
RH
1035 }
1036#endif
f293709c 1037
f51f315a
EC
1038 if (qemu_mprotect_rwx(buf, size)) {
1039 abort();
1040 }
f293709c
RH
1041 qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
1042
483c76e1 1043 return buf;
5b6dd868 1044}
f293709c
RH
1045#elif defined(_WIN32)
1046static inline void *alloc_code_gen_buffer(void)
1047{
b1311c4a 1048 size_t size = tcg_ctx->code_gen_buffer_size;
4a4ff4c5 1049 return VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT,
e8feb96f 1050 PAGE_EXECUTE_READWRITE);
f293709c
RH
1051}
1052#else
5b6dd868
BS
1053static inline void *alloc_code_gen_buffer(void)
1054{
e8feb96f 1055 int prot = PROT_WRITE | PROT_READ | PROT_EXEC;
5b6dd868 1056 int flags = MAP_PRIVATE | MAP_ANONYMOUS;
b1311c4a 1057 size_t size = tcg_ctx->code_gen_buffer_size;
5b6dd868
BS
1058 void *buf;
1059
64547a3b 1060 buf = mmap(NULL, size, prot, flags, -1, 0);
483c76e1
RH
1061 if (buf == MAP_FAILED) {
1062 return NULL;
1063 }
1064
1065#ifdef __mips__
f293709c 1066 if (cross_256mb(buf, size)) {
64547a3b
RH
1067 /*
1068 * Try again, with the original still mapped, to avoid re-acquiring
1069 * the same 256mb crossing.
1070 */
f293709c 1071 size_t size2;
e8feb96f 1072 void *buf2 = mmap(NULL, size, prot, flags, -1, 0);
f68808c7 1073 switch ((int)(buf2 != MAP_FAILED)) {
f293709c
RH
1074 case 1:
1075 if (!cross_256mb(buf2, size)) {
483c76e1 1076 /* Success! Use the new buffer. */
e8feb96f 1077 munmap(buf, size);
f293709c 1078 break;
483c76e1
RH
1079 }
1080 /* Failure. Work with what we had. */
e8feb96f 1081 munmap(buf2, size);
f293709c
RH
1082 /* fallthru */
1083 default:
1084 /* Split the original buffer. Free the smaller half. */
1085 buf2 = split_cross_256mb(buf, size);
b1311c4a 1086 size2 = tcg_ctx->code_gen_buffer_size;
f293709c 1087 if (buf == buf2) {
e8feb96f 1088 munmap(buf + size2, size - size2);
f293709c
RH
1089 } else {
1090 munmap(buf, size - size2);
1091 }
1092 size = size2;
1093 break;
483c76e1 1094 }
f293709c 1095 buf = buf2;
483c76e1
RH
1096 }
1097#endif
1098
f293709c
RH
1099 /* Request large pages for the buffer. */
1100 qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
483c76e1 1101
5b6dd868
BS
1102 return buf;
1103}
f293709c 1104#endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */
5b6dd868
BS
1105
1106static inline void code_gen_alloc(size_t tb_size)
1107{
b1311c4a
EC
1108 tcg_ctx->code_gen_buffer_size = size_code_gen_buffer(tb_size);
1109 tcg_ctx->code_gen_buffer = alloc_code_gen_buffer();
1110 if (tcg_ctx->code_gen_buffer == NULL) {
5b6dd868
BS
1111 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
1112 exit(1);
1113 }
5b6dd868
BS
1114}
1115
61b8cef1
EC
1116static bool tb_cmp(const void *ap, const void *bp)
1117{
1118 const TranslationBlock *a = ap;
1119 const TranslationBlock *b = bp;
1120
1121 return a->pc == b->pc &&
1122 a->cs_base == b->cs_base &&
1123 a->flags == b->flags &&
1124 (tb_cflags(a) & CF_HASH_MASK) == (tb_cflags(b) & CF_HASH_MASK) &&
1125 a->trace_vcpu_dstate == b->trace_vcpu_dstate &&
1126 a->page_addr[0] == b->page_addr[0] &&
1127 a->page_addr[1] == b->page_addr[1];
1128}
1129
909eaac9
EC
1130static void tb_htable_init(void)
1131{
1132 unsigned int mode = QHT_MODE_AUTO_RESIZE;
1133
61b8cef1 1134 qht_init(&tb_ctx.htable, tb_cmp, CODE_GEN_HTABLE_SIZE, mode);
909eaac9
EC
1135}
1136
5b6dd868
BS
1137/* Must be called before using the QEMU cpus. 'tb_size' is the size
1138 (in bytes) allocated to the translation buffer. Zero means default
1139 size. */
1140void tcg_exec_init(unsigned long tb_size)
1141{
8e2b7299 1142 tcg_allowed = true;
5b6dd868 1143 cpu_gen_init();
5b6dd868 1144 page_init();
909eaac9 1145 tb_htable_init();
f293709c 1146 code_gen_alloc(tb_size);
4cbea598 1147#if defined(CONFIG_SOFTMMU)
5b6dd868
BS
1148 /* There's no guest base to take into account, so go ahead and
1149 initialize the prologue now. */
b1311c4a 1150 tcg_prologue_init(tcg_ctx);
5b6dd868
BS
1151#endif
1152}
1153
0b5c91f7 1154/* call with @p->lock held */
5b6dd868
BS
1155static inline void invalidate_page_bitmap(PageDesc *p)
1156{
6d9abf85 1157 assert_page_locked(p);
6fad459c 1158#ifdef CONFIG_SOFTMMU
012aef07
MA
1159 g_free(p->code_bitmap);
1160 p->code_bitmap = NULL;
5b6dd868 1161 p->code_write_count = 0;
6fad459c 1162#endif
5b6dd868
BS
1163}
1164
1165/* Set to NULL all the 'first_tb' fields in all PageDescs. */
1166static void page_flush_tb_1(int level, void **lp)
1167{
1168 int i;
1169
1170 if (*lp == NULL) {
1171 return;
1172 }
1173 if (level == 0) {
1174 PageDesc *pd = *lp;
1175
03f49957 1176 for (i = 0; i < V_L2_SIZE; ++i) {
0b5c91f7 1177 page_lock(&pd[i]);
1e05197f 1178 pd[i].first_tb = (uintptr_t)NULL;
5b6dd868 1179 invalidate_page_bitmap(pd + i);
0b5c91f7 1180 page_unlock(&pd[i]);
5b6dd868
BS
1181 }
1182 } else {
1183 void **pp = *lp;
1184
03f49957 1185 for (i = 0; i < V_L2_SIZE; ++i) {
5b6dd868
BS
1186 page_flush_tb_1(level - 1, pp + i);
1187 }
1188 }
1189}
1190
1191static void page_flush_tb(void)
1192{
66ec9f49 1193 int i, l1_sz = v_l1_size;
5b6dd868 1194
66ec9f49
VK
1195 for (i = 0; i < l1_sz; i++) {
1196 page_flush_tb_1(v_l2_levels, l1_map + i);
5b6dd868
BS
1197 }
1198}
1199
f19c6cc6
EC
1200static gboolean tb_host_size_iter(gpointer key, gpointer value, gpointer data)
1201{
1202 const TranslationBlock *tb = value;
1203 size_t *size = data;
1204
1205 *size += tb->tc.size;
1206 return false;
1207}
1208
5b6dd868 1209/* flush all the translation blocks */
14e6fe12 1210static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
5b6dd868 1211{
5025bb7f
EC
1212 bool did_flush = false;
1213
0ac20318 1214 mmap_lock();
14e6fe12 1215 /* If it has already been done at the request of another CPU,
3359baad
SF
1216 * just retry.
1217 */
44ded3d0 1218 if (tb_ctx.tb_flush_count != tb_flush_count.host_int) {
3359baad 1219 goto done;
135a972b 1220 }
5025bb7f 1221 did_flush = true;
3359baad 1222
424079c1 1223 if (DEBUG_TB_FLUSH_GATE) {
be2cdc5e 1224 size_t nb_tbs = tcg_nb_tbs();
f19c6cc6 1225 size_t host_size = 0;
2ac01d6d 1226
be2cdc5e 1227 tcg_tb_foreach(tb_host_size_iter, &host_size);
e8feb96f
EC
1228 printf("qemu: flush code_size=%zu nb_tbs=%zu avg_tb_size=%zu\n",
1229 tcg_code_size(), nb_tbs, nb_tbs > 0 ? host_size / nb_tbs : 0);
5b6dd868 1230 }
5b6dd868 1231
bdc44640 1232 CPU_FOREACH(cpu) {
f3ced3c5 1233 cpu_tb_jmp_cache_clear(cpu);
5b6dd868
BS
1234 }
1235
44ded3d0 1236 qht_reset_size(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
5b6dd868
BS
1237 page_flush_tb();
1238
e8feb96f 1239 tcg_region_reset_all();
5b6dd868
BS
1240 /* XXX: flush processor icache at this point if cache flush is
1241 expensive */
44ded3d0 1242 atomic_mb_set(&tb_ctx.tb_flush_count, tb_ctx.tb_flush_count + 1);
3359baad
SF
1243
1244done:
0ac20318 1245 mmap_unlock();
5025bb7f
EC
1246 if (did_flush) {
1247 qemu_plugin_flush_cb();
1248 }
3359baad
SF
1249}
1250
1251void tb_flush(CPUState *cpu)
1252{
1253 if (tcg_enabled()) {
44ded3d0 1254 unsigned tb_flush_count = atomic_mb_read(&tb_ctx.tb_flush_count);
136094d0
EC
1255
1256 if (cpu_in_exclusive_context(cpu)) {
1257 do_tb_flush(cpu, RUN_ON_CPU_HOST_INT(tb_flush_count));
1258 } else {
1259 async_safe_run_on_cpu(cpu, do_tb_flush,
1260 RUN_ON_CPU_HOST_INT(tb_flush_count));
1261 }
3359baad 1262 }
5b6dd868
BS
1263}
1264
6eb062ab
EC
1265/*
1266 * Formerly ifdef DEBUG_TB_CHECK. These debug functions are user-mode-only,
1267 * so in order to prevent bit rot we compile them unconditionally in user-mode,
1268 * and let the optimizer get rid of them by wrapping their user-only callers
1269 * with if (DEBUG_TB_CHECK_GATE).
1270 */
1271#ifdef CONFIG_USER_ONLY
5b6dd868 1272
78255ba2 1273static void do_tb_invalidate_check(void *p, uint32_t hash, void *userp)
5b6dd868 1274{
909eaac9
EC
1275 TranslationBlock *tb = p;
1276 target_ulong addr = *(target_ulong *)userp;
1277
1278 if (!(addr + TARGET_PAGE_SIZE <= tb->pc || addr >= tb->pc + tb->size)) {
1279 printf("ERROR invalidate: address=" TARGET_FMT_lx
1280 " PC=%08lx size=%04x\n", addr, (long)tb->pc, tb->size);
1281 }
1282}
5b6dd868 1283
7d7500d9
PB
1284/* verify that all the pages have correct rights for code
1285 *
0ac20318 1286 * Called with mmap_lock held.
7d7500d9 1287 */
909eaac9
EC
1288static void tb_invalidate_check(target_ulong address)
1289{
5b6dd868 1290 address &= TARGET_PAGE_MASK;
44ded3d0 1291 qht_iter(&tb_ctx.htable, do_tb_invalidate_check, &address);
909eaac9
EC
1292}
1293
78255ba2 1294static void do_tb_page_check(void *p, uint32_t hash, void *userp)
909eaac9
EC
1295{
1296 TranslationBlock *tb = p;
1297 int flags1, flags2;
1298
1299 flags1 = page_get_flags(tb->pc);
1300 flags2 = page_get_flags(tb->pc + tb->size - 1);
1301 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
1302 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
1303 (long)tb->pc, tb->size, flags1, flags2);
5b6dd868
BS
1304 }
1305}
1306
1307/* verify that all the pages have correct rights for code */
1308static void tb_page_check(void)
1309{
44ded3d0 1310 qht_iter(&tb_ctx.htable, do_tb_page_check, NULL);
5b6dd868
BS
1311}
1312
6eb062ab 1313#endif /* CONFIG_USER_ONLY */
5b6dd868 1314
0ac20318
EC
1315/*
1316 * user-mode: call with mmap_lock held
1317 * !user-mode: call with @pd->lock held
1318 */
1e05197f 1319static inline void tb_page_remove(PageDesc *pd, TranslationBlock *tb)
5b6dd868
BS
1320{
1321 TranslationBlock *tb1;
1e05197f 1322 uintptr_t *pprev;
5b6dd868
BS
1323 unsigned int n1;
1324
6d9abf85 1325 assert_page_locked(pd);
1e05197f
EC
1326 pprev = &pd->first_tb;
1327 PAGE_FOR_EACH_TB(pd, tb1, n1) {
5b6dd868 1328 if (tb1 == tb) {
1e05197f
EC
1329 *pprev = tb1->page_next[n1];
1330 return;
5b6dd868 1331 }
1e05197f 1332 pprev = &tb1->page_next[n1];
5b6dd868 1333 }
1e05197f 1334 g_assert_not_reached();
5b6dd868
BS
1335}
1336
194125e3
EC
1337/* remove @orig from its @n_orig-th jump list */
1338static inline void tb_remove_from_jmp_list(TranslationBlock *orig, int n_orig)
5b6dd868 1339{
194125e3
EC
1340 uintptr_t ptr, ptr_locked;
1341 TranslationBlock *dest;
1342 TranslationBlock *tb;
1343 uintptr_t *pprev;
1344 int n;
5b6dd868 1345
194125e3
EC
1346 /* mark the LSB of jmp_dest[] so that no further jumps can be inserted */
1347 ptr = atomic_or_fetch(&orig->jmp_dest[n_orig], 1);
1348 dest = (TranslationBlock *)(ptr & ~1);
1349 if (dest == NULL) {
1350 return;
1351 }
5b6dd868 1352
194125e3
EC
1353 qemu_spin_lock(&dest->jmp_lock);
1354 /*
1355 * While acquiring the lock, the jump might have been removed if the
1356 * destination TB was invalidated; check again.
1357 */
1358 ptr_locked = atomic_read(&orig->jmp_dest[n_orig]);
1359 if (ptr_locked != ptr) {
1360 qemu_spin_unlock(&dest->jmp_lock);
1361 /*
1362 * The only possibility is that the jump was unlinked via
1363 * tb_jump_unlink(dest). Seeing another destination here would be a bug,
1364 * because we set the LSB above.
1365 */
1366 g_assert(ptr_locked == 1 && dest->cflags & CF_INVALID);
1367 return;
5b6dd868 1368 }
194125e3
EC
1369 /*
1370 * We first acquired the lock, and since the destination pointer matches,
1371 * we know for sure that @orig is in the jmp list.
1372 */
1373 pprev = &dest->jmp_list_head;
1374 TB_FOR_EACH_JMP(dest, tb, n) {
1375 if (tb == orig && n == n_orig) {
1376 *pprev = tb->jmp_list_next[n];
1377 /* no need to set orig->jmp_dest[n]; setting the LSB was enough */
1378 qemu_spin_unlock(&dest->jmp_lock);
1379 return;
1380 }
1381 pprev = &tb->jmp_list_next[n];
1382 }
1383 g_assert_not_reached();
5b6dd868
BS
1384}
1385
1386/* reset the jump entry 'n' of a TB so that it is not chained to
1387 another TB */
1388static inline void tb_reset_jump(TranslationBlock *tb, int n)
1389{
e7e168f4 1390 uintptr_t addr = (uintptr_t)(tb->tc.ptr + tb->jmp_reset_offset[n]);
f309101c 1391 tb_set_jmp_target(tb, n, addr);
5b6dd868
BS
1392}
1393
89bba496 1394/* remove any jumps to the TB */
194125e3 1395static inline void tb_jmp_unlink(TranslationBlock *dest)
89bba496 1396{
194125e3
EC
1397 TranslationBlock *tb;
1398 int n;
89bba496 1399
194125e3
EC
1400 qemu_spin_lock(&dest->jmp_lock);
1401
1402 TB_FOR_EACH_JMP(dest, tb, n) {
1403 tb_reset_jump(tb, n);
1404 atomic_and(&tb->jmp_dest[n], (uintptr_t)NULL | 1);
1405 /* No need to clear the list entry; setting the dest ptr is enough */
89bba496 1406 }
194125e3
EC
1407 dest->jmp_list_head = (uintptr_t)NULL;
1408
1409 qemu_spin_unlock(&dest->jmp_lock);
89bba496
SF
1410}
1411
0ac20318
EC
1412/*
1413 * In user-mode, call with mmap_lock held.
1414 * In !user-mode, if @rm_from_page_list is set, call with the TB's pages'
1415 * locks held.
1416 */
0b5c91f7 1417static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
5b6dd868 1418{
182735ef 1419 CPUState *cpu;
5b6dd868 1420 PageDesc *p;
42bd3228 1421 uint32_t h;
5b6dd868 1422 tb_page_addr_t phys_pc;
5b6dd868 1423
0ac20318 1424 assert_memory_lock();
e505a063 1425
194125e3
EC
1426 /* make sure no further incoming jumps will be chained to this TB */
1427 qemu_spin_lock(&tb->jmp_lock);
84f1c148 1428 atomic_set(&tb->cflags, tb->cflags | CF_INVALID);
194125e3 1429 qemu_spin_unlock(&tb->jmp_lock);
6d21e420 1430
5b6dd868
BS
1431 /* remove the TB from the hash list */
1432 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
194125e3 1433 h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb_cflags(tb) & CF_HASH_MASK,
4e2ca83e 1434 tb->trace_vcpu_dstate);
ec7eb2ae
EC
1435 if (!(tb->cflags & CF_NOCACHE) &&
1436 !qht_remove(&tb_ctx.htable, tb, h)) {
cc689485
EC
1437 return;
1438 }
5b6dd868
BS
1439
1440 /* remove the TB from the page list */
0b5c91f7 1441 if (rm_from_page_list) {
5b6dd868 1442 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
1e05197f 1443 tb_page_remove(p, tb);
5b6dd868 1444 invalidate_page_bitmap(p);
0b5c91f7
EC
1445 if (tb->page_addr[1] != -1) {
1446 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
1447 tb_page_remove(p, tb);
1448 invalidate_page_bitmap(p);
1449 }
5b6dd868
BS
1450 }
1451
5b6dd868
BS
1452 /* remove the TB from each CPU's tb_jmp_cache */
1453 h = tb_jmp_cache_hash_func(tb->pc);
bdc44640 1454 CPU_FOREACH(cpu) {
89a16b1e
SF
1455 if (atomic_read(&cpu->tb_jmp_cache[h]) == tb) {
1456 atomic_set(&cpu->tb_jmp_cache[h], NULL);
5b6dd868
BS
1457 }
1458 }
1459
1460 /* suppress this TB from the two jump lists */
13362678
SF
1461 tb_remove_from_jmp_list(tb, 0);
1462 tb_remove_from_jmp_list(tb, 1);
5b6dd868
BS
1463
1464 /* suppress any remaining jumps to this TB */
89bba496 1465 tb_jmp_unlink(tb);
5b6dd868 1466
128ed227
EC
1467 atomic_set(&tcg_ctx->tb_phys_invalidate_count,
1468 tcg_ctx->tb_phys_invalidate_count + 1);
5b6dd868
BS
1469}
1470
0b5c91f7
EC
1471static void tb_phys_invalidate__locked(TranslationBlock *tb)
1472{
1473 do_tb_phys_invalidate(tb, true);
1474}
1475
1476/* invalidate one TB
1477 *
0ac20318 1478 * Called with mmap_lock held in user-mode.
0b5c91f7
EC
1479 */
1480void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
1481{
9739e376 1482 if (page_addr == -1 && tb->page_addr[0] != -1) {
0b5c91f7
EC
1483 page_lock_tb(tb);
1484 do_tb_phys_invalidate(tb, true);
1485 page_unlock_tb(tb);
1486 } else {
1487 do_tb_phys_invalidate(tb, false);
1488 }
1489}
1490
6fad459c 1491#ifdef CONFIG_SOFTMMU
0b5c91f7 1492/* call with @p->lock held */
5b6dd868
BS
1493static void build_page_bitmap(PageDesc *p)
1494{
1495 int n, tb_start, tb_end;
1496 TranslationBlock *tb;
1497
6d9abf85 1498 assert_page_locked(p);
510a647f 1499 p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);
5b6dd868 1500
1e05197f 1501 PAGE_FOR_EACH_TB(p, tb, n) {
5b6dd868
BS
1502 /* NOTE: this is subtle as a TB may span two physical pages */
1503 if (n == 0) {
1504 /* NOTE: tb_end may be after the end of the page, but
1505 it is not a problem */
1506 tb_start = tb->pc & ~TARGET_PAGE_MASK;
1507 tb_end = tb_start + tb->size;
1508 if (tb_end > TARGET_PAGE_SIZE) {
1509 tb_end = TARGET_PAGE_SIZE;
e505a063 1510 }
5b6dd868
BS
1511 } else {
1512 tb_start = 0;
1513 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1514 }
510a647f 1515 bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
5b6dd868
BS
1516 }
1517}
6fad459c 1518#endif
5b6dd868 1519
e90d96b1
SF
1520/* add the tb to the target page and protect it if necessary
1521 *
1522 * Called with mmap_lock held for user-mode emulation.
0ac20318 1523 * Called with @p->lock held in !user-mode.
e90d96b1 1524 */
0b5c91f7
EC
1525static inline void tb_page_add(PageDesc *p, TranslationBlock *tb,
1526 unsigned int n, tb_page_addr_t page_addr)
e90d96b1 1527{
e90d96b1
SF
1528#ifndef CONFIG_USER_ONLY
1529 bool page_already_protected;
1530#endif
1531
6d9abf85 1532 assert_page_locked(p);
e505a063 1533
e90d96b1 1534 tb->page_addr[n] = page_addr;
e90d96b1
SF
1535 tb->page_next[n] = p->first_tb;
1536#ifndef CONFIG_USER_ONLY
1e05197f 1537 page_already_protected = p->first_tb != (uintptr_t)NULL;
e90d96b1 1538#endif
1e05197f 1539 p->first_tb = (uintptr_t)tb | n;
e90d96b1
SF
1540 invalidate_page_bitmap(p);
1541
1542#if defined(CONFIG_USER_ONLY)
1543 if (p->flags & PAGE_WRITE) {
1544 target_ulong addr;
1545 PageDesc *p2;
1546 int prot;
1547
1548 /* force the host page as non writable (writes will have a
1549 page fault + mprotect overhead) */
1550 page_addr &= qemu_host_page_mask;
1551 prot = 0;
1552 for (addr = page_addr; addr < page_addr + qemu_host_page_size;
1553 addr += TARGET_PAGE_SIZE) {
1554
1555 p2 = page_find(addr >> TARGET_PAGE_BITS);
1556 if (!p2) {
1557 continue;
1558 }
1559 prot |= p2->flags;
1560 p2->flags &= ~PAGE_WRITE;
1561 }
1562 mprotect(g2h(page_addr), qemu_host_page_size,
1563 (prot & PAGE_BITS) & ~PAGE_WRITE);
dae9e03a
EC
1564 if (DEBUG_TB_INVALIDATE_GATE) {
1565 printf("protecting code page: 0x" TB_PAGE_ADDR_FMT "\n", page_addr);
1566 }
e90d96b1
SF
1567 }
1568#else
1569 /* if some code is already present, then the pages are already
1570 protected. So we handle the case where only the first TB is
1571 allocated in a physical page */
1572 if (!page_already_protected) {
1573 tlb_protect_code(page_addr);
1574 }
1575#endif
1576}
1577
1578/* add a new TB and link it to the physical page tables. phys_page2 is
1579 * (-1) to indicate that only one page contains the TB.
1580 *
1581 * Called with mmap_lock held for user-mode emulation.
95590e24
EC
1582 *
1583 * Returns a pointer @tb, or a pointer to an existing TB that matches @tb.
1584 * Note that in !user-mode, another thread might have already added a TB
1585 * for the same block of guest code that @tb corresponds to. In that case,
1586 * the caller should discard the original @tb, and use instead the returned TB.
e90d96b1 1587 */
95590e24
EC
1588static TranslationBlock *
1589tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
1590 tb_page_addr_t phys_page2)
e90d96b1 1591{
0b5c91f7
EC
1592 PageDesc *p;
1593 PageDesc *p2 = NULL;
e90d96b1 1594
e505a063
AB
1595 assert_memory_lock();
1596
9739e376
PM
1597 if (phys_pc == -1) {
1598 /*
1599 * If the TB is not associated with a physical RAM page then
1600 * it must be a temporary one-insn TB, and we have nothing to do
1601 * except fill in the page_addr[] fields.
1602 */
1603 assert(tb->cflags & CF_NOCACHE);
1604 tb->page_addr[0] = tb->page_addr[1] = -1;
1605 return tb;
1606 }
1607
0b5c91f7
EC
1608 /*
1609 * Add the TB to the page list, acquiring first the pages' locks.
95590e24
EC
1610 * We keep the locks held until after inserting the TB in the hash table,
1611 * so that if the insertion fails we know for sure that the TBs are still
1612 * in the page descriptors.
1613 * Note that inserting into the hash table first isn't an option, since
1614 * we can only insert TBs that are fully initialized.
0b5c91f7
EC
1615 */
1616 page_lock_pair(&p, phys_pc, &p2, phys_page2, 1);
1617 tb_page_add(p, tb, 0, phys_pc & TARGET_PAGE_MASK);
1618 if (p2) {
1619 tb_page_add(p2, tb, 1, phys_page2);
e90d96b1
SF
1620 } else {
1621 tb->page_addr[1] = -1;
1622 }
1623
ec7eb2ae
EC
1624 if (!(tb->cflags & CF_NOCACHE)) {
1625 void *existing_tb = NULL;
1626 uint32_t h;
95590e24 1627
ec7eb2ae
EC
1628 /* add in the hash table */
1629 h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags & CF_HASH_MASK,
1630 tb->trace_vcpu_dstate);
1631 qht_insert(&tb_ctx.htable, tb, h, &existing_tb);
1632
1633 /* remove TB from the page(s) if we couldn't insert it */
1634 if (unlikely(existing_tb)) {
1635 tb_page_remove(p, tb);
1636 invalidate_page_bitmap(p);
1637 if (p2) {
1638 tb_page_remove(p2, tb);
1639 invalidate_page_bitmap(p2);
1640 }
1641 tb = existing_tb;
95590e24 1642 }
95590e24
EC
1643 }
1644
a688e73b 1645 if (p2 && p2 != p) {
0b5c91f7
EC
1646 page_unlock(p2);
1647 }
1648 page_unlock(p);
1649
6eb062ab
EC
1650#ifdef CONFIG_USER_ONLY
1651 if (DEBUG_TB_CHECK_GATE) {
1652 tb_page_check();
1653 }
e90d96b1 1654#endif
95590e24 1655 return tb;
e90d96b1
SF
1656}
1657
75692087 1658/* Called with mmap_lock held for user mode emulation. */
648f034c 1659TranslationBlock *tb_gen_code(CPUState *cpu,
5b6dd868 1660 target_ulong pc, target_ulong cs_base,
89fee74a 1661 uint32_t flags, int cflags)
5b6dd868 1662{
648f034c 1663 CPUArchState *env = cpu->env_ptr;
95590e24 1664 TranslationBlock *tb, *existing_tb;
5b6dd868
BS
1665 tb_page_addr_t phys_pc, phys_page2;
1666 target_ulong virt_page2;
fec88f64 1667 tcg_insn_unit *gen_code_buf;
8b86d6d2 1668 int gen_code_size, search_size, max_insns;
fec88f64 1669#ifdef CONFIG_PROFILER
c3fac113 1670 TCGProfile *prof = &tcg_ctx->prof;
fec88f64
RH
1671 int64_t ti;
1672#endif
fe9b676f 1673
e505a063 1674 assert_memory_lock();
5b6dd868
BS
1675
1676 phys_pc = get_page_addr_code(env, pc);
b125f9dc 1677
9739e376
PM
1678 if (phys_pc == -1) {
1679 /* Generate a temporary TB with 1 insn in it */
1680 cflags &= ~CF_COUNT_MASK;
1681 cflags |= CF_NOCACHE | 1;
1682 }
1683
f7b78602
PM
1684 cflags &= ~CF_CLUSTER_MASK;
1685 cflags |= cpu->cluster_index << CF_CLUSTER_SHIFT;
1686
8b86d6d2
RH
1687 max_insns = cflags & CF_COUNT_MASK;
1688 if (max_insns == 0) {
1689 max_insns = CF_COUNT_MASK;
1690 }
1691 if (max_insns > TCG_MAX_INSNS) {
1692 max_insns = TCG_MAX_INSNS;
1693 }
1694 if (cpu->singlestep_enabled || singlestep) {
1695 max_insns = 1;
1696 }
1697
e8feb96f 1698 buffer_overflow:
fe9b676f 1699 tb = tcg_tb_alloc(tcg_ctx);
b125f9dc 1700 if (unlikely(!tb)) {
5b6dd868 1701 /* flush must be done */
bbd77c18 1702 tb_flush(cpu);
3359baad 1703 mmap_unlock();
8499c8fc
PD
1704 /* Make the execution loop process the flush as soon as possible. */
1705 cpu->exception_index = EXCP_INTERRUPT;
3359baad 1706 cpu_loop_exit(cpu);
5b6dd868 1707 }
fec88f64 1708
b1311c4a 1709 gen_code_buf = tcg_ctx->code_gen_ptr;
e7e168f4 1710 tb->tc.ptr = gen_code_buf;
2b48e10f 1711 tb->pc = pc;
5b6dd868
BS
1712 tb->cs_base = cs_base;
1713 tb->flags = flags;
1714 tb->cflags = cflags;
1b194002 1715 tb->orig_tb = NULL;
61a67f71 1716 tb->trace_vcpu_dstate = *cpu->trace_dstate;
b1311c4a 1717 tcg_ctx->tb_cflags = cflags;
6e6c4efe 1718 tb_overflow:
fec88f64
RH
1719
1720#ifdef CONFIG_PROFILER
c3fac113
EC
1721 /* includes aborted translations because of exceptions */
1722 atomic_set(&prof->tb_count1, prof->tb_count1 + 1);
fec88f64
RH
1723 ti = profile_getclock();
1724#endif
1725
b1311c4a 1726 tcg_func_start(tcg_ctx);
fec88f64 1727
29a0af61 1728 tcg_ctx->cpu = env_cpu(env);
8b86d6d2 1729 gen_intermediate_code(cpu, tb, max_insns);
b1311c4a 1730 tcg_ctx->cpu = NULL;
fec88f64 1731
e7e168f4 1732 trace_translate_block(tb, tb->pc, tb->tc.ptr);
fec88f64
RH
1733
1734 /* generate machine code */
f309101c
SF
1735 tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
1736 tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
b1311c4a 1737 tcg_ctx->tb_jmp_reset_offset = tb->jmp_reset_offset;
a8583393 1738 if (TCG_TARGET_HAS_direct_jump) {
b1311c4a
EC
1739 tcg_ctx->tb_jmp_insn_offset = tb->jmp_target_arg;
1740 tcg_ctx->tb_jmp_target_addr = NULL;
a8583393 1741 } else {
b1311c4a
EC
1742 tcg_ctx->tb_jmp_insn_offset = NULL;
1743 tcg_ctx->tb_jmp_target_addr = tb->jmp_target_arg;
a8583393 1744 }
fec88f64
RH
1745
1746#ifdef CONFIG_PROFILER
c3fac113
EC
1747 atomic_set(&prof->tb_count, prof->tb_count + 1);
1748 atomic_set(&prof->interm_time, prof->interm_time + profile_getclock() - ti);
0aecede6 1749 ti = profile_getclock();
fec88f64
RH
1750#endif
1751
b1311c4a 1752 gen_code_size = tcg_gen_code(tcg_ctx, tb);
b125f9dc 1753 if (unlikely(gen_code_size < 0)) {
6e6c4efe
RH
1754 switch (gen_code_size) {
1755 case -1:
1756 /*
1757 * Overflow of code_gen_buffer, or the current slice of it.
1758 *
1759 * TODO: We don't need to re-do gen_intermediate_code, nor
1760 * should we re-do the tcg optimization currently hidden
1761 * inside tcg_gen_code. All that should be required is to
1762 * flush the TBs, allocate a new TB, re-initialize it per
1763 * above, and re-do the actual code generation.
1764 */
1765 goto buffer_overflow;
1766
1767 case -2:
1768 /*
1769 * The code generated for the TranslationBlock is too large.
1770 * The maximum size allowed by the unwind info is 64k.
1771 * There may be stricter constraints from relocations
1772 * in the tcg backend.
1773 *
1774 * Try again with half as many insns as we attempted this time.
1775 * If a single insn overflows, there's a bug somewhere...
1776 */
1777 max_insns = tb->icount;
1778 assert(max_insns > 1);
1779 max_insns /= 2;
1780 goto tb_overflow;
1781
1782 default:
1783 g_assert_not_reached();
1784 }
b125f9dc 1785 }
fca8a500 1786 search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
b125f9dc
RH
1787 if (unlikely(search_size < 0)) {
1788 goto buffer_overflow;
1789 }
2ac01d6d 1790 tb->tc.size = gen_code_size;
fec88f64
RH
1791
1792#ifdef CONFIG_PROFILER
c3fac113
EC
1793 atomic_set(&prof->code_time, prof->code_time + profile_getclock() - ti);
1794 atomic_set(&prof->code_in_len, prof->code_in_len + tb->size);
1795 atomic_set(&prof->code_out_len, prof->code_out_len + gen_code_size);
1796 atomic_set(&prof->search_out_len, prof->search_out_len + search_size);
fec88f64
RH
1797#endif
1798
1799#ifdef DEBUG_DISAS
d977e1c2
AB
1800 if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
1801 qemu_log_in_addr_range(tb->pc)) {
fc59d2d8 1802 FILE *logfile = qemu_log_lock();
5f0df033
AB
1803 int code_size, data_size = 0;
1804 g_autoptr(GString) note = g_string_new("[tb header & initial instruction]");
1805 size_t chunk_start = 0;
1806 int insn = 0;
fec88f64 1807 qemu_log("OUT: [size=%d]\n", gen_code_size);
b1311c4a 1808 if (tcg_ctx->data_gen_ptr) {
5f0df033
AB
1809 code_size = tcg_ctx->data_gen_ptr - tb->tc.ptr;
1810 data_size = gen_code_size - code_size;
1811 } else {
1812 code_size = gen_code_size;
1813 }
57a26946 1814
5f0df033
AB
1815 /* Dump header and the first instruction */
1816 chunk_start = tcg_ctx->gen_insn_end_off[insn];
1817 log_disas(tb->tc.ptr, chunk_start, note->str);
57a26946 1818
5f0df033
AB
1819 /*
1820 * Dump each instruction chunk, wrapping up empty chunks into
1821 * the next instruction. The whole array is offset so the
1822 * first entry is the beginning of the 2nd instruction.
1823 */
1824 while (insn <= tb->icount && chunk_start < code_size) {
1825 size_t chunk_end = tcg_ctx->gen_insn_end_off[insn];
1826 if (chunk_end > chunk_start) {
1827 g_string_printf(note, "[guest addr: " TARGET_FMT_lx "]",
1828 tcg_ctx->gen_insn_data[insn][0]);
1829 log_disas(tb->tc.ptr + chunk_start, chunk_end - chunk_start,
1830 note->str);
1831 chunk_start = chunk_end;
1832 }
1833 insn++;
1834 }
1835
1836 /* Finally dump any data we may have after the block */
1837 if (data_size) {
1838 int i;
1839 qemu_log(" data: [size=%d]\n", data_size);
57a26946
RH
1840 for (i = 0; i < data_size; i += sizeof(tcg_target_ulong)) {
1841 if (sizeof(tcg_target_ulong) == 8) {
1842 qemu_log("0x%08" PRIxPTR ": .quad 0x%016" PRIx64 "\n",
b1311c4a
EC
1843 (uintptr_t)tcg_ctx->data_gen_ptr + i,
1844 *(uint64_t *)(tcg_ctx->data_gen_ptr + i));
57a26946
RH
1845 } else {
1846 qemu_log("0x%08" PRIxPTR ": .long 0x%08x\n",
b1311c4a
EC
1847 (uintptr_t)tcg_ctx->data_gen_ptr + i,
1848 *(uint32_t *)(tcg_ctx->data_gen_ptr + i));
57a26946
RH
1849 }
1850 }
57a26946 1851 }
fec88f64
RH
1852 qemu_log("\n");
1853 qemu_log_flush();
fc59d2d8 1854 qemu_log_unlock(logfile);
fec88f64
RH
1855 }
1856#endif
1857
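 /* Advance code_gen_ptr past the code and search data just emitted,
    keeping CODE_GEN_ALIGN alignment for the next translation. */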
e8feb96f 1858 atomic_set(&tcg_ctx->code_gen_ptr, (void *)
fca8a500 1859 ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
e8feb96f 1860 CODE_GEN_ALIGN));
5b6dd868 1861
901bc3de 1862 /* init jump list */
194125e3
EC
1863 qemu_spin_init(&tb->jmp_lock);
1864 tb->jmp_list_head = (uintptr_t)NULL;
901bc3de
SF
1865 tb->jmp_list_next[0] = (uintptr_t)NULL;
1866 tb->jmp_list_next[1] = (uintptr_t)NULL;
194125e3
EC
1867 tb->jmp_dest[0] = (uintptr_t)NULL;
1868 tb->jmp_dest[1] = (uintptr_t)NULL;
901bc3de 1869
696c7066 1870 /* init original jump addresses which have been set during tcg_gen_code() */
901bc3de
SF
1871 if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
1872 tb_reset_jump(tb, 0);
1873 }
1874 if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
1875 tb_reset_jump(tb, 1);
1876 }
1877
5b6dd868
BS
1878 /* check next page if needed */
1879 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
1880 phys_page2 = -1;
1881 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
1882 phys_page2 = get_page_addr_code(env, virt_page2);
1883 }
0ac20318
EC
1884 /*
1885 * No explicit memory barrier is required -- tb_link_page() makes the
1886 * TB visible in a consistent state.
901bc3de 1887 */
95590e24
EC
1888 existing_tb = tb_link_page(tb, phys_pc, phys_page2);
1889 /* if the TB already exists, discard what we just translated */
1890 if (unlikely(existing_tb != tb)) {
1891 uintptr_t orig_aligned = (uintptr_t)gen_code_buf;
1892
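 /* The TranslationBlock struct was allocated immediately before the code
    in the buffer, so back up over it as well to reclaim the space. */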
1893 orig_aligned -= ROUND_UP(sizeof(*tb), qemu_icache_linesize);
1894 atomic_set(&tcg_ctx->code_gen_ptr, (void *)orig_aligned);
938e897a 1895 tb_destroy(tb);
95590e24
EC
1896 return existing_tb;
1897 }
be2cdc5e 1898 tcg_tb_insert(tb);
5b6dd868
BS
1899 return tb;
1900}
1901
5b6dd868 1902/*
0b5c91f7 1903 * @p must be non-NULL.
0ac20318
EC
1904 * user-mode: call with mmap_lock held.
1905 * !user-mode: call with all @pages locked.
5b6dd868 1906 */
0b5c91f7
EC
1907static void
1908tb_invalidate_phys_page_range__locked(struct page_collection *pages,
1909 PageDesc *p, tb_page_addr_t start,
1910 tb_page_addr_t end,
5a7c27bb 1911 uintptr_t retaddr)
5b6dd868 1912{
1e05197f 1913 TranslationBlock *tb;
5b6dd868 1914 tb_page_addr_t tb_start, tb_end;
5b6dd868
BS
1915 int n;
1916#ifdef TARGET_HAS_PRECISE_SMC
9b990ee5
RH
1917 CPUState *cpu = current_cpu;
1918 CPUArchState *env = NULL;
5a7c27bb
RH
1919 bool current_tb_not_found = retaddr != 0;
1920 bool current_tb_modified = false;
5b6dd868 1921 TranslationBlock *current_tb = NULL;
5b6dd868
BS
1922 target_ulong current_pc = 0;
1923 target_ulong current_cs_base = 0;
89fee74a 1924 uint32_t current_flags = 0;
5b6dd868
BS
1925#endif /* TARGET_HAS_PRECISE_SMC */
1926
6d9abf85 1927 assert_page_locked(p);
e505a063 1928
baea4fae 1929#if defined(TARGET_HAS_PRECISE_SMC)
4917cf44
AF
1930 if (cpu != NULL) {
1931 env = cpu->env_ptr;
d77953b9 1932 }
4917cf44 1933#endif
5b6dd868
BS
1934
1935 /* we remove all the TBs in the range [start, end[ */
1936 /* XXX: see if in some cases it could be faster to invalidate all
1937 the code */
1e05197f 1938 PAGE_FOR_EACH_TB(p, tb, n) {
6d9abf85 1939 assert_page_locked(p);
5b6dd868
BS
1940 /* NOTE: this is subtle as a TB may span two physical pages */
1941 if (n == 0) {
1942 /* NOTE: tb_end may be after the end of the page, but
1943 it is not a problem */
1944 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1945 tb_end = tb_start + tb->size;
1946 } else {
1947 tb_start = tb->page_addr[1];
1948 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1949 }
1950 if (!(tb_end <= start || tb_start >= end)) {
1951#ifdef TARGET_HAS_PRECISE_SMC
1952 if (current_tb_not_found) {
5a7c27bb
RH
1953 current_tb_not_found = false;
1954 /* now we have a real cpu fault */
1955 current_tb = tcg_tb_lookup(retaddr);
5b6dd868
BS
1956 }
1957 if (current_tb == tb &&
194125e3 1958 (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
5a7c27bb
RH
1959 /*
1960 * If we are modifying the current TB, we must stop
1961 * its execution. We could be more precise by checking
1962 * that the modification is after the current PC, but it
1963 * would require a specialized function to partially
1964 * restore the CPU state.
1965 */
1966 current_tb_modified = true;
1967 cpu_restore_state_from_tb(cpu, current_tb, retaddr, true);
5b6dd868
BS
1968 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1969 &current_flags);
1970 }
1971#endif /* TARGET_HAS_PRECISE_SMC */
0b5c91f7 1972 tb_phys_invalidate__locked(tb);
5b6dd868 1973 }
5b6dd868
BS
1974 }
1975#if !defined(CONFIG_USER_ONLY)
1976 /* if no code remaining, no need to continue to use slow writes */
1977 if (!p->first_tb) {
1978 invalidate_page_bitmap(p);
fc377bcf 1979 tlb_unprotect_code(start);
5b6dd868
BS
1980 }
1981#endif
1982#ifdef TARGET_HAS_PRECISE_SMC
1983 if (current_tb_modified) {
0b5c91f7 1984 page_collection_unlock(pages);
9b990ee5
RH
1985 /* Force execution of one insn next time. */
1986 cpu->cflags_next_tb = 1 | curr_cflags();
0ac20318 1987 mmap_unlock();
6886b980 1988 cpu_loop_exit_noexc(cpu);
5b6dd868
BS
1989 }
1990#endif
1991}
1992
0b5c91f7
EC
1993/*
1994 * Invalidate all TBs which intersect with the target physical address range
1995 * [start;end[. NOTE: start and end must refer to the *same* physical page.
 1996 * When the invalidation is triggered by a real cpu write access, the
 1997 * virtual CPU will exit the current TB if code is modified inside
 1998 * this TB.
1999 *
0ac20318 2000 * Called with mmap_lock held for user-mode emulation
0b5c91f7 2001 */
ce9f5e27 2002void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end)
0b5c91f7
EC
2003{
2004 struct page_collection *pages;
2005 PageDesc *p;
2006
2007 assert_memory_lock();
0b5c91f7
EC
2008
2009 p = page_find(start >> TARGET_PAGE_BITS);
2010 if (p == NULL) {
2011 return;
2012 }
2013 pages = page_collection_lock(start, end);
ce9f5e27 2014 tb_invalidate_phys_page_range__locked(pages, p, start, end, 0);
0b5c91f7
EC
2015 page_collection_unlock(pages);
2016}
2017
45c73de5
EC
2018/*
2019 * Invalidate all TBs which intersect with the target physical address range
2020 * [start;end[. NOTE: start and end may refer to *different* physical pages.
 2021 * When the invalidation is triggered by a real cpu write access, the
 2022 * virtual CPU will exit the current TB if code is modified inside
 2023 * this TB.
2024 *
0ac20318 2025 * Called with mmap_lock held for user-mode emulation.
45c73de5 2026 */
8bca9a03
PB
2027#ifdef CONFIG_SOFTMMU
2028void tb_invalidate_phys_range(ram_addr_t start, ram_addr_t end)
2029#else
2030void tb_invalidate_phys_range(target_ulong start, target_ulong end)
2031#endif
45c73de5 2032{
0b5c91f7 2033 struct page_collection *pages;
45c73de5
EC
2034 tb_page_addr_t next;
2035
0ac20318
EC
2036 assert_memory_lock();
2037
0b5c91f7 2038 pages = page_collection_lock(start, end);
45c73de5
EC
2039 for (next = (start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2040 start < end;
2041 start = next, next += TARGET_PAGE_SIZE) {
0b5c91f7 2042 PageDesc *pd = page_find(start >> TARGET_PAGE_BITS);
45c73de5
EC
2043 tb_page_addr_t bound = MIN(next, end);
2044
0b5c91f7
EC
2045 if (pd == NULL) {
2046 continue;
2047 }
2048 tb_invalidate_phys_page_range__locked(pages, pd, start, bound, 0);
45c73de5 2049 }
0b5c91f7 2050 page_collection_unlock(pages);
45c73de5
EC
2051}
2052
6fad459c 2053#ifdef CONFIG_SOFTMMU
ba051fb5
AB
2054/* len must be <= 8 and start must be a multiple of len.
 2055 * Called via softmmu_template.h when code areas are written to with the
8d04fb55 2056 * iothread mutex not held.
0ac20318
EC
2057 *
2058 * Call with all @pages in the range [@start, @start + len[ locked.
ba051fb5 2059 */
0ac20318 2060void tb_invalidate_phys_page_fast(struct page_collection *pages,
5a7c27bb
RH
2061 tb_page_addr_t start, int len,
2062 uintptr_t retaddr)
5b6dd868
BS
2063{
2064 PageDesc *p;
5b6dd868 2065
ba051fb5
AB
2066 assert_memory_lock();
2067
5b6dd868
BS
2068 p = page_find(start >> TARGET_PAGE_BITS);
2069 if (!p) {
2070 return;
2071 }
0b5c91f7 2072
6d9abf85 2073 assert_page_locked(p);
fc377bcf
PB
2074 if (!p->code_bitmap &&
2075 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
fc377bcf
PB
2076 build_page_bitmap(p);
2077 }
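 /* code_bitmap has one bit per byte of the page covered by translated
    code; if none of the bytes being written is covered, the costly
    invalidation can be skipped. */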
5b6dd868 2078 if (p->code_bitmap) {
510a647f
EC
2079 unsigned int nr;
2080 unsigned long b;
2081
2082 nr = start & ~TARGET_PAGE_MASK;
2083 b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
5b6dd868
BS
2084 if (b & ((1 << len) - 1)) {
2085 goto do_invalidate;
2086 }
2087 } else {
2088 do_invalidate:
5a7c27bb
RH
2089 tb_invalidate_phys_page_range__locked(pages, p, start, start + len,
2090 retaddr);
5b6dd868
BS
2091 }
2092}
6fad459c 2093#else
75809229
PM
2094/* Called with mmap_lock held. If pc is not 0 then it indicates the
2095 * host PC of the faulting store instruction that caused this invalidate.
2096 * Returns true if the caller needs to abort execution of the current
2097 * TB (because it was modified by this store and the guest CPU has
2098 * precise-SMC semantics).
2099 */
2100static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
5b6dd868
BS
2101{
2102 TranslationBlock *tb;
2103 PageDesc *p;
2104 int n;
2105#ifdef TARGET_HAS_PRECISE_SMC
2106 TranslationBlock *current_tb = NULL;
4917cf44
AF
2107 CPUState *cpu = current_cpu;
2108 CPUArchState *env = NULL;
5b6dd868
BS
2109 int current_tb_modified = 0;
2110 target_ulong current_pc = 0;
2111 target_ulong current_cs_base = 0;
89fee74a 2112 uint32_t current_flags = 0;
5b6dd868
BS
2113#endif
2114
ba051fb5
AB
2115 assert_memory_lock();
2116
5b6dd868
BS
2117 addr &= TARGET_PAGE_MASK;
2118 p = page_find(addr >> TARGET_PAGE_BITS);
2119 if (!p) {
75809229 2120 return false;
5b6dd868 2121 }
a5e99826 2122
5b6dd868 2123#ifdef TARGET_HAS_PRECISE_SMC
1e05197f 2124 if (p->first_tb && pc != 0) {
be2cdc5e 2125 current_tb = tcg_tb_lookup(pc);
5b6dd868 2126 }
4917cf44
AF
2127 if (cpu != NULL) {
2128 env = cpu->env_ptr;
d77953b9 2129 }
5b6dd868 2130#endif
6d9abf85 2131 assert_page_locked(p);
1e05197f 2132 PAGE_FOR_EACH_TB(p, tb, n) {
5b6dd868
BS
2133#ifdef TARGET_HAS_PRECISE_SMC
2134 if (current_tb == tb &&
194125e3 2135 (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
5b6dd868
BS
2136 /* If we are modifying the current TB, we must stop
2137 its execution. We could be more precise by checking
2138 that the modification is after the current PC, but it
2139 would require a specialized function to partially
2140 restore the CPU state */
2141
2142 current_tb_modified = 1;
afd46fca 2143 cpu_restore_state_from_tb(cpu, current_tb, pc, true);
5b6dd868
BS
2144 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
2145 &current_flags);
2146 }
2147#endif /* TARGET_HAS_PRECISE_SMC */
2148 tb_phys_invalidate(tb, addr);
5b6dd868 2149 }
1e05197f 2150 p->first_tb = (uintptr_t)NULL;
5b6dd868
BS
2151#ifdef TARGET_HAS_PRECISE_SMC
2152 if (current_tb_modified) {
9b990ee5
RH
2153 /* Force execution of one insn next time. */
2154 cpu->cflags_next_tb = 1 | curr_cflags();
75809229 2155 return true;
5b6dd868
BS
2156 }
2157#endif
a5e99826 2158
75809229 2159 return false;
5b6dd868
BS
2160}
2161#endif
2162
0ac20318 2163/* user-mode: call with mmap_lock held */
ae57db63 2164void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr)
5b6dd868
BS
2165{
2166 TranslationBlock *tb;
2167
0ac20318
EC
2168 assert_memory_lock();
2169
ae57db63 2170 tb = tcg_tb_lookup(retaddr);
8d302e76
AJ
2171 if (tb) {
2172 /* We can use retranslation to find the PC. */
ae57db63 2173 cpu_restore_state_from_tb(cpu, tb, retaddr, true);
8d302e76
AJ
2174 tb_phys_invalidate(tb, -1);
2175 } else {
2176 /* The exception probably happened in a helper. The CPU state should
2177 have been saved before calling it. Fetch the PC from there. */
2178 CPUArchState *env = cpu->env_ptr;
2179 target_ulong pc, cs_base;
2180 tb_page_addr_t addr;
89fee74a 2181 uint32_t flags;
8d302e76
AJ
2182
2183 cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
2184 addr = get_page_addr_code(env, pc);
c360a0fd
PM
2185 if (addr != -1) {
2186 tb_invalidate_phys_range(addr, addr + 1);
2187 }
5b6dd868 2188 }
5b6dd868
BS
2189}
2190
2191#ifndef CONFIG_USER_ONLY
5b6dd868 2192/* in deterministic execution mode, instructions doing device I/Os
8d04fb55
JK
2193 * must be at the end of the TB.
2194 *
2195 * Called by softmmu_template.h, with iothread mutex not held.
2196 */
90b40a69 2197void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
5b6dd868 2198{
a47dddd7 2199#if defined(TARGET_MIPS) || defined(TARGET_SH4)
90b40a69 2200 CPUArchState *env = cpu->env_ptr;
a47dddd7 2201#endif
5b6dd868 2202 TranslationBlock *tb;
87f963be 2203 uint32_t n;
5b6dd868 2204
be2cdc5e 2205 tb = tcg_tb_lookup(retaddr);
5b6dd868 2206 if (!tb) {
a47dddd7 2207 cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
5b6dd868
BS
2208 (void *)retaddr);
2209 }
afd46fca 2210 cpu_restore_state_from_tb(cpu, tb, retaddr, true);
87f963be 2211
5b6dd868
BS
2212 /* On MIPS and SH, delay slot instructions can only be restarted if
2213 they were already the first instruction in the TB. If this is not
2214 the first instruction in a TB then re-execute the preceding
2215 branch. */
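 /* n is the insn budget for the retried TB: 1 normally, 2 when the
    preceding branch has to be re-executed along with its delay slot. */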
87f963be 2216 n = 1;
5b6dd868 2217#if defined(TARGET_MIPS)
87f963be
RH
2218 if ((env->hflags & MIPS_HFLAG_BMASK) != 0
2219 && env->active_tc.PC != tb->pc) {
c3577479 2220 env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
5e140196 2221 cpu_neg(cpu)->icount_decr.u16.low++;
5b6dd868 2222 env->hflags &= ~MIPS_HFLAG_BMASK;
87f963be 2223 n = 2;
5b6dd868
BS
2224 }
2225#elif defined(TARGET_SH4)
2226 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
87f963be 2227 && env->pc != tb->pc) {
5b6dd868 2228 env->pc -= 2;
5e140196 2229 cpu_neg(cpu)->icount_decr.u16.low++;
5b6dd868 2230 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
87f963be 2231 n = 2;
5b6dd868
BS
2232 }
2233#endif
5b6dd868 2234
87f963be
RH
2235 /* Generate a new TB executing the I/O insn. */
2236 cpu->cflags_next_tb = curr_cflags() | CF_LAST_IO | n;
9b990ee5 2237
194125e3 2238 if (tb_cflags(tb) & CF_NOCACHE) {
02d57ea1
SF
2239 if (tb->orig_tb) {
2240 /* Invalidate original TB if this TB was generated in
2241 * cpu_exec_nocache() */
2242 tb_phys_invalidate(tb->orig_tb, -1);
2243 }
be2cdc5e 2244 tcg_tb_remove(tb);
938e897a 2245 tb_destroy(tb);
02d57ea1 2246 }
a5e99826 2247
5b6dd868 2248 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
a5e99826
FK
2249 * the first in the TB) then we end up generating a whole new TB and
2250 * repeating the fault, which is horribly inefficient.
2251 * Better would be to execute just this insn uncached, or generate a
2252 * second new TB.
a5e99826 2253 */
6886b980 2254 cpu_loop_exit_noexc(cpu);
5b6dd868
BS
2255}
2256
f3ced3c5 2257static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
5b6dd868 2258{
f3ced3c5 2259 unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr);
5b6dd868 2260
f3ced3c5
EC
2261 for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
2262 atomic_set(&cpu->tb_jmp_cache[i0 + i], NULL);
2263 }
2264}
2265
2266void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
2267{
5b6dd868
BS
2268 /* Discard jump cache entries for any tb which might potentially
2269 overlap the flushed page. */
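 /* A TB can start on the preceding page and run into the flushed one,
    and the jump cache is hashed on the TB's start address, so the
    preceding page's entries must be dropped as well. */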
f3ced3c5
EC
2270 tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
2271 tb_jmp_cache_clear_page(cpu, addr);
5b6dd868
BS
2272}
2273
3de2faa9 2274static void print_qht_statistics(struct qht_stats hst)
7266ae91
EC
2275{
2276 uint32_t hgram_opts;
2277 size_t hgram_bins;
2278 char *hgram;
2279
2280 if (!hst.head_buckets) {
2281 return;
2282 }
3de2faa9 2283 qemu_printf("TB hash buckets %zu/%zu (%0.2f%% head buckets used)\n",
7266ae91
EC
2284 hst.used_head_buckets, hst.head_buckets,
2285 (double)hst.used_head_buckets / hst.head_buckets * 100);
2286
2287 hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
2288 hgram_opts |= QDIST_PR_100X | QDIST_PR_PERCENT;
2289 if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) {
2290 hgram_opts |= QDIST_PR_NODECIMAL;
2291 }
2292 hgram = qdist_pr(&hst.occupancy, 10, hgram_opts);
3de2faa9 2293 qemu_printf("TB hash occupancy %0.2f%% avg chain occ. Histogram: %s\n",
7266ae91
EC
2294 qdist_avg(&hst.occupancy) * 100, hgram);
2295 g_free(hgram);
2296
2297 hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
2298 hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain);
2299 if (hgram_bins > 10) {
2300 hgram_bins = 10;
2301 } else {
2302 hgram_bins = 0;
2303 hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE;
2304 }
2305 hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts);
3de2faa9 2306 qemu_printf("TB hash avg chain %0.3f buckets. Histogram: %s\n",
7266ae91
EC
2307 qdist_avg(&hst.chain), hgram);
2308 g_free(hgram);
2309}
2310
2ac01d6d 2311struct tb_tree_stats {
be2cdc5e 2312 size_t nb_tbs;
f19c6cc6 2313 size_t host_size;
2ac01d6d
EC
2314 size_t target_size;
2315 size_t max_target_size;
2316 size_t direct_jmp_count;
2317 size_t direct_jmp2_count;
2318 size_t cross_page;
2319};
2320
2321static gboolean tb_tree_stats_iter(gpointer key, gpointer value, gpointer data)
2322{
2323 const TranslationBlock *tb = value;
2324 struct tb_tree_stats *tst = data;
2325
be2cdc5e 2326 tst->nb_tbs++;
f19c6cc6 2327 tst->host_size += tb->tc.size;
2ac01d6d
EC
2328 tst->target_size += tb->size;
2329 if (tb->size > tst->max_target_size) {
2330 tst->max_target_size = tb->size;
2331 }
2332 if (tb->page_addr[1] != -1) {
2333 tst->cross_page++;
2334 }
2335 if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
2336 tst->direct_jmp_count++;
2337 if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
2338 tst->direct_jmp2_count++;
2339 }
2340 }
2341 return false;
2342}
2343
3de2faa9 2344void dump_exec_info(void)
5b6dd868 2345{
2ac01d6d 2346 struct tb_tree_stats tst = {};
329844d4 2347 struct qht_stats hst;
e09de0a2 2348 size_t nb_tbs, flush_full, flush_part, flush_elide;
5b6dd868 2349
be2cdc5e
EC
2350 tcg_tb_foreach(tb_tree_stats_iter, &tst);
2351 nb_tbs = tst.nb_tbs;
5b6dd868 2352 /* XXX: avoid using doubles ? */
3de2faa9 2353 qemu_printf("Translation buffer state:\n");
f19c6cc6
EC
2354 /*
2355 * Report total code size including the padding and TB structs;
2356 * otherwise users might think "-tb-size" is not honoured.
2357 * For avg host size we use the precise numbers from tb_tree_stats though.
2358 */
3de2faa9 2359 qemu_printf("gen code size %zu/%zu\n",
e8feb96f 2360 tcg_code_size(), tcg_code_capacity());
3de2faa9
MA
2361 qemu_printf("TB count %zu\n", nb_tbs);
2362 qemu_printf("TB avg target size %zu max=%zu bytes\n",
2ac01d6d
EC
2363 nb_tbs ? tst.target_size / nb_tbs : 0,
2364 tst.max_target_size);
3de2faa9 2365 qemu_printf("TB avg host size %zu bytes (expansion ratio: %0.1f)\n",
f19c6cc6
EC
2366 nb_tbs ? tst.host_size / nb_tbs : 0,
2367 tst.target_size ? (double)tst.host_size / tst.target_size : 0);
3de2faa9
MA
2368 qemu_printf("cross page TB count %zu (%zu%%)\n", tst.cross_page,
2369 nb_tbs ? (tst.cross_page * 100) / nb_tbs : 0);
2370 qemu_printf("direct jump count %zu (%zu%%) (2 jumps=%zu %zu%%)\n",
2ac01d6d
EC
2371 tst.direct_jmp_count,
2372 nb_tbs ? (tst.direct_jmp_count * 100) / nb_tbs : 0,
2373 tst.direct_jmp2_count,
2374 nb_tbs ? (tst.direct_jmp2_count * 100) / nb_tbs : 0);
329844d4 2375
44ded3d0 2376 qht_statistics_init(&tb_ctx.htable, &hst);
3de2faa9 2377 print_qht_statistics(hst);
329844d4
EC
2378 qht_statistics_destroy(&hst);
2379
3de2faa9
MA
2380 qemu_printf("\nStatistics:\n");
2381 qemu_printf("TB flush count %u\n",
44ded3d0 2382 atomic_read(&tb_ctx.tb_flush_count));
3de2faa9
MA
2383 qemu_printf("TB invalidate count %zu\n",
2384 tcg_tb_phys_invalidate_count());
e09de0a2
RH
2385
2386 tlb_flush_counts(&flush_full, &flush_part, &flush_elide);
3de2faa9
MA
2387 qemu_printf("TLB full flushes %zu\n", flush_full);
2388 qemu_printf("TLB partial flushes %zu\n", flush_part);
2389 qemu_printf("TLB elided flushes %zu\n", flush_elide);
2390 tcg_dump_info();
5b6dd868
BS
2391}
2392
d4c51a0a 2393void dump_opcount_info(void)
246ae24d 2394{
d4c51a0a 2395 tcg_dump_op_count();
246ae24d
MF
2396}
2397
5b6dd868
BS
2398#else /* CONFIG_USER_ONLY */
2399
c3affe56 2400void cpu_interrupt(CPUState *cpu, int mask)
5b6dd868 2401{
8d04fb55 2402 g_assert(qemu_mutex_iothread_locked());
259186a7 2403 cpu->interrupt_request |= mask;
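 /* Make icount_decr negative so that the check at the start of the next
    TB drops out to the main loop, where the interrupt is serviced. */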
5e140196 2404 atomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
5b6dd868
BS
2405}
2406
2407/*
2408 * Walks guest process memory "regions" one by one
2409 * and calls callback function 'fn' for each region.
2410 */
2411struct walk_memory_regions_data {
2412 walk_memory_regions_fn fn;
2413 void *priv;
1a1c4db9 2414 target_ulong start;
5b6dd868
BS
2415 int prot;
2416};
2417
2418static int walk_memory_regions_end(struct walk_memory_regions_data *data,
1a1c4db9 2419 target_ulong end, int new_prot)
5b6dd868 2420{
1a1c4db9 2421 if (data->start != -1u) {
5b6dd868
BS
2422 int rc = data->fn(data->priv, data->start, end, data->prot);
2423 if (rc != 0) {
2424 return rc;
2425 }
2426 }
2427
1a1c4db9 2428 data->start = (new_prot ? end : -1u);
5b6dd868
BS
2429 data->prot = new_prot;
2430
2431 return 0;
2432}
2433
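/* Recursively walk one level of the l1_map radix tree, merging runs of
   pages that share the same protection into single 'fn' callbacks via
   walk_memory_regions_end(). */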
2434static int walk_memory_regions_1(struct walk_memory_regions_data *data,
1a1c4db9 2435 target_ulong base, int level, void **lp)
5b6dd868 2436{
1a1c4db9 2437 target_ulong pa;
5b6dd868
BS
2438 int i, rc;
2439
2440 if (*lp == NULL) {
2441 return walk_memory_regions_end(data, base, 0);
2442 }
2443
2444 if (level == 0) {
2445 PageDesc *pd = *lp;
2446
03f49957 2447 for (i = 0; i < V_L2_SIZE; ++i) {
5b6dd868
BS
2448 int prot = pd[i].flags;
2449
2450 pa = base | (i << TARGET_PAGE_BITS);
2451 if (prot != data->prot) {
2452 rc = walk_memory_regions_end(data, pa, prot);
2453 if (rc != 0) {
2454 return rc;
2455 }
2456 }
2457 }
2458 } else {
2459 void **pp = *lp;
2460
03f49957 2461 for (i = 0; i < V_L2_SIZE; ++i) {
1a1c4db9 2462 pa = base | ((target_ulong)i <<
03f49957 2463 (TARGET_PAGE_BITS + V_L2_BITS * level));
5b6dd868
BS
2464 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2465 if (rc != 0) {
2466 return rc;
2467 }
2468 }
2469 }
2470
2471 return 0;
2472}
2473
2474int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2475{
2476 struct walk_memory_regions_data data;
66ec9f49 2477 uintptr_t i, l1_sz = v_l1_size;
5b6dd868
BS
2478
2479 data.fn = fn;
2480 data.priv = priv;
1a1c4db9 2481 data.start = -1u;
5b6dd868
BS
2482 data.prot = 0;
2483
66ec9f49
VK
2484 for (i = 0; i < l1_sz; i++) {
2485 target_ulong base = i << (v_l1_shift + TARGET_PAGE_BITS);
2486 int rc = walk_memory_regions_1(&data, base, v_l2_levels, l1_map + i);
5b6dd868
BS
2487 if (rc != 0) {
2488 return rc;
2489 }
2490 }
2491
2492 return walk_memory_regions_end(&data, 0, 0);
2493}
2494
1a1c4db9
MI
2495static int dump_region(void *priv, target_ulong start,
2496 target_ulong end, unsigned long prot)
5b6dd868
BS
2497{
2498 FILE *f = (FILE *)priv;
2499
1a1c4db9
MI
2500 (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
2501 " "TARGET_FMT_lx" %c%c%c\n",
5b6dd868
BS
2502 start, end, end - start,
2503 ((prot & PAGE_READ) ? 'r' : '-'),
2504 ((prot & PAGE_WRITE) ? 'w' : '-'),
2505 ((prot & PAGE_EXEC) ? 'x' : '-'));
2506
2507 return 0;
2508}
2509
2510/* dump memory mappings */
2511void page_dump(FILE *f)
2512{
1a1c4db9 2513 const int length = sizeof(target_ulong) * 2;
227b8175
SW
2514 (void) fprintf(f, "%-*s %-*s %-*s %s\n",
2515 length, "start", length, "end", length, "size", "prot");
5b6dd868
BS
2516 walk_memory_regions(f, dump_region);
2517}
2518
2519int page_get_flags(target_ulong address)
2520{
2521 PageDesc *p;
2522
2523 p = page_find(address >> TARGET_PAGE_BITS);
2524 if (!p) {
2525 return 0;
2526 }
2527 return p->flags;
2528}
2529
2530/* Modify the flags of a page and invalidate the code if necessary.
 2531 The flag PAGE_WRITE_ORG is set automatically depending
2532 on PAGE_WRITE. The mmap_lock should already be held. */
2533void page_set_flags(target_ulong start, target_ulong end, int flags)
2534{
2535 target_ulong addr, len;
2536
2537 /* This function should never be called with addresses outside the
2538 guest address space. If this assert fires, it probably indicates
2539 a missing call to h2g_valid. */
7d8cbbab 2540 assert(end - 1 <= GUEST_ADDR_MAX);
5b6dd868 2541 assert(start < end);
e505a063 2542 assert_memory_lock();
5b6dd868
BS
2543
2544 start = start & TARGET_PAGE_MASK;
2545 end = TARGET_PAGE_ALIGN(end);
2546
2547 if (flags & PAGE_WRITE) {
2548 flags |= PAGE_WRITE_ORG;
2549 }
2550
2551 for (addr = start, len = end - start;
2552 len != 0;
2553 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2554 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2555
2556 /* If the write protection bit is set, then we invalidate
2557 the code inside. */
2558 if (!(p->flags & PAGE_WRITE) &&
2559 (flags & PAGE_WRITE) &&
2560 p->first_tb) {
75809229 2561 tb_invalidate_phys_page(addr, 0);
5b6dd868
BS
2562 }
2563 p->flags = flags;
2564 }
2565}
2566
2567int page_check_range(target_ulong start, target_ulong len, int flags)
2568{
2569 PageDesc *p;
2570 target_ulong end;
2571 target_ulong addr;
2572
2573 /* This function should never be called with addresses outside the
2574 guest address space. If this assert fires, it probably indicates
2575 a missing call to h2g_valid. */
2576#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
1a1c4db9 2577 assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
5b6dd868
BS
2578#endif
2579
2580 if (len == 0) {
2581 return 0;
2582 }
2583 if (start + len - 1 < start) {
2584 /* We've wrapped around. */
2585 return -1;
2586 }
2587
 2588 /* must do this before we lose bits in the next step */
2589 end = TARGET_PAGE_ALIGN(start + len);
2590 start = start & TARGET_PAGE_MASK;
2591
2592 for (addr = start, len = end - start;
2593 len != 0;
2594 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2595 p = page_find(addr >> TARGET_PAGE_BITS);
2596 if (!p) {
2597 return -1;
2598 }
2599 if (!(p->flags & PAGE_VALID)) {
2600 return -1;
2601 }
2602
2603 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
2604 return -1;
2605 }
2606 if (flags & PAGE_WRITE) {
2607 if (!(p->flags & PAGE_WRITE_ORG)) {
2608 return -1;
2609 }
2610 /* unprotect the page if it was put read-only because it
2611 contains translated code */
2612 if (!(p->flags & PAGE_WRITE)) {
f213e72f 2613 if (!page_unprotect(addr, 0)) {
5b6dd868
BS
2614 return -1;
2615 }
2616 }
5b6dd868
BS
2617 }
2618 }
2619 return 0;
2620}
2621
2622/* called from signal handler: invalidate the code and unprotect the
f213e72f
PM
2623 * page. Return 0 if the fault was not handled, 1 if it was handled,
2624 * and 2 if it was handled but the caller must cause the TB to be
2625 * immediately exited. (We can only return 2 if the 'pc' argument is
2626 * non-zero.)
2627 */
2628int page_unprotect(target_ulong address, uintptr_t pc)
5b6dd868
BS
2629{
2630 unsigned int prot;
7399a337 2631 bool current_tb_invalidated;
5b6dd868
BS
2632 PageDesc *p;
2633 target_ulong host_start, host_end, addr;
2634
2635 /* Technically this isn't safe inside a signal handler. However we
2636 know this only ever happens in a synchronous SEGV handler, so in
2637 practice it seems to be ok. */
2638 mmap_lock();
2639
2640 p = page_find(address >> TARGET_PAGE_BITS);
2641 if (!p) {
2642 mmap_unlock();
2643 return 0;
2644 }
2645
2646 /* if the page was really writable, then we change its
2647 protection back to writable */
9c4bbee9 2648 if (p->flags & PAGE_WRITE_ORG) {
7399a337 2649 current_tb_invalidated = false;
9c4bbee9
PM
2650 if (p->flags & PAGE_WRITE) {
2651 /* If the page is actually marked WRITE then assume this is because
2652 * this thread raced with another one which got here first and
2653 * set the page to PAGE_WRITE and did the TB invalidate for us.
2654 */
2655#ifdef TARGET_HAS_PRECISE_SMC
be2cdc5e 2656 TranslationBlock *current_tb = tcg_tb_lookup(pc);
9c4bbee9
PM
2657 if (current_tb) {
2658 current_tb_invalidated = tb_cflags(current_tb) & CF_INVALID;
6eb062ab 2659 }
5b6dd868 2660#endif
9c4bbee9
PM
2661 } else {
2662 host_start = address & qemu_host_page_mask;
2663 host_end = host_start + qemu_host_page_size;
2664
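 /* The host page may span several target pages: mark them all
    writable and accumulate their protection bits for the final
    mprotect() below. */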
2665 prot = 0;
2666 for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
2667 p = page_find(addr >> TARGET_PAGE_BITS);
2668 p->flags |= PAGE_WRITE;
2669 prot |= p->flags;
2670
2671 /* and since the content will be modified, we must invalidate
2672 the corresponding translated code. */
2673 current_tb_invalidated |= tb_invalidate_phys_page(addr, pc);
2674#ifdef CONFIG_USER_ONLY
2675 if (DEBUG_TB_CHECK_GATE) {
2676 tb_invalidate_check(addr);
2677 }
2678#endif
2679 }
2680 mprotect((void *)g2h(host_start), qemu_host_page_size,
2681 prot & PAGE_BITS);
5b6dd868 2682 }
5b6dd868 2683 mmap_unlock();
7399a337
SS
2684 /* If current TB was invalidated return to main loop */
2685 return current_tb_invalidated ? 2 : 1;
5b6dd868
BS
2686 }
2687 mmap_unlock();
2688 return 0;
2689}
2690#endif /* CONFIG_USER_ONLY */
2cd53943
TH
2691
2692/* This is a wrapper for common code that can not use CONFIG_SOFTMMU */
2693void tcg_flush_softmmu_tlb(CPUState *cs)
2694{
2695#ifdef CONFIG_SOFTMMU
2696 tlb_flush(cs);
2697#endif
2698}