Git Repo - qemu.git/blame - accel/tcg/translate-all.c
tcg: Pass generic CPUState to gen_intermediate_code()
d19893da
FB
1/*
2 * Host code generation
5fafdf24 3 *
d19893da
FB
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
d19893da 18 */
5b6dd868
BS
19#ifdef _WIN32
20#include <windows.h>
5b6dd868 21#endif
7b31bbc2 22#include "qemu/osdep.h"
d19893da 23
2054396a 24
5b6dd868 25#include "qemu-common.h"
af5ad107 26#define NO_CPU_IO_DEFS
d3eead2e 27#include "cpu.h"
244f1441 28#include "trace.h"
76cad711 29#include "disas/disas.h"
63c91552 30#include "exec/exec-all.h"
57fec1fe 31#include "tcg.h"
5b6dd868
BS
32#if defined(CONFIG_USER_ONLY)
33#include "qemu.h"
301e40ed 34#include "exec/exec-all.h"
5b6dd868
BS
35#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
36#include <sys/param.h>
37#if __FreeBSD_version >= 700104
38#define HAVE_KINFO_GETVMMAP
39#define sigqueue sigqueue_freebsd /* avoid redefinition */
5b6dd868
BS
40#include <sys/proc.h>
41#include <machine/profile.h>
42#define _KERNEL
43#include <sys/user.h>
44#undef _KERNEL
45#undef sigqueue
46#include <libutil.h>
47#endif
48#endif
0bc3cd62
PB
49#else
50#include "exec/address-spaces.h"
5b6dd868
BS
51#endif
52
022c62cb 53#include "exec/cputlb.h"
e1b89321 54#include "exec/tb-hash.h"
5b6dd868 55#include "translate-all.h"
510a647f 56#include "qemu/bitmap.h"
61a67f71 57#include "qemu/error-report.h"
0aa09897 58#include "qemu/timer.h"
8d04fb55 59#include "qemu/main-loop.h"
508127e2 60#include "exec/log.h"
d2528bdc 61#include "sysemu/cpus.h"
5b6dd868 62
955939a2
AB
63/* #define DEBUG_TB_INVALIDATE */
64/* #define DEBUG_TB_FLUSH */
5b6dd868 65/* make various TB consistency checks */
955939a2 66/* #define DEBUG_TB_CHECK */
5b6dd868
BS
67
68#if !defined(CONFIG_USER_ONLY)
69/* TB consistency checks only implemented for usermode emulation. */
70#undef DEBUG_TB_CHECK
71#endif
72
301e40ed
AB
73/* Access to the various translation structures needs to be serialised
74 * via locks for consistency. This is automatic for SoftMMU-based system
75 * emulation due to its single-threaded nature. In user-mode emulation,
76 * access to the memory-related structures is protected by the
77 * mmap_lock.
78 */
301e40ed 79#ifdef CONFIG_SOFTMMU
2f169606 80#define assert_memory_lock() tcg_debug_assert(have_tb_lock)
301e40ed 81#else
6ac3d7e8 82#define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
301e40ed
AB
83#endif
84
5b6dd868
BS
85#define SMC_BITMAP_USE_THRESHOLD 10
86
5b6dd868
BS
87typedef struct PageDesc {
88 /* list of TBs intersecting this ram page */
89 TranslationBlock *first_tb;
6fad459c 90#ifdef CONFIG_SOFTMMU
5b6dd868
BS
91 /* in order to optimize self-modifying code, we count the number
92 of lookups we do on a given page to decide when to use a bitmap */
93 unsigned int code_write_count;
510a647f 94 unsigned long *code_bitmap;
6fad459c 95#else
5b6dd868
BS
96 unsigned long flags;
97#endif
98} PageDesc;
99
100/* In system mode we want L1_MAP to be based on ram offsets,
101 while in user mode we want it to be based on virtual addresses. */
102#if !defined(CONFIG_USER_ONLY)
103#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
104# define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
105#else
106# define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
107#endif
108#else
109# define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
110#endif
111
03f49957
PB
112/* Size of the L2 (and L3, etc) page tables. */
113#define V_L2_BITS 10
114#define V_L2_SIZE (1 << V_L2_BITS)
115
61a67f71
LV
116/* Make sure all possible CPU event bits fit in tb->trace_vcpu_dstate */
117QEMU_BUILD_BUG_ON(CPU_TRACE_DSTATE_MAX_EVENTS >
118 sizeof(((TranslationBlock *)0)->trace_vcpu_dstate)
119 * BITS_PER_BYTE);
120
66ec9f49
VK
121/*
122 * L1 Mapping properties
123 */
124static int v_l1_size;
125static int v_l1_shift;
126static int v_l2_levels;
127
128/* The bottom level has pointers to PageDesc, and is indexed by
129 * anything from 4 to (V_L2_BITS + 3) bits, depending on target page size.
130 */
131#define V_L1_MIN_BITS 4
132#define V_L1_MAX_BITS (V_L2_BITS + 3)
133#define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS)
134
135static void *l1_map[V_L1_MAX_SIZE];
5b6dd868 136
57fec1fe
FB
137/* code generation context */
138TCGContext tcg_ctx;
fdbc2b57 139bool parallel_cpus;
d19893da 140
677ef623 141/* translation block context */
677ef623 142__thread int have_tb_lock;
677ef623 143
66ec9f49
VK
144static void page_table_config_init(void)
145{
146 uint32_t v_l1_bits;
147
148 assert(TARGET_PAGE_BITS);
149 /* The bits remaining after N lower levels of page tables. */
150 v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS;
151 if (v_l1_bits < V_L1_MIN_BITS) {
152 v_l1_bits += V_L2_BITS;
153 }
154
155 v_l1_size = 1 << v_l1_bits;
156 v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits;
157 v_l2_levels = v_l1_shift / V_L2_BITS - 1;
158
159 assert(v_l1_bits <= V_L1_MAX_BITS);
160 assert(v_l1_shift % V_L2_BITS == 0);
161 assert(v_l2_levels >= 0);
162}
163
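/* Worked example (illustrative; the actual values depend on the target):
 * with TARGET_PAGE_BITS == 12, V_L2_BITS == 10 and a 32-bit
 * L1_MAP_ADDR_SPACE_BITS, (32 - 12) % 10 == 0 is below V_L1_MIN_BITS, so
 * v_l1_bits is bumped to 10: v_l1_size == 1024, v_l1_shift == 10 and
 * v_l2_levels == 0, i.e. a 1024-entry L1 table pointing directly at
 * 1024-entry PageDesc leaves. With a 64-bit address space instead,
 * (64 - 12) % 10 == 2 rounds up to 12, giving v_l1_size == 4096,
 * v_l1_shift == 40 and v_l2_levels == 3 intermediate levels.
 */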
6ac3d7e8
PK
164#define assert_tb_locked() tcg_debug_assert(have_tb_lock)
165#define assert_tb_unlocked() tcg_debug_assert(!have_tb_lock)
6ac3d7e8 166
677ef623
FK
167void tb_lock(void)
168{
6ac3d7e8 169 assert_tb_unlocked();
677ef623
FK
170 qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
171 have_tb_lock++;
677ef623
FK
172}
173
174void tb_unlock(void)
175{
6ac3d7e8 176 assert_tb_locked();
677ef623
FK
177 have_tb_lock--;
178 qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
677ef623
FK
179}
180
181void tb_lock_reset(void)
182{
677ef623
FK
183 if (have_tb_lock) {
184 qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
185 have_tb_lock = 0;
186 }
677ef623
FK
187}
188
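/* A minimal sketch of the intended locking discipline (illustrative only):
 */
#if 0
    tb_lock();
    assert_tb_locked();   /* sections touching TB structures may check this */
    /* ... modify the TB hash table, page lists, jump lists ... */
    tb_unlock();

    /* tb_lock_reset() exists for paths that longjmp back into the cpu_exec
       loop with the lock still held, e.g. via cpu_loop_exit_noexc(). */
#endif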
a8a826a3 189static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);
5b6dd868 190
57fec1fe
FB
191void cpu_gen_init(void)
192{
193 tcg_context_init(&tcg_ctx);
57fec1fe
FB
194}
195
fca8a500
RH
196/* Encode VAL as a signed leb128 sequence at P.
197 Return P incremented past the encoded value. */
198static uint8_t *encode_sleb128(uint8_t *p, target_long val)
199{
200 int more, byte;
201
202 do {
203 byte = val & 0x7f;
204 val >>= 7;
205 more = !((val == 0 && (byte & 0x40) == 0)
206 || (val == -1 && (byte & 0x40) != 0));
207 if (more) {
208 byte |= 0x80;
209 }
210 *p++ = byte;
211 } while (more);
212
213 return p;
214}
215
216/* Decode a signed leb128 sequence at *PP; increment *PP past the
217 decoded value. Return the decoded value. */
218static target_long decode_sleb128(uint8_t **pp)
219{
220 uint8_t *p = *pp;
221 target_long val = 0;
222 int byte, shift = 0;
223
224 do {
225 byte = *p++;
226 val |= (target_ulong)(byte & 0x7f) << shift;
227 shift += 7;
228 } while (byte & 0x80);
229 if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
230 val |= -(target_ulong)1 << shift;
231 }
232
233 *pp = p;
234 return val;
235}
236
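/* A minimal round-trip sketch of the two helpers above ("buf" is a local
 * scratch buffer assumed for illustration, not a name used in this file):
 */
#if 0
    uint8_t buf[16], *p;
    target_long v;

    p = encode_sleb128(buf, -300);  /* emits 0xd4 0x7d, so p == buf + 2 */
    p = buf;
    v = decode_sleb128(&p);         /* v == -300 again, p == buf + 2 */
#endif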
237/* Encode the data collected about the instructions while compiling TB.
238 Place the data at BLOCK, and return the number of bytes consumed.
239
240 The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
241 which come from the target's insn_start data, followed by a uintptr_t
242 which comes from the host pc of the end of the code implementing the insn.
243
244 Each line of the table is encoded as sleb128 deltas from the previous
245 line. The seed for the first line is { tb->pc, 0..., tb->tc_ptr }.
246 That is, the first column is seeded with the guest pc, the last column
247 with the host pc, and the middle columns with zeros. */
248
249static int encode_search(TranslationBlock *tb, uint8_t *block)
250{
b125f9dc 251 uint8_t *highwater = tcg_ctx.code_gen_highwater;
fca8a500
RH
252 uint8_t *p = block;
253 int i, j, n;
254
255 tb->tc_search = block;
256
257 for (i = 0, n = tb->icount; i < n; ++i) {
258 target_ulong prev;
259
260 for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
261 if (i == 0) {
262 prev = (j == 0 ? tb->pc : 0);
263 } else {
264 prev = tcg_ctx.gen_insn_data[i - 1][j];
265 }
266 p = encode_sleb128(p, tcg_ctx.gen_insn_data[i][j] - prev);
267 }
268 prev = (i == 0 ? 0 : tcg_ctx.gen_insn_end_off[i - 1]);
269 p = encode_sleb128(p, tcg_ctx.gen_insn_end_off[i] - prev);
b125f9dc
RH
270
271 /* Test for (pending) buffer overflow. The assumption is that any
272 one row beginning below the high water mark cannot overrun
273 the buffer completely. Thus we can test for overflow after
274 encoding a row without having to check during encoding. */
275 if (unlikely(p > highwater)) {
276 return -1;
277 }
fca8a500
RH
278 }
279
280 return p - block;
281}
282
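/* For example (assuming TARGET_INSN_START_WORDS == 1): a TB at guest pc
 * 0x1000 containing two 4-byte insns, whose host code ends at tc_ptr + 0x20
 * and tc_ptr + 0x38 respectively, logically forms the rows
 *   { 0x1000, 0x20 }
 *   { 0x1004, 0x38 }
 * and, with the first row seeded from { tb->pc, 0 }, is stored as the
 * sleb128 delta pairs { 0, 0x20 } and { 4, 0x18 }.
 */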
7d7500d9
PB
283/* The cpu state corresponding to 'searched_pc' is restored.
284 * Called with tb_lock held.
285 */
74f10515 286static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
a8a826a3 287 uintptr_t searched_pc)
d19893da 288{
fca8a500
RH
289 target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
290 uintptr_t host_pc = (uintptr_t)tb->tc_ptr;
74f10515 291 CPUArchState *env = cpu->env_ptr;
fca8a500
RH
292 uint8_t *p = tb->tc_search;
293 int i, j, num_insns = tb->icount;
57fec1fe 294#ifdef CONFIG_PROFILER
fca8a500 295 int64_t ti = profile_getclock();
57fec1fe
FB
296#endif
297
01ecaf43
RH
298 searched_pc -= GETPC_ADJ;
299
fca8a500
RH
300 if (searched_pc < host_pc) {
301 return -1;
302 }
d19893da 303
fca8a500
RH
304 /* Reconstruct the stored insn data while looking for the point at
305 which the end of the insn exceeds the searched_pc. */
306 for (i = 0; i < num_insns; ++i) {
307 for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
308 data[j] += decode_sleb128(&p);
309 }
310 host_pc += decode_sleb128(&p);
311 if (host_pc > searched_pc) {
312 goto found;
313 }
314 }
315 return -1;
3b46e624 316
fca8a500 317 found:
bd79255d 318 if (tb->cflags & CF_USE_ICOUNT) {
414b15c9 319 assert(use_icount);
2e70f6ef 320 /* Reset the cycle counter to the start of the block. */
fca8a500 321 cpu->icount_decr.u16.low += num_insns;
2e70f6ef 322 /* Clear the IO flag. */
99df7dce 323 cpu->can_do_io = 0;
2e70f6ef 324 }
fca8a500
RH
325 cpu->icount_decr.u16.low -= i;
326 restore_state_to_opc(env, tb, data);
57fec1fe
FB
327
328#ifdef CONFIG_PROFILER
fca8a500
RH
329 tcg_ctx.restore_time += profile_getclock() - ti;
330 tcg_ctx.restore_count++;
57fec1fe 331#endif
d19893da
FB
332 return 0;
333}
5b6dd868 334
3f38f309 335bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr)
a8a826a3
BS
336{
337 TranslationBlock *tb;
a5e99826 338 bool r = false;
a8a826a3 339
d8b2239b
AB
340 /* A retaddr of zero is invalid so we really shouldn't have ended
341 * up here. The target code has likely forgotten to check retaddr
342 * != 0 before attempting to restore state. We return early to
343 * avoid blowing up on a recursive tb_lock(). The target must have
344 * previously survived a failed cpu_restore_state because
345 * tb_find_pc(0) would have failed anyway. It still should be
346 * fixed though.
347 */
348
349 if (!retaddr) {
350 return r;
351 }
352
a5e99826 353 tb_lock();
a8a826a3
BS
354 tb = tb_find_pc(retaddr);
355 if (tb) {
74f10515 356 cpu_restore_state_from_tb(cpu, tb, retaddr);
d8a499f1
PD
357 if (tb->cflags & CF_NOCACHE) {
358 /* one-shot translation, invalidate it immediately */
d8a499f1
PD
359 tb_phys_invalidate(tb, -1);
360 tb_free(tb);
361 }
a5e99826 362 r = true;
a8a826a3 363 }
a5e99826
FK
364 tb_unlock();
365
366 return r;
a8a826a3
BS
367}
368
47c16ed5
AK
369static void page_init(void)
370{
371 page_size_init();
66ec9f49
VK
372 page_table_config_init();
373
5b6dd868
BS
374#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
375 {
376#ifdef HAVE_KINFO_GETVMMAP
377 struct kinfo_vmentry *freep;
378 int i, cnt;
379
380 freep = kinfo_getvmmap(getpid(), &cnt);
381 if (freep) {
382 mmap_lock();
383 for (i = 0; i < cnt; i++) {
384 unsigned long startaddr, endaddr;
385
386 startaddr = freep[i].kve_start;
387 endaddr = freep[i].kve_end;
388 if (h2g_valid(startaddr)) {
389 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
390
391 if (h2g_valid(endaddr)) {
392 endaddr = h2g(endaddr);
393 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
394 } else {
395#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
396 endaddr = ~0ul;
397 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
398#endif
399 }
400 }
401 }
402 free(freep);
403 mmap_unlock();
404 }
405#else
406 FILE *f;
407
408 last_brk = (unsigned long)sbrk(0);
409
410 f = fopen("/compat/linux/proc/self/maps", "r");
411 if (f) {
412 mmap_lock();
413
414 do {
415 unsigned long startaddr, endaddr;
416 int n;
417
418 n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
419
420 if (n == 2 && h2g_valid(startaddr)) {
421 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
422
423 if (h2g_valid(endaddr)) {
424 endaddr = h2g(endaddr);
425 } else {
426 endaddr = ~0ul;
427 }
428 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
429 }
430 } while (!feof(f));
431
432 fclose(f);
433 mmap_unlock();
434 }
435#endif
436 }
437#endif
438}
439
75692087 440/* If alloc=1:
7d7500d9 441 * Called with tb_lock held for system emulation.
75692087
PB
442 * Called with mmap_lock held for user-mode emulation.
443 */
5b6dd868
BS
444static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
445{
446 PageDesc *pd;
447 void **lp;
448 int i;
449
e505a063
AB
450 if (alloc) {
451 assert_memory_lock();
452 }
453
5b6dd868 454 /* Level 1. Always allocated. */
66ec9f49 455 lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));
5b6dd868
BS
456
457 /* Level 2..N-1. */
66ec9f49 458 for (i = v_l2_levels; i > 0; i--) {
6940fab8 459 void **p = atomic_rcu_read(lp);
5b6dd868
BS
460
461 if (p == NULL) {
462 if (!alloc) {
463 return NULL;
464 }
e3a0abfd 465 p = g_new0(void *, V_L2_SIZE);
6940fab8 466 atomic_rcu_set(lp, p);
5b6dd868
BS
467 }
468
03f49957 469 lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
5b6dd868
BS
470 }
471
6940fab8 472 pd = atomic_rcu_read(lp);
5b6dd868
BS
473 if (pd == NULL) {
474 if (!alloc) {
475 return NULL;
476 }
e3a0abfd 477 pd = g_new0(PageDesc, V_L2_SIZE);
6940fab8 478 atomic_rcu_set(lp, pd);
5b6dd868
BS
479 }
480
03f49957 481 return pd + (index & (V_L2_SIZE - 1));
5b6dd868
BS
482}
483
484static inline PageDesc *page_find(tb_page_addr_t index)
485{
486 return page_find_alloc(index, 0);
487}
488
5b6dd868
BS
489#if defined(CONFIG_USER_ONLY)
490/* Currently it is not recommended to allocate big chunks of data in
491 user mode. This will change when a dedicated libc is used. */
492/* ??? 64-bit hosts ought to have no problem mmaping data outside the
493 region in which the guest needs to run. Revisit this. */
494#define USE_STATIC_CODE_GEN_BUFFER
495#endif
496
5b6dd868
BS
497/* Minimum size of the code gen buffer. This number is arbitrarily chosen,
498 but not so small that we can't have a fair number of TBs live. */
499#define MIN_CODE_GEN_BUFFER_SIZE (1024u * 1024)
500
501/* Maximum size of the code gen buffer we'd like to use. Unless otherwise
502 indicated, this is constrained by the range of direct branches on the
503 host cpu, as used by the TCG implementation of goto_tb. */
504#if defined(__x86_64__)
505# define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
506#elif defined(__sparc__)
507# define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
5bfd75a3
RH
508#elif defined(__powerpc64__)
509# define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
399f1648
SF
510#elif defined(__powerpc__)
511# define MAX_CODE_GEN_BUFFER_SIZE (32u * 1024 * 1024)
4a136e0a 512#elif defined(__aarch64__)
b68686bd 513# define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
5b6dd868
BS
514#elif defined(__s390x__)
515 /* We have a +- 4GB range on the branches; leave some slop. */
516# define MAX_CODE_GEN_BUFFER_SIZE (3ul * 1024 * 1024 * 1024)
479eb121
RH
517#elif defined(__mips__)
518 /* We have a 256MB branch region, but leave room to make sure the
519 main executable is also within that region. */
520# define MAX_CODE_GEN_BUFFER_SIZE (128ul * 1024 * 1024)
5b6dd868
BS
521#else
522# define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1)
523#endif
524
525#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)
526
527#define DEFAULT_CODE_GEN_BUFFER_SIZE \
528 (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
529 ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
530
531static inline size_t size_code_gen_buffer(size_t tb_size)
532{
533 /* Size the buffer. */
534 if (tb_size == 0) {
535#ifdef USE_STATIC_CODE_GEN_BUFFER
536 tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
537#else
538 /* ??? Needs adjustments. */
539 /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
540 static buffer, we could size this on RESERVED_VA, on the text
541 segment size of the executable, or continue to use the default. */
542 tb_size = (unsigned long)(ram_size / 4);
543#endif
544 }
545 if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
546 tb_size = MIN_CODE_GEN_BUFFER_SIZE;
547 }
548 if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
549 tb_size = MAX_CODE_GEN_BUFFER_SIZE;
550 }
5b6dd868
BS
551 return tb_size;
552}
553
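/* For example, on an x86-64 host with tb_size == 0 this selects
 * DEFAULT_CODE_GEN_BUFFER_SIZE (32MB with the static buffer, ram_size / 4
 * otherwise) and clamps the result to the [1MB, 2GB] range defined above.
 */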
483c76e1
RH
554#ifdef __mips__
555/* In order to use J and JAL within the code_gen_buffer, we require
556 that the buffer not cross a 256MB boundary. */
557static inline bool cross_256mb(void *addr, size_t size)
558{
7ba6a512 559 return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful;
483c76e1
RH
560}
561
562/* We weren't able to allocate a buffer without crossing that boundary,
563 so make do with the larger portion of the buffer that doesn't cross.
564 Returns the new base of the buffer, and adjusts code_gen_buffer_size. */
565static inline void *split_cross_256mb(void *buf1, size_t size1)
566{
7ba6a512 567 void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
483c76e1
RH
568 size_t size2 = buf1 + size1 - buf2;
569
570 size1 = buf2 - buf1;
571 if (size1 < size2) {
572 size1 = size2;
573 buf1 = buf2;
574 }
575
576 tcg_ctx.code_gen_buffer_size = size1;
577 return buf1;
578}
579#endif
580
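/* For example, a 32MB buffer at 0x0fe00000 ends at 0x11e00000; the XOR of
 * those addresses has bits set above bit 27, so cross_256mb() returns true.
 * split_cross_256mb() then keeps the larger piece, here the 30MB from
 * 0x10000000 up, and shrinks code_gen_buffer_size accordingly.
 */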
5b6dd868
BS
581#ifdef USE_STATIC_CODE_GEN_BUFFER
582static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
583 __attribute__((aligned(CODE_GEN_ALIGN)));
584
f293709c
RH
585# ifdef _WIN32
586static inline void do_protect(void *addr, long size, int prot)
587{
588 DWORD old_protect;
589 VirtualProtect(addr, size, prot, &old_protect);
590}
591
592static inline void map_exec(void *addr, long size)
593{
594 do_protect(addr, size, PAGE_EXECUTE_READWRITE);
595}
596
597static inline void map_none(void *addr, long size)
598{
599 do_protect(addr, size, PAGE_NOACCESS);
600}
601# else
602static inline void do_protect(void *addr, long size, int prot)
603{
604 uintptr_t start, end;
605
606 start = (uintptr_t)addr;
607 start &= qemu_real_host_page_mask;
608
609 end = (uintptr_t)addr + size;
610 end = ROUND_UP(end, qemu_real_host_page_size);
611
612 mprotect((void *)start, end - start, prot);
613}
614
615static inline void map_exec(void *addr, long size)
616{
617 do_protect(addr, size, PROT_READ | PROT_WRITE | PROT_EXEC);
618}
619
620static inline void map_none(void *addr, long size)
621{
622 do_protect(addr, size, PROT_NONE);
623}
624# endif /* WIN32 */
625
5b6dd868
BS
626static inline void *alloc_code_gen_buffer(void)
627{
483c76e1 628 void *buf = static_code_gen_buffer;
f293709c
RH
629 size_t full_size, size;
630
631 /* The size of the buffer, rounded down to end on a page boundary. */
632 full_size = (((uintptr_t)buf + sizeof(static_code_gen_buffer))
633 & qemu_real_host_page_mask) - (uintptr_t)buf;
634
635 /* Reserve a guard page. */
636 size = full_size - qemu_real_host_page_size;
637
638 /* Honor a command-line option limiting the size of the buffer. */
639 if (size > tcg_ctx.code_gen_buffer_size) {
640 size = (((uintptr_t)buf + tcg_ctx.code_gen_buffer_size)
641 & qemu_real_host_page_mask) - (uintptr_t)buf;
642 }
643 tcg_ctx.code_gen_buffer_size = size;
644
483c76e1 645#ifdef __mips__
f293709c
RH
646 if (cross_256mb(buf, size)) {
647 buf = split_cross_256mb(buf, size);
648 size = tcg_ctx.code_gen_buffer_size;
483c76e1
RH
649 }
650#endif
f293709c
RH
651
652 map_exec(buf, size);
653 map_none(buf + size, qemu_real_host_page_size);
654 qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
655
483c76e1 656 return buf;
5b6dd868 657}
f293709c
RH
658#elif defined(_WIN32)
659static inline void *alloc_code_gen_buffer(void)
660{
661 size_t size = tcg_ctx.code_gen_buffer_size;
662 void *buf1, *buf2;
663
664 /* Perform the allocation in two steps, so that the guard page
665 is reserved but uncommitted. */
666 buf1 = VirtualAlloc(NULL, size + qemu_real_host_page_size,
667 MEM_RESERVE, PAGE_NOACCESS);
668 if (buf1 != NULL) {
669 buf2 = VirtualAlloc(buf1, size, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
670 assert(buf1 == buf2);
671 }
672
673 return buf1;
674}
675#else
5b6dd868
BS
676static inline void *alloc_code_gen_buffer(void)
677{
678 int flags = MAP_PRIVATE | MAP_ANONYMOUS;
679 uintptr_t start = 0;
f293709c 680 size_t size = tcg_ctx.code_gen_buffer_size;
5b6dd868
BS
681 void *buf;
682
683 /* Constrain the position of the buffer based on the host cpu.
684 Note that these addresses are chosen in concert with the
685 addresses assigned in the relevant linker script file. */
686# if defined(__PIE__) || defined(__PIC__)
687 /* Don't bother setting a preferred location if we're building
688 a position-independent executable. We're more likely to get
689 an address near the main executable if we let the kernel
690 choose the address. */
691# elif defined(__x86_64__) && defined(MAP_32BIT)
692 /* Force the memory down into low memory with the executable.
693 Leave the choice of exact location with the kernel. */
694 flags |= MAP_32BIT;
695 /* Cannot expect to map more than 800MB in low memory. */
f293709c
RH
696 if (size > 800u * 1024 * 1024) {
697 tcg_ctx.code_gen_buffer_size = size = 800u * 1024 * 1024;
5b6dd868
BS
698 }
699# elif defined(__sparc__)
700 start = 0x40000000ul;
701# elif defined(__s390x__)
702 start = 0x90000000ul;
479eb121 703# elif defined(__mips__)
f293709c 704# if _MIPS_SIM == _ABI64
479eb121
RH
705 start = 0x128000000ul;
706# else
707 start = 0x08000000ul;
708# endif
5b6dd868
BS
709# endif
710
f293709c
RH
711 buf = mmap((void *)start, size + qemu_real_host_page_size,
712 PROT_NONE, flags, -1, 0);
483c76e1
RH
713 if (buf == MAP_FAILED) {
714 return NULL;
715 }
716
717#ifdef __mips__
f293709c 718 if (cross_256mb(buf, size)) {
5d831be2 719 /* Try again, with the original still mapped, to avoid re-acquiring
483c76e1 720 that 256mb crossing. This time don't specify an address. */
f293709c
RH
721 size_t size2;
722 void *buf2 = mmap(NULL, size + qemu_real_host_page_size,
723 PROT_NONE, flags, -1, 0);
f68808c7 724 switch ((int)(buf2 != MAP_FAILED)) {
f293709c
RH
725 case 1:
726 if (!cross_256mb(buf2, size)) {
483c76e1 727 /* Success! Use the new buffer. */
8bdf4997 728 munmap(buf, size + qemu_real_host_page_size);
f293709c 729 break;
483c76e1
RH
730 }
731 /* Failure. Work with what we had. */
8bdf4997 732 munmap(buf2, size + qemu_real_host_page_size);
f293709c
RH
733 /* fallthru */
734 default:
735 /* Split the original buffer. Free the smaller half. */
736 buf2 = split_cross_256mb(buf, size);
737 size2 = tcg_ctx.code_gen_buffer_size;
738 if (buf == buf2) {
739 munmap(buf + size2 + qemu_real_host_page_size, size - size2);
740 } else {
741 munmap(buf, size - size2);
742 }
743 size = size2;
744 break;
483c76e1 745 }
f293709c 746 buf = buf2;
483c76e1
RH
747 }
748#endif
749
f293709c
RH
750 /* Make the final buffer accessible. The guard page at the end
751 will remain inaccessible with PROT_NONE. */
752 mprotect(buf, size, PROT_WRITE | PROT_READ | PROT_EXEC);
483c76e1 753
f293709c
RH
754 /* Request large pages for the buffer. */
755 qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
483c76e1 756
5b6dd868
BS
757 return buf;
758}
f293709c 759#endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */
5b6dd868
BS
760
761static inline void code_gen_alloc(size_t tb_size)
762{
0b0d3320
EV
763 tcg_ctx.code_gen_buffer_size = size_code_gen_buffer(tb_size);
764 tcg_ctx.code_gen_buffer = alloc_code_gen_buffer();
765 if (tcg_ctx.code_gen_buffer == NULL) {
5b6dd868
BS
766 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
767 exit(1);
768 }
769
6e3b2bfd
EC
770 /* size this conservatively -- realloc later if needed */
771 tcg_ctx.tb_ctx.tbs_size =
772 tcg_ctx.code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE / 8;
773 if (unlikely(!tcg_ctx.tb_ctx.tbs_size)) {
774 tcg_ctx.tb_ctx.tbs_size = 64 * 1024;
775 }
776 tcg_ctx.tb_ctx.tbs = g_new(TranslationBlock *, tcg_ctx.tb_ctx.tbs_size);
8163b749 777
677ef623 778 qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
5b6dd868
BS
779}
780
909eaac9
EC
781static void tb_htable_init(void)
782{
783 unsigned int mode = QHT_MODE_AUTO_RESIZE;
784
785 qht_init(&tcg_ctx.tb_ctx.htable, CODE_GEN_HTABLE_SIZE, mode);
786}
787
5b6dd868
BS
788/* Must be called before using the QEMU cpus. 'tb_size' is the size
789 (in bytes) allocated to the translation buffer. Zero means default
790 size. */
791void tcg_exec_init(unsigned long tb_size)
792{
8e2b7299 793 tcg_allowed = true;
5b6dd868 794 cpu_gen_init();
5b6dd868 795 page_init();
909eaac9 796 tb_htable_init();
f293709c 797 code_gen_alloc(tb_size);
4cbea598 798#if defined(CONFIG_SOFTMMU)
5b6dd868
BS
799 /* There's no guest base to take into account, so go ahead and
800 initialize the prologue now. */
801 tcg_prologue_init(&tcg_ctx);
802#endif
803}
804
7d7500d9
PB
805/*
806 * Allocate a new translation block. Flush the translation buffer if
807 * too many translation blocks or too much generated code.
808 *
809 * Called with tb_lock held.
810 */
5b6dd868
BS
811static TranslationBlock *tb_alloc(target_ulong pc)
812{
813 TranslationBlock *tb;
6e3b2bfd 814 TBContext *ctx;
5b6dd868 815
6ac3d7e8 816 assert_tb_locked();
e505a063 817
6e3b2bfd
EC
818 tb = tcg_tb_alloc(&tcg_ctx);
819 if (unlikely(tb == NULL)) {
5b6dd868
BS
820 return NULL;
821 }
6e3b2bfd
EC
822 ctx = &tcg_ctx.tb_ctx;
823 if (unlikely(ctx->nb_tbs == ctx->tbs_size)) {
824 ctx->tbs_size *= 2;
825 ctx->tbs = g_renew(TranslationBlock *, ctx->tbs, ctx->tbs_size);
826 }
827 ctx->tbs[ctx->nb_tbs++] = tb;
5b6dd868
BS
828 return tb;
829}
830
7d7500d9 831/* Called with tb_lock held. */
5b6dd868
BS
832void tb_free(TranslationBlock *tb)
833{
6ac3d7e8 834 assert_tb_locked();
e505a063 835
5b6dd868
BS
836 /* In practice this is mostly used for single-use temporary TBs.
837 Ignore the hard cases and just back up if this TB happens to
838 be the last one generated. */
5e5f07e0 839 if (tcg_ctx.tb_ctx.nb_tbs > 0 &&
6e3b2bfd
EC
840 tb == tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs - 1]) {
841 size_t struct_size = ROUND_UP(sizeof(*tb), qemu_icache_linesize);
842
843 tcg_ctx.code_gen_ptr = tb->tc_ptr - struct_size;
5e5f07e0 844 tcg_ctx.tb_ctx.nb_tbs--;
5b6dd868
BS
845 }
846}
847
848static inline void invalidate_page_bitmap(PageDesc *p)
849{
6fad459c 850#ifdef CONFIG_SOFTMMU
012aef07
MA
851 g_free(p->code_bitmap);
852 p->code_bitmap = NULL;
5b6dd868 853 p->code_write_count = 0;
6fad459c 854#endif
5b6dd868
BS
855}
856
857/* Set to NULL all the 'first_tb' fields in all PageDescs. */
858static void page_flush_tb_1(int level, void **lp)
859{
860 int i;
861
862 if (*lp == NULL) {
863 return;
864 }
865 if (level == 0) {
866 PageDesc *pd = *lp;
867
03f49957 868 for (i = 0; i < V_L2_SIZE; ++i) {
5b6dd868
BS
869 pd[i].first_tb = NULL;
870 invalidate_page_bitmap(pd + i);
871 }
872 } else {
873 void **pp = *lp;
874
03f49957 875 for (i = 0; i < V_L2_SIZE; ++i) {
5b6dd868
BS
876 page_flush_tb_1(level - 1, pp + i);
877 }
878 }
879}
880
881static void page_flush_tb(void)
882{
66ec9f49 883 int i, l1_sz = v_l1_size;
5b6dd868 884
66ec9f49
VK
885 for (i = 0; i < l1_sz; i++) {
886 page_flush_tb_1(v_l2_levels, l1_map + i);
5b6dd868
BS
887 }
888}
889
890/* flush all the translation blocks */
14e6fe12 891static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
5b6dd868 892{
3359baad
SF
893 tb_lock();
894
14e6fe12 895 /* If it has already been done at the request of another CPU,
3359baad
SF
896 * just retry.
897 */
14e6fe12 898 if (tcg_ctx.tb_ctx.tb_flush_count != tb_flush_count.host_int) {
3359baad 899 goto done;
135a972b 900 }
3359baad 901
955939a2 902#if defined(DEBUG_TB_FLUSH)
5b6dd868 903 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
0b0d3320 904 (unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer),
5e5f07e0 905 tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.tb_ctx.nb_tbs > 0 ?
0b0d3320 906 ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)) /
5e5f07e0 907 tcg_ctx.tb_ctx.nb_tbs : 0);
5b6dd868 908#endif
0b0d3320
EV
909 if ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)
910 > tcg_ctx.code_gen_buffer_size) {
a47dddd7 911 cpu_abort(cpu, "Internal error: code buffer overflow\n");
5b6dd868 912 }
5b6dd868 913
bdc44640 914 CPU_FOREACH(cpu) {
f3ced3c5 915 cpu_tb_jmp_cache_clear(cpu);
5b6dd868
BS
916 }
917
118b0730 918 tcg_ctx.tb_ctx.nb_tbs = 0;
909eaac9 919 qht_reset_size(&tcg_ctx.tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
5b6dd868
BS
920 page_flush_tb();
921
0b0d3320 922 tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
5b6dd868
BS
923 /* XXX: flush processor icache at this point if cache flush is
924 expensive */
3359baad
SF
925 atomic_mb_set(&tcg_ctx.tb_ctx.tb_flush_count,
926 tcg_ctx.tb_ctx.tb_flush_count + 1);
927
928done:
929 tb_unlock();
930}
931
932void tb_flush(CPUState *cpu)
933{
934 if (tcg_enabled()) {
14e6fe12
PB
935 unsigned tb_flush_count = atomic_mb_read(&tcg_ctx.tb_ctx.tb_flush_count);
936 async_safe_run_on_cpu(cpu, do_tb_flush,
937 RUN_ON_CPU_HOST_INT(tb_flush_count));
3359baad 938 }
5b6dd868
BS
939}
940
941#ifdef DEBUG_TB_CHECK
942
909eaac9
EC
943static void
944do_tb_invalidate_check(struct qht *ht, void *p, uint32_t hash, void *userp)
5b6dd868 945{
909eaac9
EC
946 TranslationBlock *tb = p;
947 target_ulong addr = *(target_ulong *)userp;
948
949 if (!(addr + TARGET_PAGE_SIZE <= tb->pc || addr >= tb->pc + tb->size)) {
950 printf("ERROR invalidate: address=" TARGET_FMT_lx
951 " PC=%08lx size=%04x\n", addr, (long)tb->pc, tb->size);
952 }
953}
5b6dd868 954
7d7500d9
PB
955/* verify that all the pages have correct rights for code
956 *
957 * Called with tb_lock held.
958 */
909eaac9
EC
959static void tb_invalidate_check(target_ulong address)
960{
5b6dd868 961 address &= TARGET_PAGE_MASK;
909eaac9
EC
962 qht_iter(&tcg_ctx.tb_ctx.htable, do_tb_invalidate_check, &address);
963}
964
965static void
966do_tb_page_check(struct qht *ht, void *p, uint32_t hash, void *userp)
967{
968 TranslationBlock *tb = p;
969 int flags1, flags2;
970
971 flags1 = page_get_flags(tb->pc);
972 flags2 = page_get_flags(tb->pc + tb->size - 1);
973 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
974 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
975 (long)tb->pc, tb->size, flags1, flags2);
5b6dd868
BS
976 }
977}
978
979/* verify that all the pages have correct rights for code */
980static void tb_page_check(void)
981{
909eaac9 982 qht_iter(&tcg_ctx.tb_ctx.htable, do_tb_page_check, NULL);
5b6dd868
BS
983}
984
985#endif
986
5b6dd868
BS
987static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
988{
989 TranslationBlock *tb1;
990 unsigned int n1;
991
992 for (;;) {
993 tb1 = *ptb;
994 n1 = (uintptr_t)tb1 & 3;
995 tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
996 if (tb1 == tb) {
997 *ptb = tb1->page_next[n1];
998 break;
999 }
1000 ptb = &tb1->page_next[n1];
1001 }
1002}
1003
13362678
SF
1004/* remove the TB from a list of TBs jumping to the n-th jump target of the TB */
1005static inline void tb_remove_from_jmp_list(TranslationBlock *tb, int n)
5b6dd868 1006{
c37e6d7e
SF
1007 TranslationBlock *tb1;
1008 uintptr_t *ptb, ntb;
5b6dd868
BS
1009 unsigned int n1;
1010
f309101c 1011 ptb = &tb->jmp_list_next[n];
c37e6d7e 1012 if (*ptb) {
5b6dd868
BS
1013 /* find tb(n) in circular list */
1014 for (;;) {
c37e6d7e
SF
1015 ntb = *ptb;
1016 n1 = ntb & 3;
1017 tb1 = (TranslationBlock *)(ntb & ~3);
5b6dd868
BS
1018 if (n1 == n && tb1 == tb) {
1019 break;
1020 }
1021 if (n1 == 2) {
f309101c 1022 ptb = &tb1->jmp_list_first;
5b6dd868 1023 } else {
f309101c 1024 ptb = &tb1->jmp_list_next[n1];
5b6dd868
BS
1025 }
1026 }
1027 /* now we can suppress tb(n) from the list */
f309101c 1028 *ptb = tb->jmp_list_next[n];
5b6dd868 1029
c37e6d7e 1030 tb->jmp_list_next[n] = (uintptr_t)NULL;
5b6dd868
BS
1031 }
1032}
1033
1034/* reset the jump entry 'n' of a TB so that it is not chained to
1035 another TB */
1036static inline void tb_reset_jump(TranslationBlock *tb, int n)
1037{
f309101c
SF
1038 uintptr_t addr = (uintptr_t)(tb->tc_ptr + tb->jmp_reset_offset[n]);
1039 tb_set_jmp_target(tb, n, addr);
5b6dd868
BS
1040}
1041
89bba496
SF
1042/* remove any jumps to the TB */
1043static inline void tb_jmp_unlink(TranslationBlock *tb)
1044{
f9c5b66f
SF
1045 TranslationBlock *tb1;
1046 uintptr_t *ptb, ntb;
89bba496
SF
1047 unsigned int n1;
1048
f9c5b66f 1049 ptb = &tb->jmp_list_first;
89bba496 1050 for (;;) {
f9c5b66f
SF
1051 ntb = *ptb;
1052 n1 = ntb & 3;
1053 tb1 = (TranslationBlock *)(ntb & ~3);
89bba496
SF
1054 if (n1 == 2) {
1055 break;
1056 }
f9c5b66f
SF
1057 tb_reset_jump(tb1, n1);
1058 *ptb = tb1->jmp_list_next[n1];
1059 tb1->jmp_list_next[n1] = (uintptr_t)NULL;
89bba496 1060 }
89bba496
SF
1061}
1062
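/* The lists walked above rely on tagging the low two bits of a
 * TranslationBlock pointer (TBs are at least 4-byte aligned): tb | 0 and
 * tb | 1 mean "continue through tb->jmp_list_next[0]/[1]", while tb | 2
 * marks jmp_list_first, the head of the circular list of TBs jumping into
 * tb. Hence the walkers mask with ~3 to recover the pointer and stop when
 * (ntb & 3) == 2.
 */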
7d7500d9
PB
1063/* invalidate one TB
1064 *
1065 * Called with tb_lock held.
1066 */
5b6dd868
BS
1067void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
1068{
182735ef 1069 CPUState *cpu;
5b6dd868 1070 PageDesc *p;
42bd3228 1071 uint32_t h;
5b6dd868 1072 tb_page_addr_t phys_pc;
5b6dd868 1073
6ac3d7e8 1074 assert_tb_locked();
e505a063 1075
6d21e420
PB
1076 atomic_set(&tb->invalid, true);
1077
5b6dd868
BS
1078 /* remove the TB from the hash list */
1079 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
61a67f71 1080 h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->trace_vcpu_dstate);
909eaac9 1081 qht_remove(&tcg_ctx.tb_ctx.htable, tb, h);
5b6dd868
BS
1082
1083 /* remove the TB from the page list */
1084 if (tb->page_addr[0] != page_addr) {
1085 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
1086 tb_page_remove(&p->first_tb, tb);
1087 invalidate_page_bitmap(p);
1088 }
1089 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
1090 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
1091 tb_page_remove(&p->first_tb, tb);
1092 invalidate_page_bitmap(p);
1093 }
1094
5b6dd868
BS
1095 /* remove the TB from the hash list */
1096 h = tb_jmp_cache_hash_func(tb->pc);
bdc44640 1097 CPU_FOREACH(cpu) {
89a16b1e
SF
1098 if (atomic_read(&cpu->tb_jmp_cache[h]) == tb) {
1099 atomic_set(&cpu->tb_jmp_cache[h], NULL);
5b6dd868
BS
1100 }
1101 }
1102
1103 /* suppress this TB from the two jump lists */
13362678
SF
1104 tb_remove_from_jmp_list(tb, 0);
1105 tb_remove_from_jmp_list(tb, 1);
5b6dd868
BS
1106
1107 /* suppress any remaining jumps to this TB */
89bba496 1108 tb_jmp_unlink(tb);
5b6dd868 1109
5e5f07e0 1110 tcg_ctx.tb_ctx.tb_phys_invalidate_count++;
5b6dd868
BS
1111}
1112
6fad459c 1113#ifdef CONFIG_SOFTMMU
5b6dd868
BS
1114static void build_page_bitmap(PageDesc *p)
1115{
1116 int n, tb_start, tb_end;
1117 TranslationBlock *tb;
1118
510a647f 1119 p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);
5b6dd868
BS
1120
1121 tb = p->first_tb;
1122 while (tb != NULL) {
1123 n = (uintptr_t)tb & 3;
1124 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
1125 /* NOTE: this is subtle as a TB may span two physical pages */
1126 if (n == 0) {
1127 /* NOTE: tb_end may be after the end of the page, but
1128 it is not a problem */
1129 tb_start = tb->pc & ~TARGET_PAGE_MASK;
1130 tb_end = tb_start + tb->size;
1131 if (tb_end > TARGET_PAGE_SIZE) {
1132 tb_end = TARGET_PAGE_SIZE;
e505a063 1133 }
5b6dd868
BS
1134 } else {
1135 tb_start = 0;
1136 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1137 }
510a647f 1138 bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
5b6dd868
BS
1139 tb = tb->page_next[n];
1140 }
1141}
6fad459c 1142#endif
5b6dd868 1143
e90d96b1
SF
1144/* add the tb to the target page and protect it if necessary
1145 *
1146 * Called with mmap_lock held for user-mode emulation.
1147 */
1148static inline void tb_alloc_page(TranslationBlock *tb,
1149 unsigned int n, tb_page_addr_t page_addr)
1150{
1151 PageDesc *p;
1152#ifndef CONFIG_USER_ONLY
1153 bool page_already_protected;
1154#endif
1155
e505a063
AB
1156 assert_memory_lock();
1157
e90d96b1
SF
1158 tb->page_addr[n] = page_addr;
1159 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
1160 tb->page_next[n] = p->first_tb;
1161#ifndef CONFIG_USER_ONLY
1162 page_already_protected = p->first_tb != NULL;
1163#endif
1164 p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
1165 invalidate_page_bitmap(p);
1166
1167#if defined(CONFIG_USER_ONLY)
1168 if (p->flags & PAGE_WRITE) {
1169 target_ulong addr;
1170 PageDesc *p2;
1171 int prot;
1172
1173 /* force the host page as non writable (writes will have a
1174 page fault + mprotect overhead) */
1175 page_addr &= qemu_host_page_mask;
1176 prot = 0;
1177 for (addr = page_addr; addr < page_addr + qemu_host_page_size;
1178 addr += TARGET_PAGE_SIZE) {
1179
1180 p2 = page_find(addr >> TARGET_PAGE_BITS);
1181 if (!p2) {
1182 continue;
1183 }
1184 prot |= p2->flags;
1185 p2->flags &= ~PAGE_WRITE;
1186 }
1187 mprotect(g2h(page_addr), qemu_host_page_size,
1188 (prot & PAGE_BITS) & ~PAGE_WRITE);
1189#ifdef DEBUG_TB_INVALIDATE
1190 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1191 page_addr);
1192#endif
1193 }
1194#else
1195 /* if some code is already present, then the pages are already
1196 protected. So we handle the case where only the first TB is
1197 allocated in a physical page */
1198 if (!page_already_protected) {
1199 tlb_protect_code(page_addr);
1200 }
1201#endif
1202}
1203
1204/* add a new TB and link it to the physical page tables. phys_page2 is
1205 * (-1) to indicate that only one page contains the TB.
1206 *
1207 * Called with mmap_lock held for user-mode emulation.
1208 */
1209static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
1210 tb_page_addr_t phys_page2)
1211{
42bd3228 1212 uint32_t h;
e90d96b1 1213
e505a063
AB
1214 assert_memory_lock();
1215
e90d96b1
SF
1216 /* add in the page list */
1217 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1218 if (phys_page2 != -1) {
1219 tb_alloc_page(tb, 1, phys_page2);
1220 } else {
1221 tb->page_addr[1] = -1;
1222 }
1223
2e1ae44a 1224 /* add in the hash table */
61a67f71 1225 h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->trace_vcpu_dstate);
2e1ae44a
AB
1226 qht_insert(&tcg_ctx.tb_ctx.htable, tb, h);
1227
e90d96b1
SF
1228#ifdef DEBUG_TB_CHECK
1229 tb_page_check();
1230#endif
1231}
1232
75692087 1233/* Called with mmap_lock held for user mode emulation. */
648f034c 1234TranslationBlock *tb_gen_code(CPUState *cpu,
5b6dd868 1235 target_ulong pc, target_ulong cs_base,
89fee74a 1236 uint32_t flags, int cflags)
5b6dd868 1237{
648f034c 1238 CPUArchState *env = cpu->env_ptr;
5b6dd868 1239 TranslationBlock *tb;
5b6dd868
BS
1240 tb_page_addr_t phys_pc, phys_page2;
1241 target_ulong virt_page2;
fec88f64 1242 tcg_insn_unit *gen_code_buf;
fca8a500 1243 int gen_code_size, search_size;
fec88f64
RH
1244#ifdef CONFIG_PROFILER
1245 int64_t ti;
1246#endif
e505a063 1247 assert_memory_lock();
5b6dd868
BS
1248
1249 phys_pc = get_page_addr_code(env, pc);
56c0269a 1250 if (use_icount && !(cflags & CF_IGNORE_ICOUNT)) {
0266359e
PB
1251 cflags |= CF_USE_ICOUNT;
1252 }
b125f9dc 1253
5b6dd868 1254 tb = tb_alloc(pc);
b125f9dc
RH
1255 if (unlikely(!tb)) {
1256 buffer_overflow:
5b6dd868 1257 /* flush must be done */
bbd77c18 1258 tb_flush(cpu);
3359baad 1259 mmap_unlock();
8499c8fc
PD
1260 /* Make the execution loop process the flush as soon as possible. */
1261 cpu->exception_index = EXCP_INTERRUPT;
3359baad 1262 cpu_loop_exit(cpu);
5b6dd868 1263 }
fec88f64
RH
1264
1265 gen_code_buf = tcg_ctx.code_gen_ptr;
1266 tb->tc_ptr = gen_code_buf;
2b48e10f 1267 tb->pc = pc;
5b6dd868
BS
1268 tb->cs_base = cs_base;
1269 tb->flags = flags;
1270 tb->cflags = cflags;
61a67f71 1271 tb->trace_vcpu_dstate = *cpu->trace_dstate;
2b48e10f 1272 tb->invalid = false;
fec88f64
RH
1273
1274#ifdef CONFIG_PROFILER
1275 tcg_ctx.tb_count1++; /* includes aborted translations because of
1276 exceptions */
1277 ti = profile_getclock();
1278#endif
1279
1280 tcg_func_start(&tcg_ctx);
1281
7c255043 1282 tcg_ctx.cpu = ENV_GET_CPU(env);
9c489ea6 1283 gen_intermediate_code(cpu, tb);
7c255043 1284 tcg_ctx.cpu = NULL;
fec88f64
RH
1285
1286 trace_translate_block(tb, tb->pc, tb->tc_ptr);
1287
1288 /* generate machine code */
f309101c
SF
1289 tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
1290 tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
1291 tcg_ctx.tb_jmp_reset_offset = tb->jmp_reset_offset;
fec88f64 1292#ifdef USE_DIRECT_JUMP
f309101c
SF
1293 tcg_ctx.tb_jmp_insn_offset = tb->jmp_insn_offset;
1294 tcg_ctx.tb_jmp_target_addr = NULL;
fec88f64 1295#else
f309101c
SF
1296 tcg_ctx.tb_jmp_insn_offset = NULL;
1297 tcg_ctx.tb_jmp_target_addr = tb->jmp_target_addr;
fec88f64
RH
1298#endif
1299
1300#ifdef CONFIG_PROFILER
1301 tcg_ctx.tb_count++;
1302 tcg_ctx.interm_time += profile_getclock() - ti;
1303 tcg_ctx.code_time -= profile_getclock();
1304#endif
1305
b125f9dc
RH
1306 /* ??? Overflow could be handled better here. In particular, we
1307 don't need to re-do gen_intermediate_code, nor should we re-do
1308 the tcg optimization currently hidden inside tcg_gen_code. All
1309 that should be required is to flush the TBs, allocate a new TB,
1310 re-initialize it per above, and re-do the actual code generation. */
5bd2ec3d 1311 gen_code_size = tcg_gen_code(&tcg_ctx, tb);
b125f9dc
RH
1312 if (unlikely(gen_code_size < 0)) {
1313 goto buffer_overflow;
1314 }
fca8a500 1315 search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
b125f9dc
RH
1316 if (unlikely(search_size < 0)) {
1317 goto buffer_overflow;
1318 }
fec88f64
RH
1319
1320#ifdef CONFIG_PROFILER
1321 tcg_ctx.code_time += profile_getclock();
1322 tcg_ctx.code_in_len += tb->size;
1323 tcg_ctx.code_out_len += gen_code_size;
fca8a500 1324 tcg_ctx.search_out_len += search_size;
fec88f64
RH
1325#endif
1326
1327#ifdef DEBUG_DISAS
d977e1c2
AB
1328 if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
1329 qemu_log_in_addr_range(tb->pc)) {
1ee73216 1330 qemu_log_lock();
fec88f64
RH
1331 qemu_log("OUT: [size=%d]\n", gen_code_size);
1332 log_disas(tb->tc_ptr, gen_code_size);
1333 qemu_log("\n");
1334 qemu_log_flush();
1ee73216 1335 qemu_log_unlock();
fec88f64
RH
1336 }
1337#endif
1338
fca8a500
RH
1339 tcg_ctx.code_gen_ptr = (void *)
1340 ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
1341 CODE_GEN_ALIGN);
5b6dd868 1342
901bc3de
SF
1343 /* init jump list */
1344 assert(((uintptr_t)tb & 3) == 0);
1345 tb->jmp_list_first = (uintptr_t)tb | 2;
1346 tb->jmp_list_next[0] = (uintptr_t)NULL;
1347 tb->jmp_list_next[1] = (uintptr_t)NULL;
1348
1349 /* init original jump addresses which have been set during tcg_gen_code() */
1350 if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
1351 tb_reset_jump(tb, 0);
1352 }
1353 if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
1354 tb_reset_jump(tb, 1);
1355 }
1356
5b6dd868
BS
1357 /* check next page if needed */
1358 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
1359 phys_page2 = -1;
1360 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
1361 phys_page2 = get_page_addr_code(env, virt_page2);
1362 }
901bc3de
SF
1363 /* As long as consistency of the TB stuff is provided by tb_lock in user
1364 * mode and is implicit in single-threaded softmmu emulation, no explicit
1365 * memory barrier is required before tb_link_page() makes the TB visible
1366 * through the physical hash table and physical page list.
1367 */
5b6dd868
BS
1368 tb_link_page(tb, phys_pc, phys_page2);
1369 return tb;
1370}
1371
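/* In outline, tb_gen_code() runs the whole translation pipeline: allocate
 * a TB (flushing everything if the buffer is exhausted), turn guest code
 * into TCG ops with gen_intermediate_code(), emit host code with
 * tcg_gen_code(), append the encode_search() metadata, initialise the
 * direct-jump lists, and finally publish the TB via tb_link_page().
 */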
1372/*
1373 * Invalidate all TBs which intersect with the target physical address range
1374 * [start;end[. NOTE: start and end may refer to *different* physical pages.
1375 * 'is_cpu_write_access' should be true if called from a real cpu write
1376 * access: the virtual CPU will exit the current TB if code is modified inside
1377 * this TB.
75692087 1378 *
ba051fb5
AB
1379 * Called with mmap_lock held for user-mode emulation, grabs tb_lock
1380 * Called with tb_lock held for system-mode emulation
5b6dd868 1381 */
ba051fb5 1382static void tb_invalidate_phys_range_1(tb_page_addr_t start, tb_page_addr_t end)
5b6dd868
BS
1383{
1384 while (start < end) {
35865339 1385 tb_invalidate_phys_page_range(start, end, 0);
5b6dd868
BS
1386 start &= TARGET_PAGE_MASK;
1387 start += TARGET_PAGE_SIZE;
1388 }
1389}
1390
ba051fb5
AB
1391#ifdef CONFIG_SOFTMMU
1392void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
1393{
6ac3d7e8 1394 assert_tb_locked();
ba051fb5
AB
1395 tb_invalidate_phys_range_1(start, end);
1396}
1397#else
1398void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
1399{
1400 assert_memory_lock();
1401 tb_lock();
1402 tb_invalidate_phys_range_1(start, end);
1403 tb_unlock();
1404}
1405#endif
5b6dd868
BS
1406/*
1407 * Invalidate all TBs which intersect with the target physical address range
1408 * [start;end[. NOTE: start and end must refer to the *same* physical page.
1409 * 'is_cpu_write_access' should be true if called from a real cpu write
1410 * access: the virtual CPU will exit the current TB if code is modified inside
1411 * this TB.
75692087 1412 *
ba051fb5
AB
1413 * Called with tb_lock/mmap_lock held for user-mode emulation
1414 * Called with tb_lock held for system-mode emulation
5b6dd868
BS
1415 */
1416void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
1417 int is_cpu_write_access)
1418{
3213525f 1419 TranslationBlock *tb, *tb_next;
baea4fae 1420#if defined(TARGET_HAS_PRECISE_SMC)
3213525f 1421 CPUState *cpu = current_cpu;
4917cf44
AF
1422 CPUArchState *env = NULL;
1423#endif
5b6dd868
BS
1424 tb_page_addr_t tb_start, tb_end;
1425 PageDesc *p;
1426 int n;
1427#ifdef TARGET_HAS_PRECISE_SMC
1428 int current_tb_not_found = is_cpu_write_access;
1429 TranslationBlock *current_tb = NULL;
1430 int current_tb_modified = 0;
1431 target_ulong current_pc = 0;
1432 target_ulong current_cs_base = 0;
89fee74a 1433 uint32_t current_flags = 0;
5b6dd868
BS
1434#endif /* TARGET_HAS_PRECISE_SMC */
1435
e505a063 1436 assert_memory_lock();
6ac3d7e8 1437 assert_tb_locked();
e505a063 1438
5b6dd868
BS
1439 p = page_find(start >> TARGET_PAGE_BITS);
1440 if (!p) {
1441 return;
1442 }
baea4fae 1443#if defined(TARGET_HAS_PRECISE_SMC)
4917cf44
AF
1444 if (cpu != NULL) {
1445 env = cpu->env_ptr;
d77953b9 1446 }
4917cf44 1447#endif
5b6dd868
BS
1448
1449 /* we remove all the TBs in the range [start, end[ */
1450 /* XXX: see if in some cases it could be faster to invalidate all
1451 the code */
1452 tb = p->first_tb;
1453 while (tb != NULL) {
1454 n = (uintptr_t)tb & 3;
1455 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
1456 tb_next = tb->page_next[n];
1457 /* NOTE: this is subtle as a TB may span two physical pages */
1458 if (n == 0) {
1459 /* NOTE: tb_end may be after the end of the page, but
1460 it is not a problem */
1461 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1462 tb_end = tb_start + tb->size;
1463 } else {
1464 tb_start = tb->page_addr[1];
1465 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1466 }
1467 if (!(tb_end <= start || tb_start >= end)) {
1468#ifdef TARGET_HAS_PRECISE_SMC
1469 if (current_tb_not_found) {
1470 current_tb_not_found = 0;
1471 current_tb = NULL;
93afeade 1472 if (cpu->mem_io_pc) {
5b6dd868 1473 /* now we have a real cpu fault */
93afeade 1474 current_tb = tb_find_pc(cpu->mem_io_pc);
5b6dd868
BS
1475 }
1476 }
1477 if (current_tb == tb &&
1478 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1479 /* If we are modifying the current TB, we must stop
1480 its execution. We could be more precise by checking
1481 that the modification is after the current PC, but it
1482 would require a specialized function to partially
1483 restore the CPU state */
1484
1485 current_tb_modified = 1;
74f10515 1486 cpu_restore_state_from_tb(cpu, current_tb, cpu->mem_io_pc);
5b6dd868
BS
1487 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1488 &current_flags);
1489 }
1490#endif /* TARGET_HAS_PRECISE_SMC */
5b6dd868 1491 tb_phys_invalidate(tb, -1);
5b6dd868
BS
1492 }
1493 tb = tb_next;
1494 }
1495#if !defined(CONFIG_USER_ONLY)
1496 /* if no code remaining, no need to continue to use slow writes */
1497 if (!p->first_tb) {
1498 invalidate_page_bitmap(p);
fc377bcf 1499 tlb_unprotect_code(start);
5b6dd868
BS
1500 }
1501#endif
1502#ifdef TARGET_HAS_PRECISE_SMC
1503 if (current_tb_modified) {
1504 /* we generate a block containing just the instruction
1505 modifying the memory. It will ensure that it cannot modify
1506 itself */
648f034c 1507 tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
6886b980 1508 cpu_loop_exit_noexc(cpu);
5b6dd868
BS
1509 }
1510#endif
1511}
1512
6fad459c 1513#ifdef CONFIG_SOFTMMU
ba051fb5
AB
1514/* len must be <= 8 and start must be a multiple of len.
1515 * Called via softmmu_template.h when code areas are written to, with
8d04fb55 1516 * the iothread mutex not held.
ba051fb5 1517 */
5b6dd868
BS
1518void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
1519{
1520 PageDesc *p;
5b6dd868
BS
1521
1522#if 0
1523 if (1) {
1524 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1525 cpu_single_env->mem_io_vaddr, len,
1526 cpu_single_env->eip,
1527 cpu_single_env->eip +
1528 (intptr_t)cpu_single_env->segs[R_CS].base);
1529 }
1530#endif
ba051fb5
AB
1531 assert_memory_lock();
1532
5b6dd868
BS
1533 p = page_find(start >> TARGET_PAGE_BITS);
1534 if (!p) {
1535 return;
1536 }
fc377bcf
PB
1537 if (!p->code_bitmap &&
1538 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
7d7500d9
PB
1539 /* build code bitmap. FIXME: writes should be protected by
1540 * tb_lock, reads by tb_lock or RCU.
1541 */
fc377bcf
PB
1542 build_page_bitmap(p);
1543 }
5b6dd868 1544 if (p->code_bitmap) {
510a647f
EC
1545 unsigned int nr;
1546 unsigned long b;
1547
1548 nr = start & ~TARGET_PAGE_MASK;
1549 b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
5b6dd868
BS
1550 if (b & ((1 << len) - 1)) {
1551 goto do_invalidate;
1552 }
1553 } else {
1554 do_invalidate:
1555 tb_invalidate_phys_page_range(start, start + len, 1);
1556 }
1557}
6fad459c 1558#else
75809229
PM
1559/* Called with mmap_lock held. If pc is not 0 then it indicates the
1560 * host PC of the faulting store instruction that caused this invalidate.
1561 * Returns true if the caller needs to abort execution of the current
1562 * TB (because it was modified by this store and the guest CPU has
1563 * precise-SMC semantics).
1564 */
1565static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
5b6dd868
BS
1566{
1567 TranslationBlock *tb;
1568 PageDesc *p;
1569 int n;
1570#ifdef TARGET_HAS_PRECISE_SMC
1571 TranslationBlock *current_tb = NULL;
4917cf44
AF
1572 CPUState *cpu = current_cpu;
1573 CPUArchState *env = NULL;
5b6dd868
BS
1574 int current_tb_modified = 0;
1575 target_ulong current_pc = 0;
1576 target_ulong current_cs_base = 0;
89fee74a 1577 uint32_t current_flags = 0;
5b6dd868
BS
1578#endif
1579
ba051fb5
AB
1580 assert_memory_lock();
1581
5b6dd868
BS
1582 addr &= TARGET_PAGE_MASK;
1583 p = page_find(addr >> TARGET_PAGE_BITS);
1584 if (!p) {
75809229 1585 return false;
5b6dd868 1586 }
a5e99826
FK
1587
1588 tb_lock();
5b6dd868
BS
1589 tb = p->first_tb;
1590#ifdef TARGET_HAS_PRECISE_SMC
1591 if (tb && pc != 0) {
1592 current_tb = tb_find_pc(pc);
1593 }
4917cf44
AF
1594 if (cpu != NULL) {
1595 env = cpu->env_ptr;
d77953b9 1596 }
5b6dd868
BS
1597#endif
1598 while (tb != NULL) {
1599 n = (uintptr_t)tb & 3;
1600 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
1601#ifdef TARGET_HAS_PRECISE_SMC
1602 if (current_tb == tb &&
1603 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1604 /* If we are modifying the current TB, we must stop
1605 its execution. We could be more precise by checking
1606 that the modification is after the current PC, but it
1607 would require a specialized function to partially
1608 restore the CPU state */
1609
1610 current_tb_modified = 1;
74f10515 1611 cpu_restore_state_from_tb(cpu, current_tb, pc);
5b6dd868
BS
1612 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1613 &current_flags);
1614 }
1615#endif /* TARGET_HAS_PRECISE_SMC */
1616 tb_phys_invalidate(tb, addr);
1617 tb = tb->page_next[n];
1618 }
1619 p->first_tb = NULL;
1620#ifdef TARGET_HAS_PRECISE_SMC
1621 if (current_tb_modified) {
1622 /* we generate a block containing just the instruction
1623 modifying the memory. It will ensure that it cannot modify
1624 itself */
648f034c 1625 tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
a5e99826
FK
1626 /* tb_lock will be reset after cpu_loop_exit_noexc longjmps
1627 * back into the cpu_exec loop. */
75809229 1628 return true;
5b6dd868
BS
1629 }
1630#endif
a5e99826
FK
1631 tb_unlock();
1632
75809229 1633 return false;
5b6dd868
BS
1634}
1635#endif
1636
5b6dd868
BS
1637/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1638 tb[1].tc_ptr. Return NULL if not found */
a8a826a3 1639static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
5b6dd868
BS
1640{
1641 int m_min, m_max, m;
1642 uintptr_t v;
1643 TranslationBlock *tb;
1644
5e5f07e0 1645 if (tcg_ctx.tb_ctx.nb_tbs <= 0) {
5b6dd868
BS
1646 return NULL;
1647 }
0b0d3320
EV
1648 if (tc_ptr < (uintptr_t)tcg_ctx.code_gen_buffer ||
1649 tc_ptr >= (uintptr_t)tcg_ctx.code_gen_ptr) {
5b6dd868
BS
1650 return NULL;
1651 }
1652 /* binary search (cf Knuth) */
1653 m_min = 0;
5e5f07e0 1654 m_max = tcg_ctx.tb_ctx.nb_tbs - 1;
5b6dd868
BS
1655 while (m_min <= m_max) {
1656 m = (m_min + m_max) >> 1;
6e3b2bfd 1657 tb = tcg_ctx.tb_ctx.tbs[m];
5b6dd868
BS
1658 v = (uintptr_t)tb->tc_ptr;
1659 if (v == tc_ptr) {
1660 return tb;
1661 } else if (tc_ptr < v) {
1662 m_max = m - 1;
1663 } else {
1664 m_min = m + 1;
1665 }
1666 }
6e3b2bfd 1667 return tcg_ctx.tb_ctx.tbs[m_max];
5b6dd868
BS
1668}
1669
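/* The binary search above is valid because tbs[] holds TBs in allocation
 * order and host code is laid out linearly in code_gen_buffer, so tc_ptr
 * values increase monotonically with the array index.
 */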
ec53b45b 1670#if !defined(CONFIG_USER_ONLY)
29d8ec7b 1671void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
5b6dd868
BS
1672{
1673 ram_addr_t ram_addr;
5c8a00ce 1674 MemoryRegion *mr;
149f54b5 1675 hwaddr l = 1;
5b6dd868 1676
41063e1e 1677 rcu_read_lock();
29d8ec7b 1678 mr = address_space_translate(as, addr, &addr, &l, false);
5c8a00ce
PB
1679 if (!(memory_region_is_ram(mr)
1680 || memory_region_is_romd(mr))) {
41063e1e 1681 rcu_read_unlock();
5b6dd868
BS
1682 return;
1683 }
e4e69794 1684 ram_addr = memory_region_get_ram_addr(mr) + addr;
ba051fb5 1685 tb_lock();
5b6dd868 1686 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
ba051fb5 1687 tb_unlock();
41063e1e 1688 rcu_read_unlock();
5b6dd868 1689}
ec53b45b 1690#endif /* !defined(CONFIG_USER_ONLY) */
5b6dd868 1691
7d7500d9 1692/* Called with tb_lock held. */
239c51a5 1693void tb_check_watchpoint(CPUState *cpu)
5b6dd868
BS
1694{
1695 TranslationBlock *tb;
1696
93afeade 1697 tb = tb_find_pc(cpu->mem_io_pc);
8d302e76
AJ
1698 if (tb) {
1699 /* We can use retranslation to find the PC. */
1700 cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc);
1701 tb_phys_invalidate(tb, -1);
1702 } else {
1703 /* The exception probably happened in a helper. The CPU state should
1704 have been saved before calling it. Fetch the PC from there. */
1705 CPUArchState *env = cpu->env_ptr;
1706 target_ulong pc, cs_base;
1707 tb_page_addr_t addr;
89fee74a 1708 uint32_t flags;
8d302e76
AJ
1709
1710 cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
1711 addr = get_page_addr_code(env, pc);
1712 tb_invalidate_phys_range(addr, addr + 1);
5b6dd868 1713 }
5b6dd868
BS
1714}
1715
1716#ifndef CONFIG_USER_ONLY
5b6dd868 1717/* in deterministic execution mode, instructions doing device I/O
8d04fb55
JK
1718 * must be at the end of the TB.
1719 *
1720 * Called by softmmu_template.h, with iothread mutex not held.
1721 */
90b40a69 1722void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
5b6dd868 1723{
a47dddd7 1724#if defined(TARGET_MIPS) || defined(TARGET_SH4)
90b40a69 1725 CPUArchState *env = cpu->env_ptr;
a47dddd7 1726#endif
5b6dd868
BS
1727 TranslationBlock *tb;
1728 uint32_t n, cflags;
1729 target_ulong pc, cs_base;
89fee74a 1730 uint32_t flags;
5b6dd868 1731
a5e99826 1732 tb_lock();
5b6dd868
BS
1733 tb = tb_find_pc(retaddr);
1734 if (!tb) {
a47dddd7 1735 cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
5b6dd868
BS
1736 (void *)retaddr);
1737 }
28ecfd7a 1738 n = cpu->icount_decr.u16.low + tb->icount;
74f10515 1739 cpu_restore_state_from_tb(cpu, tb, retaddr);
5b6dd868
BS
1740 /* Calculate how many instructions had been executed before the fault
1741 occurred. */
28ecfd7a 1742 n = n - cpu->icount_decr.u16.low;
5b6dd868
BS
1743 /* Generate a new TB ending on the I/O insn. */
1744 n++;
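    /* Worked example: if the I/O access is the third insn of the block,
     * the restore above rewinds the counter so that the subtraction
     * yields n == 2 completed insns; n++ makes it 3, so the regenerated
     * TB holds exactly three insns and ends on the I/O access. */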
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB. If this is not
       the first instruction in a TB then re-execute the preceding
       branch. */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
        cpu->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        cpu->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen. */
    if (n > CF_COUNT_MASK) {
        cpu_abort(cpu, "TB too big during recompile");
    }

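    /* The low CF_COUNT_MASK bits of cflags carry the instruction budget
     * for the new TB; CF_LAST_IO tells the translator that the final
     * insn is allowed to perform I/O, so it is generated with the
     * can_do_io flag set. */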
    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    if (tb->cflags & CF_NOCACHE) {
        if (tb->orig_tb) {
            /* Invalidate original TB if this TB was generated in
             * cpu_exec_nocache() */
            tb_phys_invalidate(tb->orig_tb, -1);
        }
        tb_free(tb);
    }
    /* FIXME: In theory this could raise an exception. In practice
       we have already translated the block once so it's probably ok. */
    tb_gen_code(cpu, pc, cs_base, flags, cflags);

    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
     * the first in the TB) then we end up generating a whole new TB and
     * repeating the fault, which is horribly inefficient.
     * Better would be to execute just this insn uncached, or generate a
     * second new TB.
     *
     * cpu_loop_exit_noexc will longjmp back to cpu_exec where the
     * tb_lock gets reset.
     */
    cpu_loop_exit_noexc(cpu);
}

static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
{
    unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr);

    for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
        atomic_set(&cpu->tb_jmp_cache[i0 + i], NULL);
    }
}

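/* A TB can span at most two guest pages, so a block whose first byte
 * sits on the page before 'addr' may still overlap the flushed page;
 * both pages' cache slots are therefore purged. */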
void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
{
    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page. */
    tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
    tb_jmp_cache_clear_page(cpu, addr);
}

static void print_qht_statistics(FILE *f, fprintf_function cpu_fprintf,
                                 struct qht_stats hst)
{
    uint32_t hgram_opts;
    size_t hgram_bins;
    char *hgram;

    if (!hst.head_buckets) {
        return;
    }
    cpu_fprintf(f, "TB hash buckets     %zu/%zu (%0.2f%% head buckets used)\n",
                hst.used_head_buckets, hst.head_buckets,
                (double)hst.used_head_buckets / hst.head_buckets * 100);

    hgram_opts  = QDIST_PR_BORDER | QDIST_PR_LABELS;
    hgram_opts |= QDIST_PR_100X   | QDIST_PR_PERCENT;
    if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) {
        hgram_opts |= QDIST_PR_NODECIMAL;
    }
    hgram = qdist_pr(&hst.occupancy, 10, hgram_opts);
    cpu_fprintf(f, "TB hash occupancy   %0.2f%% avg chain occ. Histogram: %s\n",
                qdist_avg(&hst.occupancy) * 100, hgram);
    g_free(hgram);

    hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
    hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain);
    if (hgram_bins > 10) {
        hgram_bins = 10;
    } else {
        hgram_bins = 0;
        hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE;
    }
    hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts);
    cpu_fprintf(f, "TB hash avg chain   %0.3f buckets. Histogram: %s\n",
                qdist_avg(&hst.chain), hgram);
    g_free(hgram);
}

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;
    struct qht_stats hst;

    tb_lock();

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < tcg_ctx.tb_ctx.nb_tbs; i++) {
        tb = tcg_ctx.tb_ctx.tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size) {
            max_target_code_size = tb->size;
        }
        if (tb->page_addr[1] != -1) {
            cross_page++;
        }
        if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
            direct_jmp_count++;
            if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
                direct_jmp2_count++;
            }
        }
    }
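    /* jmp_reset_offset[n] is valid only when the translator emitted a
     * patchable goto_tb for outgoing edge n, so these counters measure
     * how many TBs can be chained directly without going back through
     * the epilogue. */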
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%zd\n",
                tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer,
                tcg_ctx.code_gen_highwater - tcg_ctx.code_gen_buffer);
    cpu_fprintf(f, "TB count            %d\n", tcg_ctx.tb_ctx.nb_tbs);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                tcg_ctx.tb_ctx.nb_tbs ? target_code_size /
                        tcg_ctx.tb_ctx.nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                tcg_ctx.tb_ctx.nb_tbs ? (tcg_ctx.code_gen_ptr -
                                         tcg_ctx.code_gen_buffer) /
                        tcg_ctx.tb_ctx.nb_tbs : 0,
                target_code_size ? (double) (tcg_ctx.code_gen_ptr -
                                             tcg_ctx.code_gen_buffer) /
                        target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n", cross_page,
                tcg_ctx.tb_ctx.nb_tbs ? (cross_page * 100) /
                        tcg_ctx.tb_ctx.nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp_count * 100) /
                        tcg_ctx.tb_ctx.nb_tbs : 0,
                direct_jmp2_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp2_count * 100) /
                        tcg_ctx.tb_ctx.nb_tbs : 0);

    qht_statistics_init(&tcg_ctx.tb_ctx.htable, &hst);
    print_qht_statistics(f, cpu_fprintf, hst);
    qht_statistics_destroy(&hst);

    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %u\n",
                atomic_read(&tcg_ctx.tb_ctx.tb_flush_count));
    cpu_fprintf(f, "TB invalidate count %d\n",
                tcg_ctx.tb_ctx.tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);

    tb_unlock();
}

void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf)
{
    tcg_dump_op_count(f, cpu_fprintf);
}

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *cpu, int mask)
{
    g_assert(qemu_mutex_iothread_locked());
    cpu->interrupt_request |= mask;
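    /* Setting the high half makes the 32-bit icount_decr word negative;
     * generated code polls that word at TB boundaries, so this forces a
     * prompt exit from the translated-code loop. */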
    cpu->icount_decr.u16.high = -1;
}

/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */
struct walk_memory_regions_data {
    walk_memory_regions_fn fn;
    void *priv;
    target_ulong start;
    int prot;
};

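/* Flush the region accumulated so far (if any) through the callback,
 * then either start a new region at 'end' with protection 'new_prot'
 * or mark the walker idle (start == -1) when new_prot is 0. */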
static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   target_ulong end, int new_prot)
{
    if (data->start != -1u) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1u);
    data->prot = new_prot;

    return 0;
}

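/* Recursively walk one node of the multi-level l1_map page table:
 * a level-0 node is an array of PageDesc entries, while higher levels
 * are arrays of V_L2_SIZE child pointers. */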
static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 target_ulong base, int level, void **lp)
{
    target_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pa = base | ((target_ulong)i <<
                         (TARGET_PAGE_BITS + V_L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    uintptr_t i, l1_sz = v_l1_size;

    data.fn = fn;
    data.priv = priv;
    data.start = -1u;
    data.prot = 0;

    for (i = 0; i < l1_sz; i++) {
        target_ulong base = i << (v_l1_shift + TARGET_PAGE_BITS);
        int rc = walk_memory_regions_1(&data, base, v_l2_levels, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}

static int dump_region(void *priv, target_ulong start,
                       target_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
                   " "TARGET_FMT_lx" %c%c%c\n",
                   start, end, end - start,
                   ((prot & PAGE_READ) ? 'r' : '-'),
                   ((prot & PAGE_WRITE) ? 'w' : '-'),
                   ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    const int length = sizeof(target_ulong) * 2;
    (void) fprintf(f, "%-*s %-*s %-*s %s\n",
                   length, "start", length, "end", length, "size", "prot");
    walk_memory_regions(f, dump_region);
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        return 0;
    }
    return p->flags;
}

/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is set automatically depending
   on PAGE_WRITE. The mmap_lock should already be held. */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space. If this assert fires, it probably indicates
       a missing call to h2g_valid. */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);
    assert_memory_lock();

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside. */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0);
        }
        p->flags = flags;
    }
}

int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space. If this assert fires, it probably indicates
       a missing call to h2g_valid. */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around. */
        return -1;
    }

    /* must do before we lose bits in the next step */
    end = TARGET_PAGE_ALIGN(start + len);
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0)) {
                    return -1;
                }
            }
        }
    }
    return 0;
}

/* called from signal handler: invalidate the code and unprotect the
 * page. Return 0 if the fault was not handled, 1 if it was handled,
 * and 2 if it was handled but the caller must cause the TB to be
 * immediately exited. (We can only return 2 if the 'pc' argument is
 * non-zero.)
 */
int page_unprotect(target_ulong address, uintptr_t pc)
{
    unsigned int prot;
    bool current_tb_invalidated;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler. However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok. */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        current_tb_invalidated = false;
        for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            current_tb_invalidated |= tb_invalidate_phys_page(addr, pc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        /* If current TB was invalidated return to main loop */
        return current_tb_invalidated ? 2 : 1;
    }
    mmap_unlock();
    return 0;
}
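
/* A minimal sketch of the expected caller, the user-mode SEGV handler
 * (names and structure here are illustrative, not the exact code):
 *
 *     switch (page_unprotect(h2g(fault_addr), pc)) {
 *     case 1:
 *         return 1;                    // just retry the faulting write
 *     case 2:
 *         cpu_exit_tb_from_sighandler(cpu, old_set);  // current TB gone
 *     default:
 *         break;                       // genuine guest fault
 *     }
 */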
#endif /* CONFIG_USER_ONLY */

/* This is a wrapper for common code that can not use CONFIG_SOFTMMU */
void tcg_flush_softmmu_tlb(CPUState *cs)
{
#ifdef CONFIG_SOFTMMU
    tlb_flush(cs);
#endif
}