Commit | Line | Data |
---|---|---|
d19893da FB |
1 | /* |
2 | * Host code generation | |
5fafdf24 | 3 | * |
d19893da FB |
4 | * Copyright (c) 2003 Fabrice Bellard |
5 | * | |
6 | * This library is free software; you can redistribute it and/or | |
7 | * modify it under the terms of the GNU Lesser General Public | |
8 | * License as published by the Free Software Foundation; either | |
9 | * version 2 of the License, or (at your option) any later version. | |
10 | * | |
11 | * This library is distributed in the hope that it will be useful, | |
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
14 | * Lesser General Public License for more details. | |
15 | * | |
16 | * You should have received a copy of the GNU Lesser General Public | |
8167ee88 | 17 | * License along with this library; if not, see <http://www.gnu.org/licenses/>. |
d19893da | 18 | */ |
5b6dd868 BS |
19 | #ifdef _WIN32 |
20 | #include <windows.h> | |
21 | #else | |
22 | #include <sys/types.h> | |
23 | #include <sys/mman.h> | |
24 | #endif | |
d19893da FB |
25 | #include <stdarg.h> |
26 | #include <stdlib.h> | |
27 | #include <stdio.h> | |
28 | #include <string.h> | |
29 | #include <inttypes.h> | |
30 | ||
31 | #include "config.h" | |
2054396a | 32 | |
5b6dd868 | 33 | #include "qemu-common.h" |
af5ad107 | 34 | #define NO_CPU_IO_DEFS |
d3eead2e | 35 | #include "cpu.h" |
6db8b538 | 36 | #include "trace.h" |
76cad711 | 37 | #include "disas/disas.h" |
57fec1fe | 38 | #include "tcg.h" |
5b6dd868 BS |
39 | #if defined(CONFIG_USER_ONLY) |
40 | #include "qemu.h" | |
41 | #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) | |
42 | #include <sys/param.h> | |
43 | #if __FreeBSD_version >= 700104 | |
44 | #define HAVE_KINFO_GETVMMAP | |
45 | #define sigqueue sigqueue_freebsd /* avoid redefinition */ | |
46 | #include <sys/time.h> | |
47 | #include <sys/proc.h> | |
48 | #include <machine/profile.h> | |
49 | #define _KERNEL | |
50 | #include <sys/user.h> | |
51 | #undef _KERNEL | |
52 | #undef sigqueue | |
53 | #include <libutil.h> | |
54 | #endif | |
55 | #endif | |
0bc3cd62 PB |
56 | #else |
57 | #include "exec/address-spaces.h" | |
5b6dd868 BS |
58 | #endif |
59 | ||
022c62cb | 60 | #include "exec/cputlb.h" |
e1b89321 | 61 | #include "exec/tb-hash.h" |
5b6dd868 | 62 | #include "translate-all.h" |
510a647f | 63 | #include "qemu/bitmap.h" |
0aa09897 | 64 | #include "qemu/timer.h" |
5b6dd868 BS |
65 | |
66 | //#define DEBUG_TB_INVALIDATE | |
67 | //#define DEBUG_FLUSH | |
68 | /* make various TB consistency checks */ | |
69 | //#define DEBUG_TB_CHECK | |
70 | ||
71 | #if !defined(CONFIG_USER_ONLY) | |
72 | /* TB consistency checks only implemented for usermode emulation. */ | |
73 | #undef DEBUG_TB_CHECK | |
74 | #endif | |
75 | ||
76 | #define SMC_BITMAP_USE_THRESHOLD 10 | |
77 | ||
5b6dd868 BS |
78 | typedef struct PageDesc { |
79 | /* list of TBs intersecting this ram page */ | |
80 | TranslationBlock *first_tb; | |
81 | /* in order to optimize self-modifying code, we count the number |
82 | of writes to a given page before building a bitmap */ |
83 | unsigned int code_write_count; | |
510a647f | 84 | unsigned long *code_bitmap; |
5b6dd868 BS |
85 | #if defined(CONFIG_USER_ONLY) |
86 | unsigned long flags; | |
87 | #endif | |
88 | } PageDesc; | |
89 | ||
90 | /* In system mode we want L1_MAP to be based on ram offsets, | |
91 | while in user mode we want it to be based on virtual addresses. */ | |
92 | #if !defined(CONFIG_USER_ONLY) | |
93 | #if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS | |
94 | # define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS | |
95 | #else | |
96 | # define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS | |
97 | #endif | |
98 | #else | |
99 | # define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS | |
100 | #endif | |
101 | ||
03f49957 PB |
102 | /* Size of the L2 (and L3, etc) page tables. */ |
103 | #define V_L2_BITS 10 | |
104 | #define V_L2_SIZE (1 << V_L2_BITS) | |
105 | ||
5b6dd868 BS |
106 | /* The bits remaining after N lower levels of page tables. */ |
107 | #define V_L1_BITS_REM \ | |
03f49957 | 108 | ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS) |
5b6dd868 BS |
109 | |
110 | #if V_L1_BITS_REM < 4 | |
03f49957 | 111 | #define V_L1_BITS (V_L1_BITS_REM + V_L2_BITS) |
5b6dd868 BS |
112 | #else |
113 | #define V_L1_BITS V_L1_BITS_REM | |
114 | #endif | |
115 | ||
116 | #define V_L1_SIZE ((target_ulong)1 << V_L1_BITS) | |
117 | ||
118 | #define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS) | |
119 | ||
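To make the sizing concrete, here is a small worked example of what these macros evaluate to — a sketch only, assuming a 48-bit L1_MAP_ADDR_SPACE_BITS and 4KB target pages; the real values depend on the target and host configuration:

```c
#include <stdio.h>

/* Hypothetical values for illustration only. */
#define L1_MAP_ADDR_SPACE_BITS 48
#define TARGET_PAGE_BITS       12
#define V_L2_BITS              10

int main(void)
{
    int rem = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS;  /* 36 % 10 = 6 */
    int l1_bits = rem < 4 ? rem + V_L2_BITS : rem;                      /* 6 */
    int l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - l1_bits; /* 30 */

    printf("V_L1_BITS=%d V_L1_SIZE=%d V_L1_SHIFT=%d levels below L1=%d\n",
           l1_bits, 1 << l1_bits, l1_shift, l1_shift / V_L2_BITS);      /* 6 64 30 3 */
    return 0;
}
```

With those inputs the map is a 64-entry L1 table followed by three 1024-entry levels: two intermediate pointer levels plus the PageDesc leaf level.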
5b6dd868 | 120 | uintptr_t qemu_host_page_size; |
0c2d70c4 | 121 | intptr_t qemu_host_page_mask; |
5b6dd868 | 122 | |
d1142fb8 | 123 | /* The bottom level has pointers to PageDesc */ |
5b6dd868 BS |
124 | static void *l1_map[V_L1_SIZE]; |
125 | ||
57fec1fe FB |
126 | /* code generation context */ |
127 | TCGContext tcg_ctx; | |
d19893da | 128 | |
677ef623 FK |
129 | /* translation block context */ |
130 | #ifdef CONFIG_USER_ONLY | |
131 | __thread int have_tb_lock; | |
132 | #endif | |
133 | ||
134 | void tb_lock(void) | |
135 | { | |
136 | #ifdef CONFIG_USER_ONLY | |
137 | assert(!have_tb_lock); | |
138 | qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock); | |
139 | have_tb_lock++; | |
140 | #endif | |
141 | } | |
142 | ||
143 | void tb_unlock(void) | |
144 | { | |
145 | #ifdef CONFIG_USER_ONLY | |
146 | assert(have_tb_lock); | |
147 | have_tb_lock--; | |
148 | qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock); | |
149 | #endif | |
150 | } | |
151 | ||
152 | void tb_lock_reset(void) | |
153 | { | |
154 | #ifdef CONFIG_USER_ONLY | |
155 | if (have_tb_lock) { | |
156 | qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock); | |
157 | have_tb_lock = 0; | |
158 | } | |
159 | #endif | |
160 | } | |
161 | ||
5b6dd868 BS |
162 | static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc, |
163 | tb_page_addr_t phys_page2); | |
a8a826a3 | 164 | static TranslationBlock *tb_find_pc(uintptr_t tc_ptr); |
5b6dd868 | 165 | |
57fec1fe FB |
166 | void cpu_gen_init(void) |
167 | { | |
168 | tcg_context_init(&tcg_ctx); | |
57fec1fe FB |
169 | } |
170 | ||
fca8a500 RH |
171 | /* Encode VAL as a signed leb128 sequence at P. |
172 | Return P incremented past the encoded value. */ | |
173 | static uint8_t *encode_sleb128(uint8_t *p, target_long val) | |
174 | { | |
175 | int more, byte; | |
176 | ||
177 | do { | |
178 | byte = val & 0x7f; | |
179 | val >>= 7; | |
180 | more = !((val == 0 && (byte & 0x40) == 0) | |
181 | || (val == -1 && (byte & 0x40) != 0)); | |
182 | if (more) { | |
183 | byte |= 0x80; | |
184 | } | |
185 | *p++ = byte; | |
186 | } while (more); | |
187 | ||
188 | return p; | |
189 | } | |
190 | ||
191 | /* Decode a signed leb128 sequence at *PP; increment *PP past the | |
192 | decoded value. Return the decoded value. */ | |
193 | static target_long decode_sleb128(uint8_t **pp) | |
194 | { | |
195 | uint8_t *p = *pp; | |
196 | target_long val = 0; | |
197 | int byte, shift = 0; | |
198 | ||
199 | do { | |
200 | byte = *p++; | |
201 | val |= (target_ulong)(byte & 0x7f) << shift; | |
202 | shift += 7; | |
203 | } while (byte & 0x80); | |
204 | if (shift < TARGET_LONG_BITS && (byte & 0x40)) { | |
205 | val |= -(target_ulong)1 << shift; | |
206 | } | |
207 | ||
208 | *pp = p; | |
209 | return val; | |
210 | } | |
211 | ||
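Below is a self-contained round trip of the signed LEB128 scheme implemented by encode_sleb128()/decode_sleb128() above — a sketch that uses int64_t in place of target_long so it can be compiled on its own:

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Same algorithm as encode_sleb128()/decode_sleb128(), on int64_t. */
static uint8_t *sleb128_put(uint8_t *p, int64_t val)
{
    int more, byte;
    do {
        byte = val & 0x7f;
        val >>= 7;
        more = !((val == 0 && (byte & 0x40) == 0)
                 || (val == -1 && (byte & 0x40) != 0));
        if (more) {
            byte |= 0x80;              /* continuation bit */
        }
        *p++ = byte;
    } while (more);
    return p;
}

static int64_t sleb128_get(const uint8_t **pp)
{
    const uint8_t *p = *pp;
    int64_t val = 0;
    int byte, shift = 0;
    do {
        byte = *p++;
        val |= (uint64_t)(byte & 0x7f) << shift;
        shift += 7;
    } while (byte & 0x80);
    if (shift < 64 && (byte & 0x40)) {
        val |= -(uint64_t)1 << shift;  /* sign-extend */
    }
    *pp = p;
    return val;
}

int main(void)
{
    int64_t samples[] = { 0, 1, -1, 63, 64, -65, 4096, -123456789 };
    uint8_t buf[16];
    for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
        uint8_t *end = sleb128_put(buf, samples[i]);
        const uint8_t *q = buf;
        assert(sleb128_get(&q) == samples[i] && q == end);
        printf("%lld -> %d byte(s)\n", (long long)samples[i], (int)(end - buf));
    }
    return 0;
}
```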
212 | /* Encode the data collected about the instructions while compiling TB. | |
213 | Place the data at BLOCK, and return the number of bytes consumed. | |
214 | ||
215 | The logical table consisits of TARGET_INSN_START_WORDS target_ulong's, | |
216 | which come from the target's insn_start data, followed by a uintptr_t | |
217 | which comes from the host pc of the end of the code implementing the insn. | |
218 | ||
219 | Each line of the table is encoded as sleb128 deltas from the previous | |
220 | line. The seed for the first line is { tb->pc, 0..., tb->tc_ptr }. | |
221 | That is, the first column is seeded with the guest pc, the last column | |
222 | with the host pc, and the middle columns with zeros. */ | |
223 | ||
224 | static int encode_search(TranslationBlock *tb, uint8_t *block) | |
225 | { | |
b125f9dc | 226 | uint8_t *highwater = tcg_ctx.code_gen_highwater; |
fca8a500 RH |
227 | uint8_t *p = block; |
228 | int i, j, n; | |
229 | ||
230 | tb->tc_search = block; | |
231 | ||
232 | for (i = 0, n = tb->icount; i < n; ++i) { | |
233 | target_ulong prev; | |
234 | ||
235 | for (j = 0; j < TARGET_INSN_START_WORDS; ++j) { | |
236 | if (i == 0) { | |
237 | prev = (j == 0 ? tb->pc : 0); | |
238 | } else { | |
239 | prev = tcg_ctx.gen_insn_data[i - 1][j]; | |
240 | } | |
241 | p = encode_sleb128(p, tcg_ctx.gen_insn_data[i][j] - prev); | |
242 | } | |
243 | prev = (i == 0 ? 0 : tcg_ctx.gen_insn_end_off[i - 1]); | |
244 | p = encode_sleb128(p, tcg_ctx.gen_insn_end_off[i] - prev); | |
b125f9dc RH |
245 | |
246 | /* Test for (pending) buffer overflow. The assumption is that any | |
247 | one row beginning below the high water mark cannot overrun | |
248 | the buffer completely. Thus we can test for overflow after | |
249 | encoding a row without having to check during encoding. */ | |
250 | if (unlikely(p > highwater)) { | |
251 | return -1; | |
252 | } | |
fca8a500 RH |
253 | } |
254 | ||
255 | return p - block; | |
256 | } | |
257 | ||
fec88f64 | 258 | /* The cpu state corresponding to 'searched_pc' is restored. */ |
74f10515 | 259 | static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb, |
a8a826a3 | 260 | uintptr_t searched_pc) |
d19893da | 261 | { |
fca8a500 RH |
262 | target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc }; |
263 | uintptr_t host_pc = (uintptr_t)tb->tc_ptr; | |
74f10515 | 264 | CPUArchState *env = cpu->env_ptr; |
fca8a500 RH |
265 | uint8_t *p = tb->tc_search; |
266 | int i, j, num_insns = tb->icount; | |
57fec1fe | 267 | #ifdef CONFIG_PROFILER |
fca8a500 | 268 | int64_t ti = profile_getclock(); |
57fec1fe FB |
269 | #endif |
270 | ||
fca8a500 RH |
271 | if (searched_pc < host_pc) { |
272 | return -1; | |
273 | } | |
d19893da | 274 | |
fca8a500 RH |
275 | /* Reconstruct the stored insn data while looking for the point at |
276 | which the end of the insn exceeds the searched_pc. */ | |
277 | for (i = 0; i < num_insns; ++i) { | |
278 | for (j = 0; j < TARGET_INSN_START_WORDS; ++j) { | |
279 | data[j] += decode_sleb128(&p); | |
280 | } | |
281 | host_pc += decode_sleb128(&p); | |
282 | if (host_pc > searched_pc) { | |
283 | goto found; | |
284 | } | |
285 | } | |
286 | return -1; | |
3b46e624 | 287 | |
fca8a500 | 288 | found: |
bd79255d | 289 | if (tb->cflags & CF_USE_ICOUNT) { |
414b15c9 | 290 | assert(use_icount); |
2e70f6ef | 291 | /* Reset the cycle counter to the start of the block. */ |
fca8a500 | 292 | cpu->icount_decr.u16.low += num_insns; |
2e70f6ef | 293 | /* Clear the IO flag. */ |
99df7dce | 294 | cpu->can_do_io = 0; |
2e70f6ef | 295 | } |
fca8a500 RH |
296 | cpu->icount_decr.u16.low -= i; |
297 | restore_state_to_opc(env, tb, data); | |
57fec1fe FB |
298 | |
299 | #ifdef CONFIG_PROFILER | |
fca8a500 RH |
300 | tcg_ctx.restore_time += profile_getclock() - ti; |
301 | tcg_ctx.restore_count++; | |
57fec1fe | 302 | #endif |
d19893da FB |
303 | return 0; |
304 | } | |
5b6dd868 | 305 | |
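The loop above accumulates per-instruction deltas until the end-of-insn host PC first exceeds searched_pc. The following stand-alone sketch shows the same search over already-decoded rows (the struct and data are hypothetical, not QEMU types):

```c
#include <stdint.h>
#include <stdio.h>

struct insn_row {
    uint64_t guest_pc;   /* first column of the search table */
    uintptr_t host_end;  /* host pc just past the insn's generated code */
};

/* Return the index of the insn whose generated code covers searched_pc,
 * or -1 if it lies outside the block, mirroring the loop in
 * cpu_restore_state_from_tb(). */
static int find_insn(const struct insn_row *rows, int n, uintptr_t searched_pc)
{
    for (int i = 0; i < n; i++) {
        if (rows[i].host_end > searched_pc) {
            return i;
        }
    }
    return -1;
}

int main(void)
{
    /* Hypothetical block: three guest insns whose host code ends at 0x40, 0x70, 0xa0. */
    struct insn_row rows[] = {
        { 0x1000, 0x40 }, { 0x1004, 0x70 }, { 0x1008, 0xa0 },
    };
    printf("insn index for host pc 0x55: %d\n", find_insn(rows, 3, 0x55)); /* 1 */
    return 0;
}
```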
3f38f309 | 306 | bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr) |
a8a826a3 BS |
307 | { |
308 | TranslationBlock *tb; | |
309 | ||
310 | tb = tb_find_pc(retaddr); | |
311 | if (tb) { | |
74f10515 | 312 | cpu_restore_state_from_tb(cpu, tb, retaddr); |
d8a499f1 PD |
313 | if (tb->cflags & CF_NOCACHE) { |
314 | /* one-shot translation, invalidate it immediately */ | |
315 | cpu->current_tb = NULL; | |
316 | tb_phys_invalidate(tb, -1); | |
317 | tb_free(tb); | |
318 | } | |
a8a826a3 BS |
319 | return true; |
320 | } | |
321 | return false; | |
322 | } | |
323 | ||
47c16ed5 | 324 | void page_size_init(void) |
5b6dd868 BS |
325 | { |
326 | /* NOTE: we can always suppose that qemu_host_page_size >= | |
327 | TARGET_PAGE_SIZE */ | |
5b6dd868 | 328 | qemu_real_host_page_size = getpagesize(); |
0c2d70c4 | 329 | qemu_real_host_page_mask = -(intptr_t)qemu_real_host_page_size; |
5b6dd868 BS |
330 | if (qemu_host_page_size == 0) { |
331 | qemu_host_page_size = qemu_real_host_page_size; | |
332 | } | |
333 | if (qemu_host_page_size < TARGET_PAGE_SIZE) { | |
334 | qemu_host_page_size = TARGET_PAGE_SIZE; | |
335 | } | |
0c2d70c4 | 336 | qemu_host_page_mask = -(intptr_t)qemu_host_page_size; |
47c16ed5 | 337 | } |
5b6dd868 | 338 | |
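The mask is simply the negated page size, so ANDing an address with it rounds the address down to a page boundary. A small illustration (4096 is only an example host page size):

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    intptr_t page_size = 4096;            /* example value */
    intptr_t page_mask = -page_size;      /* ...fffff000: clears the low 12 bits */
    uintptr_t addr = 0x12345678;

    printf("mask      = %#lx\n", (unsigned long)page_mask);
    printf("page base = %#lx\n", (unsigned long)(addr & page_mask));  /* 0x12345000 */
    printf("offset    = %#lx\n", (unsigned long)(addr & ~page_mask)); /* 0x678 */
    return 0;
}
```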
47c16ed5 AK |
339 | static void page_init(void) |
340 | { | |
341 | page_size_init(); | |
5b6dd868 BS |
342 | #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY) |
343 | { | |
344 | #ifdef HAVE_KINFO_GETVMMAP | |
345 | struct kinfo_vmentry *freep; | |
346 | int i, cnt; | |
347 | ||
348 | freep = kinfo_getvmmap(getpid(), &cnt); | |
349 | if (freep) { | |
350 | mmap_lock(); | |
351 | for (i = 0; i < cnt; i++) { | |
352 | unsigned long startaddr, endaddr; | |
353 | ||
354 | startaddr = freep[i].kve_start; | |
355 | endaddr = freep[i].kve_end; | |
356 | if (h2g_valid(startaddr)) { | |
357 | startaddr = h2g(startaddr) & TARGET_PAGE_MASK; | |
358 | ||
359 | if (h2g_valid(endaddr)) { | |
360 | endaddr = h2g(endaddr); | |
361 | page_set_flags(startaddr, endaddr, PAGE_RESERVED); | |
362 | } else { | |
363 | #if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS | |
364 | endaddr = ~0ul; | |
365 | page_set_flags(startaddr, endaddr, PAGE_RESERVED); | |
366 | #endif | |
367 | } | |
368 | } | |
369 | } | |
370 | free(freep); | |
371 | mmap_unlock(); | |
372 | } | |
373 | #else | |
374 | FILE *f; | |
375 | ||
376 | last_brk = (unsigned long)sbrk(0); | |
377 | ||
378 | f = fopen("/compat/linux/proc/self/maps", "r"); | |
379 | if (f) { | |
380 | mmap_lock(); | |
381 | ||
382 | do { | |
383 | unsigned long startaddr, endaddr; | |
384 | int n; | |
385 | ||
386 | n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr); | |
387 | ||
388 | if (n == 2 && h2g_valid(startaddr)) { | |
389 | startaddr = h2g(startaddr) & TARGET_PAGE_MASK; | |
390 | ||
391 | if (h2g_valid(endaddr)) { | |
392 | endaddr = h2g(endaddr); | |
393 | } else { | |
394 | endaddr = ~0ul; | |
395 | } | |
396 | page_set_flags(startaddr, endaddr, PAGE_RESERVED); | |
397 | } | |
398 | } while (!feof(f)); | |
399 | ||
400 | fclose(f); | |
401 | mmap_unlock(); | |
402 | } | |
403 | #endif | |
404 | } | |
405 | #endif | |
406 | } | |
407 | ||
75692087 PB |
408 | /* If alloc=1: |
409 | * Called with mmap_lock held for user-mode emulation. | |
410 | */ | |
5b6dd868 BS |
411 | static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc) |
412 | { | |
413 | PageDesc *pd; | |
414 | void **lp; | |
415 | int i; | |
416 | ||
5b6dd868 BS |
417 | /* Level 1. Always allocated. */ |
418 | lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1)); | |
419 | ||
420 | /* Level 2..N-1. */ | |
03f49957 | 421 | for (i = V_L1_SHIFT / V_L2_BITS - 1; i > 0; i--) { |
6940fab8 | 422 | void **p = atomic_rcu_read(lp); |
5b6dd868 BS |
423 | |
424 | if (p == NULL) { | |
425 | if (!alloc) { | |
426 | return NULL; | |
427 | } | |
e3a0abfd | 428 | p = g_new0(void *, V_L2_SIZE); |
6940fab8 | 429 | atomic_rcu_set(lp, p); |
5b6dd868 BS |
430 | } |
431 | ||
03f49957 | 432 | lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1)); |
5b6dd868 BS |
433 | } |
434 | ||
6940fab8 | 435 | pd = atomic_rcu_read(lp); |
5b6dd868 BS |
436 | if (pd == NULL) { |
437 | if (!alloc) { | |
438 | return NULL; | |
439 | } | |
e3a0abfd | 440 | pd = g_new0(PageDesc, V_L2_SIZE); |
6940fab8 | 441 | atomic_rcu_set(lp, pd); |
5b6dd868 BS |
442 | } |
443 | ||
03f49957 | 444 | return pd + (index & (V_L2_SIZE - 1)); |
5b6dd868 BS |
445 | } |
446 | ||
447 | static inline PageDesc *page_find(tb_page_addr_t index) | |
448 | { | |
449 | return page_find_alloc(index, 0); | |
450 | } | |
451 | ||
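Each level of the lookup above peels off one slice of the page index. A sketch of the index arithmetic, reusing the hypothetical sizing from the earlier example (V_L1_SHIFT = 30, V_L1_BITS = 6, V_L2_BITS = 10):

```c
#include <stdint.h>
#include <stdio.h>

#define V_L2_BITS  10
#define V_L2_SIZE  (1 << V_L2_BITS)
#define V_L1_BITS  6
#define V_L1_SIZE  (1 << V_L1_BITS)
#define V_L1_SHIFT 30   /* assumes the 48-bit example above */

int main(void)
{
    uint64_t index = 0x123456789aULL >> 12;   /* page index of some address */

    printf("L1 index:   %llu\n",
           (unsigned long long)((index >> V_L1_SHIFT) & (V_L1_SIZE - 1)));
    /* Intermediate levels, highest first, exactly as in the for () loop above. */
    for (int i = V_L1_SHIFT / V_L2_BITS - 1; i > 0; i--) {
        printf("L%d index:   %llu\n", V_L1_SHIFT / V_L2_BITS - i + 1,
               (unsigned long long)((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1)));
    }
    printf("leaf index: %llu\n", (unsigned long long)(index & (V_L2_SIZE - 1)));
    return 0;
}
```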
5b6dd868 BS |
452 | #if defined(CONFIG_USER_ONLY) |
453 | /* Currently it is not recommended to allocate big chunks of data in | |
454 | user mode. It will change when a dedicated libc is used. */ |
455 | /* ??? 64-bit hosts ought to have no problem mmaping data outside the | |
456 | region in which the guest needs to run. Revisit this. */ | |
457 | #define USE_STATIC_CODE_GEN_BUFFER | |
458 | #endif | |
459 | ||
5b6dd868 BS |
460 | /* Minimum size of the code gen buffer. This number is randomly chosen, |
461 | but not so small that we can't have a fair number of TB's live. */ | |
462 | #define MIN_CODE_GEN_BUFFER_SIZE (1024u * 1024) | |
463 | ||
464 | /* Maximum size of the code gen buffer we'd like to use. Unless otherwise | |
465 | indicated, this is constrained by the range of direct branches on the | |
466 | host cpu, as used by the TCG implementation of goto_tb. */ | |
467 | #if defined(__x86_64__) | |
468 | # define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024) | |
469 | #elif defined(__sparc__) | |
470 | # define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024) | |
5bfd75a3 RH |
471 | #elif defined(__powerpc64__) |
472 | # define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024) | |
4a136e0a CF |
473 | #elif defined(__aarch64__) |
474 | # define MAX_CODE_GEN_BUFFER_SIZE (128ul * 1024 * 1024) | |
5b6dd868 BS |
475 | #elif defined(__arm__) |
476 | # define MAX_CODE_GEN_BUFFER_SIZE (16u * 1024 * 1024) | |
477 | #elif defined(__s390x__) | |
478 | /* We have a +- 4GB range on the branches; leave some slop. */ | |
479 | # define MAX_CODE_GEN_BUFFER_SIZE (3ul * 1024 * 1024 * 1024) | |
479eb121 RH |
480 | #elif defined(__mips__) |
481 | /* We have a 256MB branch region, but leave room to make sure the | |
482 | main executable is also within that region. */ | |
483 | # define MAX_CODE_GEN_BUFFER_SIZE (128ul * 1024 * 1024) | |
5b6dd868 BS |
484 | #else |
485 | # define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1) | |
486 | #endif | |
487 | ||
488 | #define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024) | |
489 | ||
490 | #define DEFAULT_CODE_GEN_BUFFER_SIZE \ | |
491 | (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \ | |
492 | ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE) | |
493 | ||
494 | static inline size_t size_code_gen_buffer(size_t tb_size) | |
495 | { | |
496 | /* Size the buffer. */ | |
497 | if (tb_size == 0) { | |
498 | #ifdef USE_STATIC_CODE_GEN_BUFFER | |
499 | tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE; | |
500 | #else | |
501 | /* ??? Needs adjustments. */ | |
502 | /* ??? If we relax the requirement that CONFIG_USER_ONLY use the | |
503 | static buffer, we could size this on RESERVED_VA, on the text | |
504 | segment size of the executable, or continue to use the default. */ | |
505 | tb_size = (unsigned long)(ram_size / 4); | |
506 | #endif | |
507 | } | |
508 | if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) { | |
509 | tb_size = MIN_CODE_GEN_BUFFER_SIZE; | |
510 | } | |
511 | if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) { | |
512 | tb_size = MAX_CODE_GEN_BUFFER_SIZE; | |
513 | } | |
0b0d3320 | 514 | tcg_ctx.code_gen_buffer_size = tb_size; |
5b6dd868 BS |
515 | return tb_size; |
516 | } | |
517 | ||
483c76e1 RH |
518 | #ifdef __mips__ |
519 | /* In order to use J and JAL within the code_gen_buffer, we require | |
520 | that the buffer not cross a 256MB boundary. */ | |
521 | static inline bool cross_256mb(void *addr, size_t size) | |
522 | { | |
523 | return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & 0xf0000000; | |
524 | } | |
525 | ||
526 | /* We weren't able to allocate a buffer without crossing that boundary, | |
527 | so make do with the larger portion of the buffer that doesn't cross. | |
528 | Returns the new base of the buffer, and adjusts code_gen_buffer_size. */ | |
529 | static inline void *split_cross_256mb(void *buf1, size_t size1) | |
530 | { | |
531 | void *buf2 = (void *)(((uintptr_t)buf1 + size1) & 0xf0000000); | |
532 | size_t size2 = buf1 + size1 - buf2; | |
533 | ||
534 | size1 = buf2 - buf1; | |
535 | if (size1 < size2) { | |
536 | size1 = size2; | |
537 | buf1 = buf2; | |
538 | } | |
539 | ||
540 | tcg_ctx.code_gen_buffer_size = size1; | |
541 | return buf1; | |
542 | } | |
543 | #endif | |
544 | ||
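The boundary test above can be exercised in isolation. A sketch with made-up addresses — on MIPS, J/JAL can only reach targets in the same 256MB-aligned region, which is why the buffer must not straddle one:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* True if [addr, addr + size) straddles a 256MB-aligned boundary,
 * using the same test as cross_256mb() above. */
static bool crosses_256mb(uintptr_t addr, size_t size)
{
    return (addr ^ (addr + size)) & 0xf0000000;
}

int main(void)
{
    /* 0x0fff0000 + 128KB ends at 0x10010000: the region changes, so it crosses. */
    printf("%d\n", crosses_256mb(0x0fff0000u, 128 * 1024));   /* 1 */
    /* 0x10000000 + 128KB stays below 0x20000000: no crossing. */
    printf("%d\n", crosses_256mb(0x10000000u, 128 * 1024));   /* 0 */
    return 0;
}
```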
5b6dd868 BS |
545 | #ifdef USE_STATIC_CODE_GEN_BUFFER |
546 | static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE] | |
547 | __attribute__((aligned(CODE_GEN_ALIGN))); | |
548 | ||
f293709c RH |
549 | # ifdef _WIN32 |
550 | static inline void do_protect(void *addr, long size, int prot) | |
551 | { | |
552 | DWORD old_protect; | |
553 | VirtualProtect(addr, size, prot, &old_protect); | |
554 | } | |
555 | ||
556 | static inline void map_exec(void *addr, long size) | |
557 | { | |
558 | do_protect(addr, size, PAGE_EXECUTE_READWRITE); | |
559 | } | |
560 | ||
561 | static inline void map_none(void *addr, long size) | |
562 | { | |
563 | do_protect(addr, size, PAGE_NOACCESS); | |
564 | } | |
565 | # else | |
566 | static inline void do_protect(void *addr, long size, int prot) | |
567 | { | |
568 | uintptr_t start, end; | |
569 | ||
570 | start = (uintptr_t)addr; | |
571 | start &= qemu_real_host_page_mask; | |
572 | ||
573 | end = (uintptr_t)addr + size; | |
574 | end = ROUND_UP(end, qemu_real_host_page_size); | |
575 | ||
576 | mprotect((void *)start, end - start, prot); | |
577 | } | |
578 | ||
579 | static inline void map_exec(void *addr, long size) | |
580 | { | |
581 | do_protect(addr, size, PROT_READ | PROT_WRITE | PROT_EXEC); | |
582 | } | |
583 | ||
584 | static inline void map_none(void *addr, long size) | |
585 | { | |
586 | do_protect(addr, size, PROT_NONE); | |
587 | } | |
588 | # endif /* WIN32 */ | |
589 | ||
5b6dd868 BS |
590 | static inline void *alloc_code_gen_buffer(void) |
591 | { | |
483c76e1 | 592 | void *buf = static_code_gen_buffer; |
f293709c RH |
593 | size_t full_size, size; |
594 | ||
595 | /* The size of the buffer, rounded down to end on a page boundary. */ | |
596 | full_size = (((uintptr_t)buf + sizeof(static_code_gen_buffer)) | |
597 | & qemu_real_host_page_mask) - (uintptr_t)buf; | |
598 | ||
599 | /* Reserve a guard page. */ | |
600 | size = full_size - qemu_real_host_page_size; | |
601 | ||
602 | /* Honor a command-line option limiting the size of the buffer. */ | |
603 | if (size > tcg_ctx.code_gen_buffer_size) { | |
604 | size = (((uintptr_t)buf + tcg_ctx.code_gen_buffer_size) | |
605 | & qemu_real_host_page_mask) - (uintptr_t)buf; | |
606 | } | |
607 | tcg_ctx.code_gen_buffer_size = size; | |
608 | ||
483c76e1 | 609 | #ifdef __mips__ |
f293709c RH |
610 | if (cross_256mb(buf, size)) { |
611 | buf = split_cross_256mb(buf, size); | |
612 | size = tcg_ctx.code_gen_buffer_size; | |
483c76e1 RH |
613 | } |
614 | #endif | |
f293709c RH |
615 | |
616 | map_exec(buf, size); | |
617 | map_none(buf + size, qemu_real_host_page_size); | |
618 | qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE); | |
619 | ||
483c76e1 | 620 | return buf; |
5b6dd868 | 621 | } |
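The same buffer-plus-guard-page layout can be reproduced outside QEMU; a sketch for POSIX hosts (hardened systems enforcing W^X may refuse the PROT_WRITE | PROT_EXEC mapping):

```c
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    size_t page = sysconf(_SC_PAGESIZE);
    size_t size = 16 * page;                      /* example buffer size */

    /* Reserve buffer + guard page with no access, then open up the buffer. */
    void *buf = mmap(NULL, size + page, PROT_NONE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (buf == MAP_FAILED) {
        perror("mmap");
        return 1;
    }
    mprotect(buf, size, PROT_READ | PROT_WRITE | PROT_EXEC);

    memset(buf, 0, size);                         /* fine: inside the buffer */
    /* ((char *)buf)[size] = 0;                      would fault: guard page */

    printf("buffer at %p, guard page at %p\n", buf, (char *)buf + size);
    munmap(buf, size + page);
    return 0;
}
```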
f293709c RH |
622 | #elif defined(_WIN32) |
623 | static inline void *alloc_code_gen_buffer(void) | |
624 | { | |
625 | size_t size = tcg_ctx.code_gen_buffer_size; | |
626 | void *buf1, *buf2; | |
627 | ||
628 | /* Perform the allocation in two steps, so that the guard page | |
629 | is reserved but uncommitted. */ | |
630 | buf1 = VirtualAlloc(NULL, size + qemu_real_host_page_size, | |
631 | MEM_RESERVE, PAGE_NOACCESS); | |
632 | if (buf1 != NULL) { | |
633 | buf2 = VirtualAlloc(buf1, size, MEM_COMMIT, PAGE_EXECUTE_READWRITE); | |
634 | assert(buf1 == buf2); | |
635 | } | |
636 | ||
637 | return buf1; | |
638 | } | |
639 | #else | |
5b6dd868 BS |
640 | static inline void *alloc_code_gen_buffer(void) |
641 | { | |
642 | int flags = MAP_PRIVATE | MAP_ANONYMOUS; | |
643 | uintptr_t start = 0; | |
f293709c | 644 | size_t size = tcg_ctx.code_gen_buffer_size; |
5b6dd868 BS |
645 | void *buf; |
646 | ||
647 | /* Constrain the position of the buffer based on the host cpu. | |
648 | Note that these addresses are chosen in concert with the | |
649 | addresses assigned in the relevant linker script file. */ | |
650 | # if defined(__PIE__) || defined(__PIC__) | |
651 | /* Don't bother setting a preferred location if we're building | |
652 | a position-independent executable. We're more likely to get | |
653 | an address near the main executable if we let the kernel | |
654 | choose the address. */ | |
655 | # elif defined(__x86_64__) && defined(MAP_32BIT) | |
656 | /* Force the memory down into low memory with the executable. | |
657 | Leave the choice of exact location with the kernel. */ | |
658 | flags |= MAP_32BIT; | |
659 | /* Cannot expect to map more than 800MB in low memory. */ | |
f293709c RH |
660 | if (size > 800u * 1024 * 1024) { |
661 | tcg_ctx.code_gen_buffer_size = size = 800u * 1024 * 1024; | |
5b6dd868 BS |
662 | } |
663 | # elif defined(__sparc__) | |
664 | start = 0x40000000ul; | |
665 | # elif defined(__s390x__) | |
666 | start = 0x90000000ul; | |
479eb121 | 667 | # elif defined(__mips__) |
f293709c | 668 | # if _MIPS_SIM == _ABI64 |
479eb121 RH |
669 | start = 0x128000000ul; |
670 | # else | |
671 | start = 0x08000000ul; | |
672 | # endif | |
5b6dd868 BS |
673 | # endif |
674 | ||
f293709c RH |
675 | buf = mmap((void *)start, size + qemu_real_host_page_size, |
676 | PROT_NONE, flags, -1, 0); | |
483c76e1 RH |
677 | if (buf == MAP_FAILED) { |
678 | return NULL; | |
679 | } | |
680 | ||
681 | #ifdef __mips__ | |
f293709c | 682 | if (cross_256mb(buf, size)) { |
5d831be2 | 683 | /* Try again, with the original still mapped, to avoid re-acquiring |
483c76e1 | 684 | that 256mb crossing. This time don't specify an address. */ |
f293709c RH |
685 | size_t size2; |
686 | void *buf2 = mmap(NULL, size + qemu_real_host_page_size, | |
687 | PROT_NONE, flags, -1, 0); | |
688 | switch (buf2 != MAP_FAILED) { | |
689 | case 1: | |
690 | if (!cross_256mb(buf2, size)) { | |
483c76e1 | 691 | /* Success! Use the new buffer. */ |
f293709c RH |
692 | munmap(buf, size); |
693 | break; | |
483c76e1 RH |
694 | } |
695 | /* Failure. Work with what we had. */ | |
f293709c RH |
696 | munmap(buf2, size); |
697 | /* fallthru */ | |
698 | default: | |
699 | /* Split the original buffer. Free the smaller half. */ | |
700 | buf2 = split_cross_256mb(buf, size); | |
701 | size2 = tcg_ctx.code_gen_buffer_size; | |
702 | if (buf == buf2) { | |
703 | munmap(buf + size2 + qemu_real_host_page_size, size - size2); | |
704 | } else { | |
705 | munmap(buf, size - size2); | |
706 | } | |
707 | size = size2; | |
708 | break; | |
483c76e1 | 709 | } |
f293709c | 710 | buf = buf2; |
483c76e1 RH |
711 | } |
712 | #endif | |
713 | ||
f293709c RH |
714 | /* Make the final buffer accessible. The guard page at the end |
715 | will remain inaccessible with PROT_NONE. */ | |
716 | mprotect(buf, size, PROT_WRITE | PROT_READ | PROT_EXEC); | |
483c76e1 | 717 | |
f293709c RH |
718 | /* Request large pages for the buffer. */ |
719 | qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE); | |
483c76e1 | 720 | |
5b6dd868 BS |
721 | return buf; |
722 | } | |
f293709c | 723 | #endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */ |
5b6dd868 BS |
724 | |
725 | static inline void code_gen_alloc(size_t tb_size) | |
726 | { | |
0b0d3320 EV |
727 | tcg_ctx.code_gen_buffer_size = size_code_gen_buffer(tb_size); |
728 | tcg_ctx.code_gen_buffer = alloc_code_gen_buffer(); | |
729 | if (tcg_ctx.code_gen_buffer == NULL) { | |
5b6dd868 BS |
730 | fprintf(stderr, "Could not allocate dynamic translator buffer\n"); |
731 | exit(1); | |
732 | } | |
733 | ||
8163b749 RH |
734 | /* Estimate a good size for the number of TBs we can support. We |
735 | still haven't deducted the prologue from the buffer size here, | |
736 | but that's minimal and won't affect the estimate much. */ | |
737 | tcg_ctx.code_gen_max_blocks | |
738 | = tcg_ctx.code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE; | |
739 | tcg_ctx.tb_ctx.tbs = g_new(TranslationBlock, tcg_ctx.code_gen_max_blocks); | |
740 | ||
677ef623 | 741 | qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock); |
5b6dd868 BS |
742 | } |
743 | ||
744 | /* Must be called before using the QEMU cpus. 'tb_size' is the size | |
745 | (in bytes) allocated to the translation buffer. Zero means default | |
746 | size. */ | |
747 | void tcg_exec_init(unsigned long tb_size) | |
748 | { | |
749 | cpu_gen_init(); | |
5b6dd868 | 750 | page_init(); |
f293709c | 751 | code_gen_alloc(tb_size); |
4cbea598 | 752 | #if defined(CONFIG_SOFTMMU) |
5b6dd868 BS |
753 | /* There's no guest base to take into account, so go ahead and |
754 | initialize the prologue now. */ | |
755 | tcg_prologue_init(&tcg_ctx); | |
756 | #endif | |
757 | } | |
758 | ||
759 | bool tcg_enabled(void) | |
760 | { | |
0b0d3320 | 761 | return tcg_ctx.code_gen_buffer != NULL; |
5b6dd868 BS |
762 | } |
763 | ||
764 | /* Allocate a new translation block. Flush the translation buffer if | |
765 | too many translation blocks or too much generated code. */ | |
766 | static TranslationBlock *tb_alloc(target_ulong pc) | |
767 | { | |
768 | TranslationBlock *tb; | |
769 | ||
b125f9dc | 770 | if (tcg_ctx.tb_ctx.nb_tbs >= tcg_ctx.code_gen_max_blocks) { |
5b6dd868 BS |
771 | return NULL; |
772 | } | |
5e5f07e0 | 773 | tb = &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs++]; |
5b6dd868 BS |
774 | tb->pc = pc; |
775 | tb->cflags = 0; | |
776 | return tb; | |
777 | } | |
778 | ||
779 | void tb_free(TranslationBlock *tb) | |
780 | { | |
781 | /* In practice this is mostly used for single-use temporary TBs. |
782 | Ignore the hard cases and just back up if this TB happens to | |
783 | be the last one generated. */ | |
5e5f07e0 EV |
784 | if (tcg_ctx.tb_ctx.nb_tbs > 0 && |
785 | tb == &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs - 1]) { | |
0b0d3320 | 786 | tcg_ctx.code_gen_ptr = tb->tc_ptr; |
5e5f07e0 | 787 | tcg_ctx.tb_ctx.nb_tbs--; |
5b6dd868 BS |
788 | } |
789 | } | |
790 | ||
791 | static inline void invalidate_page_bitmap(PageDesc *p) | |
792 | { | |
012aef07 MA |
793 | g_free(p->code_bitmap); |
794 | p->code_bitmap = NULL; | |
5b6dd868 BS |
795 | p->code_write_count = 0; |
796 | } | |
797 | ||
798 | /* Set to NULL all the 'first_tb' fields in all PageDescs. */ | |
799 | static void page_flush_tb_1(int level, void **lp) | |
800 | { | |
801 | int i; | |
802 | ||
803 | if (*lp == NULL) { | |
804 | return; | |
805 | } | |
806 | if (level == 0) { | |
807 | PageDesc *pd = *lp; | |
808 | ||
03f49957 | 809 | for (i = 0; i < V_L2_SIZE; ++i) { |
5b6dd868 BS |
810 | pd[i].first_tb = NULL; |
811 | invalidate_page_bitmap(pd + i); | |
812 | } | |
813 | } else { | |
814 | void **pp = *lp; | |
815 | ||
03f49957 | 816 | for (i = 0; i < V_L2_SIZE; ++i) { |
5b6dd868 BS |
817 | page_flush_tb_1(level - 1, pp + i); |
818 | } | |
819 | } | |
820 | } | |
821 | ||
822 | static void page_flush_tb(void) | |
823 | { | |
824 | int i; | |
825 | ||
826 | for (i = 0; i < V_L1_SIZE; i++) { | |
03f49957 | 827 | page_flush_tb_1(V_L1_SHIFT / V_L2_BITS - 1, l1_map + i); |
5b6dd868 BS |
828 | } |
829 | } | |
830 | ||
831 | /* flush all the translation blocks */ | |
832 | /* XXX: tb_flush is currently not thread safe */ | |
bbd77c18 | 833 | void tb_flush(CPUState *cpu) |
5b6dd868 | 834 | { |
5b6dd868 BS |
835 | #if defined(DEBUG_FLUSH) |
836 | printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n", | |
0b0d3320 | 837 | (unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer), |
5e5f07e0 | 838 | tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.tb_ctx.nb_tbs > 0 ? |
0b0d3320 | 839 | ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)) / |
5e5f07e0 | 840 | tcg_ctx.tb_ctx.nb_tbs : 0); |
5b6dd868 | 841 | #endif |
0b0d3320 EV |
842 | if ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer) |
843 | > tcg_ctx.code_gen_buffer_size) { | |
a47dddd7 | 844 | cpu_abort(cpu, "Internal error: code buffer overflow\n"); |
5b6dd868 | 845 | } |
5e5f07e0 | 846 | tcg_ctx.tb_ctx.nb_tbs = 0; |
5b6dd868 | 847 | |
bdc44640 | 848 | CPU_FOREACH(cpu) { |
8cd70437 | 849 | memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache)); |
5b6dd868 BS |
850 | } |
851 | ||
eb2535f4 | 852 | memset(tcg_ctx.tb_ctx.tb_phys_hash, 0, sizeof(tcg_ctx.tb_ctx.tb_phys_hash)); |
5b6dd868 BS |
853 | page_flush_tb(); |
854 | ||
0b0d3320 | 855 | tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer; |
5b6dd868 BS |
856 | /* XXX: flush processor icache at this point if cache flush is |
857 | expensive */ | |
5e5f07e0 | 858 | tcg_ctx.tb_ctx.tb_flush_count++; |
5b6dd868 BS |
859 | } |
860 | ||
861 | #ifdef DEBUG_TB_CHECK | |
862 | ||
863 | static void tb_invalidate_check(target_ulong address) | |
864 | { | |
865 | TranslationBlock *tb; | |
866 | int i; | |
867 | ||
868 | address &= TARGET_PAGE_MASK; | |
869 | for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) { | |
5e5f07e0 | 870 | for (tb = tb_ctx.tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) { |
5b6dd868 BS |
871 | if (!(address + TARGET_PAGE_SIZE <= tb->pc || |
872 | address >= tb->pc + tb->size)) { | |
873 | printf("ERROR invalidate: address=" TARGET_FMT_lx | |
874 | " PC=%08lx size=%04x\n", | |
875 | address, (long)tb->pc, tb->size); | |
876 | } | |
877 | } | |
878 | } | |
879 | } | |
880 | ||
881 | /* verify that all the pages have correct rights for code */ | |
882 | static void tb_page_check(void) | |
883 | { | |
884 | TranslationBlock *tb; | |
885 | int i, flags1, flags2; | |
886 | ||
887 | for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) { | |
5e5f07e0 EV |
888 | for (tb = tcg_ctx.tb_ctx.tb_phys_hash[i]; tb != NULL; |
889 | tb = tb->phys_hash_next) { | |
5b6dd868 BS |
890 | flags1 = page_get_flags(tb->pc); |
891 | flags2 = page_get_flags(tb->pc + tb->size - 1); | |
892 | if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) { | |
893 | printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n", | |
894 | (long)tb->pc, tb->size, flags1, flags2); | |
895 | } | |
896 | } | |
897 | } | |
898 | } | |
899 | ||
900 | #endif | |
901 | ||
0c884d16 | 902 | static inline void tb_hash_remove(TranslationBlock **ptb, TranslationBlock *tb) |
5b6dd868 BS |
903 | { |
904 | TranslationBlock *tb1; | |
905 | ||
906 | for (;;) { | |
907 | tb1 = *ptb; | |
908 | if (tb1 == tb) { | |
0c884d16 | 909 | *ptb = tb1->phys_hash_next; |
5b6dd868 BS |
910 | break; |
911 | } | |
0c884d16 | 912 | ptb = &tb1->phys_hash_next; |
5b6dd868 BS |
913 | } |
914 | } | |
915 | ||
916 | static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb) | |
917 | { | |
918 | TranslationBlock *tb1; | |
919 | unsigned int n1; | |
920 | ||
921 | for (;;) { | |
922 | tb1 = *ptb; | |
923 | n1 = (uintptr_t)tb1 & 3; | |
924 | tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3); | |
925 | if (tb1 == tb) { | |
926 | *ptb = tb1->page_next[n1]; | |
927 | break; | |
928 | } | |
929 | ptb = &tb1->page_next[n1]; | |
930 | } | |
931 | } | |
932 | ||
933 | static inline void tb_jmp_remove(TranslationBlock *tb, int n) | |
934 | { | |
935 | TranslationBlock *tb1, **ptb; | |
936 | unsigned int n1; | |
937 | ||
938 | ptb = &tb->jmp_next[n]; | |
939 | tb1 = *ptb; | |
940 | if (tb1) { | |
941 | /* find tb(n) in circular list */ | |
942 | for (;;) { | |
943 | tb1 = *ptb; | |
944 | n1 = (uintptr_t)tb1 & 3; | |
945 | tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3); | |
946 | if (n1 == n && tb1 == tb) { | |
947 | break; | |
948 | } | |
949 | if (n1 == 2) { | |
950 | ptb = &tb1->jmp_first; | |
951 | } else { | |
952 | ptb = &tb1->jmp_next[n1]; | |
953 | } | |
954 | } | |
955 | /* now we can suppress tb(n) from the list */ | |
956 | *ptb = tb->jmp_next[n]; | |
957 | ||
958 | tb->jmp_next[n] = NULL; | |
959 | } | |
960 | } | |
961 | ||
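tb_page_remove() and tb_jmp_remove() above rely on the two low bits of a TranslationBlock pointer being free (the structures are at least 4-byte aligned), so the list links can carry a small tag alongside the pointer. A self-contained sketch of that tagging trick:

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct node {
    int payload;
} __attribute__((aligned(4)));   /* low two pointer bits are free for a tag */

static void *tag_ptr(struct node *p, unsigned tag)     /* tag in 0..3 */
{
    return (void *)((uintptr_t)p | tag);
}

static struct node *untag_ptr(void *tagged, unsigned *tag)
{
    *tag = (uintptr_t)tagged & 3;
    return (struct node *)((uintptr_t)tagged & ~(uintptr_t)3);
}

int main(void)
{
    struct node n = { 42 };
    unsigned tag;

    void *t = tag_ptr(&n, 1);                /* e.g. "came from jump slot 1" */
    struct node *p = untag_ptr(t, &tag);

    assert(p == &n && tag == 1);
    printf("payload=%d tag=%u\n", p->payload, tag);
    return 0;
}
```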
962 | /* reset the jump entry 'n' of a TB so that it is not chained to | |
963 | another TB */ | |
964 | static inline void tb_reset_jump(TranslationBlock *tb, int n) | |
965 | { | |
966 | tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n])); | |
967 | } | |
968 | ||
0c884d16 | 969 | /* invalidate one TB */ |
5b6dd868 BS |
970 | void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr) |
971 | { | |
182735ef | 972 | CPUState *cpu; |
5b6dd868 BS |
973 | PageDesc *p; |
974 | unsigned int h, n1; | |
975 | tb_page_addr_t phys_pc; | |
976 | TranslationBlock *tb1, *tb2; | |
977 | ||
978 | /* remove the TB from the hash list */ | |
979 | phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK); | |
980 | h = tb_phys_hash_func(phys_pc); | |
5e5f07e0 | 981 | tb_hash_remove(&tcg_ctx.tb_ctx.tb_phys_hash[h], tb); |
5b6dd868 BS |
982 | |
983 | /* remove the TB from the page list */ | |
984 | if (tb->page_addr[0] != page_addr) { | |
985 | p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS); | |
986 | tb_page_remove(&p->first_tb, tb); | |
987 | invalidate_page_bitmap(p); | |
988 | } | |
989 | if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) { | |
990 | p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS); | |
991 | tb_page_remove(&p->first_tb, tb); | |
992 | invalidate_page_bitmap(p); | |
993 | } | |
994 | ||
5e5f07e0 | 995 | tcg_ctx.tb_ctx.tb_invalidated_flag = 1; |
5b6dd868 BS |
996 | |
997 | /* remove the TB from the hash list */ | |
998 | h = tb_jmp_cache_hash_func(tb->pc); | |
bdc44640 | 999 | CPU_FOREACH(cpu) { |
8cd70437 AF |
1000 | if (cpu->tb_jmp_cache[h] == tb) { |
1001 | cpu->tb_jmp_cache[h] = NULL; | |
5b6dd868 BS |
1002 | } |
1003 | } | |
1004 | ||
1005 | /* suppress this TB from the two jump lists */ | |
1006 | tb_jmp_remove(tb, 0); | |
1007 | tb_jmp_remove(tb, 1); | |
1008 | ||
1009 | /* suppress any remaining jumps to this TB */ | |
1010 | tb1 = tb->jmp_first; | |
1011 | for (;;) { | |
1012 | n1 = (uintptr_t)tb1 & 3; | |
1013 | if (n1 == 2) { | |
1014 | break; | |
1015 | } | |
1016 | tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3); | |
1017 | tb2 = tb1->jmp_next[n1]; | |
1018 | tb_reset_jump(tb1, n1); | |
1019 | tb1->jmp_next[n1] = NULL; | |
1020 | tb1 = tb2; | |
1021 | } | |
1022 | tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */ | |
1023 | ||
5e5f07e0 | 1024 | tcg_ctx.tb_ctx.tb_phys_invalidate_count++; |
5b6dd868 BS |
1025 | } |
1026 | ||
5b6dd868 BS |
1027 | static void build_page_bitmap(PageDesc *p) |
1028 | { | |
1029 | int n, tb_start, tb_end; | |
1030 | TranslationBlock *tb; | |
1031 | ||
510a647f | 1032 | p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE); |
5b6dd868 BS |
1033 | |
1034 | tb = p->first_tb; | |
1035 | while (tb != NULL) { | |
1036 | n = (uintptr_t)tb & 3; | |
1037 | tb = (TranslationBlock *)((uintptr_t)tb & ~3); | |
1038 | /* NOTE: this is subtle as a TB may span two physical pages */ | |
1039 | if (n == 0) { | |
1040 | /* NOTE: tb_end may be after the end of the page, but | |
1041 | it is not a problem */ | |
1042 | tb_start = tb->pc & ~TARGET_PAGE_MASK; | |
1043 | tb_end = tb_start + tb->size; | |
1044 | if (tb_end > TARGET_PAGE_SIZE) { | |
1045 | tb_end = TARGET_PAGE_SIZE; | |
1046 | } | |
1047 | } else { | |
1048 | tb_start = 0; | |
1049 | tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK); | |
1050 | } | |
510a647f | 1051 | bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start); |
5b6dd868 BS |
1052 | tb = tb->page_next[n]; |
1053 | } | |
1054 | } | |
1055 | ||
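tb_invalidate_phys_page_fast() later consults this bitmap to decide whether a small write actually touches translated code. A sketch of that bit test on a stand-alone bitmap, assuming a 4KB page for illustration:

```c
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE     4096                       /* example target page size */
#define BITS_PER_LONG (sizeof(long) * CHAR_BIT)

static unsigned long code_bitmap[PAGE_SIZE / BITS_PER_LONG];

static void mark_code(unsigned start, unsigned len)
{
    for (unsigned i = start; i < start + len; i++) {
        code_bitmap[i / BITS_PER_LONG] |= 1ul << (i % BITS_PER_LONG);
    }
}

/* Mirrors the check in tb_invalidate_phys_page_fast(): len <= 8. */
static bool write_hits_code(unsigned offset, unsigned len)
{
    unsigned long b = code_bitmap[offset / BITS_PER_LONG]
                      >> (offset % BITS_PER_LONG);
    return b & ((1ul << len) - 1);
}

int main(void)
{
    memset(code_bitmap, 0, sizeof(code_bitmap));
    mark_code(0x100, 0x40);                      /* a TB spans [0x100, 0x140) */

    printf("%d\n", write_hits_code(0x0f8, 8));   /* 0: just below the TB */
    printf("%d\n", write_hits_code(0x13c, 4));   /* 1: overlaps its tail */
    return 0;
}
```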
75692087 | 1056 | /* Called with mmap_lock held for user mode emulation. */ |
648f034c | 1057 | TranslationBlock *tb_gen_code(CPUState *cpu, |
5b6dd868 BS |
1058 | target_ulong pc, target_ulong cs_base, |
1059 | int flags, int cflags) | |
1060 | { | |
648f034c | 1061 | CPUArchState *env = cpu->env_ptr; |
5b6dd868 | 1062 | TranslationBlock *tb; |
5b6dd868 BS |
1063 | tb_page_addr_t phys_pc, phys_page2; |
1064 | target_ulong virt_page2; | |
fec88f64 | 1065 | tcg_insn_unit *gen_code_buf; |
fca8a500 | 1066 | int gen_code_size, search_size; |
fec88f64 RH |
1067 | #ifdef CONFIG_PROFILER |
1068 | int64_t ti; | |
1069 | #endif | |
5b6dd868 BS |
1070 | |
1071 | phys_pc = get_page_addr_code(env, pc); | |
56c0269a | 1072 | if (use_icount && !(cflags & CF_IGNORE_ICOUNT)) { |
0266359e PB |
1073 | cflags |= CF_USE_ICOUNT; |
1074 | } | |
b125f9dc | 1075 | |
5b6dd868 | 1076 | tb = tb_alloc(pc); |
b125f9dc RH |
1077 | if (unlikely(!tb)) { |
1078 | buffer_overflow: | |
5b6dd868 | 1079 | /* flush must be done */ |
bbd77c18 | 1080 | tb_flush(cpu); |
5b6dd868 BS |
1081 | /* cannot fail at this point */ |
1082 | tb = tb_alloc(pc); | |
b125f9dc | 1083 | assert(tb != NULL); |
5b6dd868 | 1084 | /* Don't forget to invalidate previous TB info. */ |
5e5f07e0 | 1085 | tcg_ctx.tb_ctx.tb_invalidated_flag = 1; |
5b6dd868 | 1086 | } |
fec88f64 RH |
1087 | |
1088 | gen_code_buf = tcg_ctx.code_gen_ptr; | |
1089 | tb->tc_ptr = gen_code_buf; | |
5b6dd868 BS |
1090 | tb->cs_base = cs_base; |
1091 | tb->flags = flags; | |
1092 | tb->cflags = cflags; | |
fec88f64 RH |
1093 | |
1094 | #ifdef CONFIG_PROFILER | |
1095 | tcg_ctx.tb_count1++; /* includes aborted translations because of | |
1096 | exceptions */ | |
1097 | ti = profile_getclock(); | |
1098 | #endif | |
1099 | ||
1100 | tcg_func_start(&tcg_ctx); | |
1101 | ||
1102 | gen_intermediate_code(env, tb); | |
1103 | ||
1104 | trace_translate_block(tb, tb->pc, tb->tc_ptr); | |
1105 | ||
1106 | /* generate machine code */ | |
1107 | tb->tb_next_offset[0] = 0xffff; | |
1108 | tb->tb_next_offset[1] = 0xffff; | |
1109 | tcg_ctx.tb_next_offset = tb->tb_next_offset; | |
1110 | #ifdef USE_DIRECT_JUMP | |
1111 | tcg_ctx.tb_jmp_offset = tb->tb_jmp_offset; | |
1112 | tcg_ctx.tb_next = NULL; | |
1113 | #else | |
1114 | tcg_ctx.tb_jmp_offset = NULL; | |
1115 | tcg_ctx.tb_next = tb->tb_next; | |
1116 | #endif | |
1117 | ||
1118 | #ifdef CONFIG_PROFILER | |
1119 | tcg_ctx.tb_count++; | |
1120 | tcg_ctx.interm_time += profile_getclock() - ti; | |
1121 | tcg_ctx.code_time -= profile_getclock(); | |
1122 | #endif | |
1123 | ||
b125f9dc RH |
1124 | /* ??? Overflow could be handled better here. In particular, we |
1125 | don't need to re-do gen_intermediate_code, nor should we re-do | |
1126 | the tcg optimization currently hidden inside tcg_gen_code. All | |
1127 | that should be required is to flush the TBs, allocate a new TB, | |
1128 | re-initialize it per above, and re-do the actual code generation. */ | |
fec88f64 | 1129 | gen_code_size = tcg_gen_code(&tcg_ctx, gen_code_buf); |
b125f9dc RH |
1130 | if (unlikely(gen_code_size < 0)) { |
1131 | goto buffer_overflow; | |
1132 | } | |
fca8a500 | 1133 | search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size); |
b125f9dc RH |
1134 | if (unlikely(search_size < 0)) { |
1135 | goto buffer_overflow; | |
1136 | } | |
fec88f64 RH |
1137 | |
1138 | #ifdef CONFIG_PROFILER | |
1139 | tcg_ctx.code_time += profile_getclock(); | |
1140 | tcg_ctx.code_in_len += tb->size; | |
1141 | tcg_ctx.code_out_len += gen_code_size; | |
fca8a500 | 1142 | tcg_ctx.search_out_len += search_size; |
fec88f64 RH |
1143 | #endif |
1144 | ||
1145 | #ifdef DEBUG_DISAS | |
1146 | if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) { | |
1147 | qemu_log("OUT: [size=%d]\n", gen_code_size); | |
1148 | log_disas(tb->tc_ptr, gen_code_size); | |
1149 | qemu_log("\n"); | |
1150 | qemu_log_flush(); | |
1151 | } | |
1152 | #endif | |
1153 | ||
fca8a500 RH |
1154 | tcg_ctx.code_gen_ptr = (void *) |
1155 | ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size, | |
1156 | CODE_GEN_ALIGN); | |
5b6dd868 BS |
1157 | |
1158 | /* check next page if needed */ | |
1159 | virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK; | |
1160 | phys_page2 = -1; | |
1161 | if ((pc & TARGET_PAGE_MASK) != virt_page2) { | |
1162 | phys_page2 = get_page_addr_code(env, virt_page2); | |
1163 | } | |
1164 | tb_link_page(tb, phys_pc, phys_page2); | |
1165 | return tb; | |
1166 | } | |
1167 | ||
1168 | /* | |
1169 | * Invalidate all TBs which intersect with the target physical address range | |
1170 | * [start;end[. NOTE: start and end may refer to *different* physical pages. | |
1171 | * 'is_cpu_write_access' should be true if called from a real cpu write | |
1172 | * access: the virtual CPU will exit the current TB if code is modified inside | |
1173 | * this TB. | |
75692087 PB |
1174 | * |
1175 | * Called with mmap_lock held for user-mode emulation | |
5b6dd868 | 1176 | */ |
35865339 | 1177 | void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end) |
5b6dd868 BS |
1178 | { |
1179 | while (start < end) { | |
35865339 | 1180 | tb_invalidate_phys_page_range(start, end, 0); |
5b6dd868 BS |
1181 | start &= TARGET_PAGE_MASK; |
1182 | start += TARGET_PAGE_SIZE; | |
1183 | } | |
1184 | } | |
1185 | ||
1186 | /* | |
1187 | * Invalidate all TBs which intersect with the target physical address range | |
1188 | * [start;end[. NOTE: start and end must refer to the *same* physical page. | |
1189 | * 'is_cpu_write_access' should be true if called from a real cpu write | |
1190 | * access: the virtual CPU will exit the current TB if code is modified inside | |
1191 | * this TB. | |
75692087 PB |
1192 | * |
1193 | * Called with mmap_lock held for user-mode emulation | |
5b6dd868 BS |
1194 | */ |
1195 | void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end, | |
1196 | int is_cpu_write_access) | |
1197 | { | |
1198 | TranslationBlock *tb, *tb_next, *saved_tb; | |
4917cf44 | 1199 | CPUState *cpu = current_cpu; |
baea4fae | 1200 | #if defined(TARGET_HAS_PRECISE_SMC) |
4917cf44 AF |
1201 | CPUArchState *env = NULL; |
1202 | #endif | |
5b6dd868 BS |
1203 | tb_page_addr_t tb_start, tb_end; |
1204 | PageDesc *p; | |
1205 | int n; | |
1206 | #ifdef TARGET_HAS_PRECISE_SMC | |
1207 | int current_tb_not_found = is_cpu_write_access; | |
1208 | TranslationBlock *current_tb = NULL; | |
1209 | int current_tb_modified = 0; | |
1210 | target_ulong current_pc = 0; | |
1211 | target_ulong current_cs_base = 0; | |
1212 | int current_flags = 0; | |
1213 | #endif /* TARGET_HAS_PRECISE_SMC */ | |
1214 | ||
1215 | p = page_find(start >> TARGET_PAGE_BITS); | |
1216 | if (!p) { | |
1217 | return; | |
1218 | } | |
baea4fae | 1219 | #if defined(TARGET_HAS_PRECISE_SMC) |
4917cf44 AF |
1220 | if (cpu != NULL) { |
1221 | env = cpu->env_ptr; | |
d77953b9 | 1222 | } |
4917cf44 | 1223 | #endif |
5b6dd868 BS |
1224 | |
1225 | /* we remove all the TBs in the range [start, end[ */ | |
1226 | /* XXX: see if in some cases it could be faster to invalidate all | |
1227 | the code */ | |
1228 | tb = p->first_tb; | |
1229 | while (tb != NULL) { | |
1230 | n = (uintptr_t)tb & 3; | |
1231 | tb = (TranslationBlock *)((uintptr_t)tb & ~3); | |
1232 | tb_next = tb->page_next[n]; | |
1233 | /* NOTE: this is subtle as a TB may span two physical pages */ | |
1234 | if (n == 0) { | |
1235 | /* NOTE: tb_end may be after the end of the page, but | |
1236 | it is not a problem */ | |
1237 | tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK); | |
1238 | tb_end = tb_start + tb->size; | |
1239 | } else { | |
1240 | tb_start = tb->page_addr[1]; | |
1241 | tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK); | |
1242 | } | |
1243 | if (!(tb_end <= start || tb_start >= end)) { | |
1244 | #ifdef TARGET_HAS_PRECISE_SMC | |
1245 | if (current_tb_not_found) { | |
1246 | current_tb_not_found = 0; | |
1247 | current_tb = NULL; | |
93afeade | 1248 | if (cpu->mem_io_pc) { |
5b6dd868 | 1249 | /* now we have a real cpu fault */ |
93afeade | 1250 | current_tb = tb_find_pc(cpu->mem_io_pc); |
5b6dd868 BS |
1251 | } |
1252 | } | |
1253 | if (current_tb == tb && | |
1254 | (current_tb->cflags & CF_COUNT_MASK) != 1) { | |
1255 | /* If we are modifying the current TB, we must stop | |
1256 | its execution. We could be more precise by checking | |
1257 | that the modification is after the current PC, but it | |
1258 | would require a specialized function to partially | |
1259 | restore the CPU state */ | |
1260 | ||
1261 | current_tb_modified = 1; | |
74f10515 | 1262 | cpu_restore_state_from_tb(cpu, current_tb, cpu->mem_io_pc); |
5b6dd868 BS |
1263 | cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base, |
1264 | &current_flags); |
1265 | } | |
1266 | #endif /* TARGET_HAS_PRECISE_SMC */ | |
1267 | /* we need to do that to handle the case where a signal | |
1268 | occurs while doing tb_phys_invalidate() */ | |
1269 | saved_tb = NULL; | |
d77953b9 AF |
1270 | if (cpu != NULL) { |
1271 | saved_tb = cpu->current_tb; | |
1272 | cpu->current_tb = NULL; | |
5b6dd868 BS |
1273 | } |
1274 | tb_phys_invalidate(tb, -1); | |
d77953b9 AF |
1275 | if (cpu != NULL) { |
1276 | cpu->current_tb = saved_tb; | |
c3affe56 AF |
1277 | if (cpu->interrupt_request && cpu->current_tb) { |
1278 | cpu_interrupt(cpu, cpu->interrupt_request); | |
5b6dd868 BS |
1279 | } |
1280 | } | |
1281 | } | |
1282 | tb = tb_next; | |
1283 | } | |
1284 | #if !defined(CONFIG_USER_ONLY) | |
1285 | /* if no code remaining, no need to continue to use slow writes */ | |
1286 | if (!p->first_tb) { | |
1287 | invalidate_page_bitmap(p); | |
fc377bcf | 1288 | tlb_unprotect_code(start); |
5b6dd868 BS |
1289 | } |
1290 | #endif | |
1291 | #ifdef TARGET_HAS_PRECISE_SMC | |
1292 | if (current_tb_modified) { | |
1293 | /* we generate a block containing just the instruction | |
1294 | modifying the memory. It will ensure that it cannot modify | |
1295 | itself */ | |
d77953b9 | 1296 | cpu->current_tb = NULL; |
648f034c | 1297 | tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1); |
0ea8cb88 | 1298 | cpu_resume_from_signal(cpu, NULL); |
5b6dd868 BS |
1299 | } |
1300 | #endif | |
1301 | } | |
1302 | ||
1303 | /* len must be <= 8 and start must be a multiple of len */ | |
1304 | void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len) | |
1305 | { | |
1306 | PageDesc *p; | |
5b6dd868 BS |
1307 | |
1308 | #if 0 | |
1309 | if (1) { | |
1310 | qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n", | |
1311 | cpu_single_env->mem_io_vaddr, len, | |
1312 | cpu_single_env->eip, | |
1313 | cpu_single_env->eip + | |
1314 | (intptr_t)cpu_single_env->segs[R_CS].base); | |
1315 | } | |
1316 | #endif | |
1317 | p = page_find(start >> TARGET_PAGE_BITS); | |
1318 | if (!p) { | |
1319 | return; | |
1320 | } | |
fc377bcf PB |
1321 | if (!p->code_bitmap && |
1322 | ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) { | |
1323 | /* build code bitmap */ | |
1324 | build_page_bitmap(p); | |
1325 | } | |
5b6dd868 | 1326 | if (p->code_bitmap) { |
510a647f EC |
1327 | unsigned int nr; |
1328 | unsigned long b; | |
1329 | ||
1330 | nr = start & ~TARGET_PAGE_MASK; | |
1331 | b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1)); | |
5b6dd868 BS |
1332 | if (b & ((1 << len) - 1)) { |
1333 | goto do_invalidate; | |
1334 | } | |
1335 | } else { | |
1336 | do_invalidate: | |
1337 | tb_invalidate_phys_page_range(start, start + len, 1); | |
1338 | } | |
1339 | } | |
1340 | ||
1341 | #if !defined(CONFIG_SOFTMMU) | |
75692087 | 1342 | /* Called with mmap_lock held. */ |
5b6dd868 | 1343 | static void tb_invalidate_phys_page(tb_page_addr_t addr, |
d02532f0 AG |
1344 | uintptr_t pc, void *puc, |
1345 | bool locked) | |
5b6dd868 BS |
1346 | { |
1347 | TranslationBlock *tb; | |
1348 | PageDesc *p; | |
1349 | int n; | |
1350 | #ifdef TARGET_HAS_PRECISE_SMC | |
1351 | TranslationBlock *current_tb = NULL; | |
4917cf44 AF |
1352 | CPUState *cpu = current_cpu; |
1353 | CPUArchState *env = NULL; | |
5b6dd868 BS |
1354 | int current_tb_modified = 0; |
1355 | target_ulong current_pc = 0; | |
1356 | target_ulong current_cs_base = 0; | |
1357 | int current_flags = 0; | |
1358 | #endif | |
1359 | ||
1360 | addr &= TARGET_PAGE_MASK; | |
1361 | p = page_find(addr >> TARGET_PAGE_BITS); | |
1362 | if (!p) { | |
1363 | return; | |
1364 | } | |
1365 | tb = p->first_tb; | |
1366 | #ifdef TARGET_HAS_PRECISE_SMC | |
1367 | if (tb && pc != 0) { | |
1368 | current_tb = tb_find_pc(pc); | |
1369 | } | |
4917cf44 AF |
1370 | if (cpu != NULL) { |
1371 | env = cpu->env_ptr; | |
d77953b9 | 1372 | } |
5b6dd868 BS |
1373 | #endif |
1374 | while (tb != NULL) { | |
1375 | n = (uintptr_t)tb & 3; | |
1376 | tb = (TranslationBlock *)((uintptr_t)tb & ~3); | |
1377 | #ifdef TARGET_HAS_PRECISE_SMC | |
1378 | if (current_tb == tb && | |
1379 | (current_tb->cflags & CF_COUNT_MASK) != 1) { | |
1380 | /* If we are modifying the current TB, we must stop | |
1381 | its execution. We could be more precise by checking | |
1382 | that the modification is after the current PC, but it | |
1383 | would require a specialized function to partially | |
1384 | restore the CPU state */ | |
1385 | ||
1386 | current_tb_modified = 1; | |
74f10515 | 1387 | cpu_restore_state_from_tb(cpu, current_tb, pc); |
5b6dd868 BS |
1388 | cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base, |
1389 | &current_flags); |
1390 | } | |
1391 | #endif /* TARGET_HAS_PRECISE_SMC */ | |
1392 | tb_phys_invalidate(tb, addr); | |
1393 | tb = tb->page_next[n]; | |
1394 | } | |
1395 | p->first_tb = NULL; | |
1396 | #ifdef TARGET_HAS_PRECISE_SMC | |
1397 | if (current_tb_modified) { | |
1398 | /* we generate a block containing just the instruction | |
1399 | modifying the memory. It will ensure that it cannot modify | |
1400 | itself */ | |
d77953b9 | 1401 | cpu->current_tb = NULL; |
648f034c | 1402 | tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1); |
d02532f0 AG |
1403 | if (locked) { |
1404 | mmap_unlock(); | |
1405 | } | |
0ea8cb88 | 1406 | cpu_resume_from_signal(cpu, puc); |
5b6dd868 BS |
1407 | } |
1408 | #endif | |
1409 | } | |
1410 | #endif | |
1411 | ||
75692087 PB |
1412 | /* add the TB to the target page and protect it if necessary |
1413 | * | |
1414 | * Called with mmap_lock held for user-mode emulation. | |
1415 | */ | |
5b6dd868 BS |
1416 | static inline void tb_alloc_page(TranslationBlock *tb, |
1417 | unsigned int n, tb_page_addr_t page_addr) | |
1418 | { | |
1419 | PageDesc *p; | |
1420 | #ifndef CONFIG_USER_ONLY | |
1421 | bool page_already_protected; | |
1422 | #endif | |
1423 | ||
1424 | tb->page_addr[n] = page_addr; | |
1425 | p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1); | |
1426 | tb->page_next[n] = p->first_tb; | |
1427 | #ifndef CONFIG_USER_ONLY | |
1428 | page_already_protected = p->first_tb != NULL; | |
1429 | #endif | |
1430 | p->first_tb = (TranslationBlock *)((uintptr_t)tb | n); | |
1431 | invalidate_page_bitmap(p); | |
1432 | ||
5b6dd868 BS |
1433 | #if defined(CONFIG_USER_ONLY) |
1434 | if (p->flags & PAGE_WRITE) { | |
1435 | target_ulong addr; | |
1436 | PageDesc *p2; | |
1437 | int prot; | |
1438 | ||
1439 | /* force the host page as non writable (writes will have a | |
1440 | page fault + mprotect overhead) */ | |
1441 | page_addr &= qemu_host_page_mask; | |
1442 | prot = 0; | |
1443 | for (addr = page_addr; addr < page_addr + qemu_host_page_size; | |
1444 | addr += TARGET_PAGE_SIZE) { | |
1445 | ||
1446 | p2 = page_find(addr >> TARGET_PAGE_BITS); | |
1447 | if (!p2) { | |
1448 | continue; | |
1449 | } | |
1450 | prot |= p2->flags; | |
1451 | p2->flags &= ~PAGE_WRITE; | |
1452 | } | |
1453 | mprotect(g2h(page_addr), qemu_host_page_size, | |
1454 | (prot & PAGE_BITS) & ~PAGE_WRITE); | |
1455 | #ifdef DEBUG_TB_INVALIDATE | |
1456 | printf("protecting code page: 0x" TARGET_FMT_lx "\n", | |
1457 | page_addr); | |
1458 | #endif | |
1459 | } | |
1460 | #else | |
1461 | /* if some code is already present, then the pages are already | |
1462 | protected. So we handle the case where only the first TB is | |
1463 | allocated in a physical page */ | |
1464 | if (!page_already_protected) { | |
1465 | tlb_protect_code(page_addr); | |
1466 | } | |
1467 | #endif | |
5b6dd868 BS |
1468 | } |
1469 | ||
1470 | /* add a new TB and link it to the physical page tables. phys_page2 is | |
75692087 | 1471 | * (-1) to indicate that only one page contains the TB. |
9fd1a948 PB |
1472 | * |
1473 | * Called with mmap_lock held for user-mode emulation. | |
75692087 | 1474 | */ |
5b6dd868 BS |
1475 | static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc, |
1476 | tb_page_addr_t phys_page2) | |
1477 | { | |
1478 | unsigned int h; | |
1479 | TranslationBlock **ptb; | |
1480 | ||
5b6dd868 BS |
1481 | /* add in the physical hash table */ |
1482 | h = tb_phys_hash_func(phys_pc); | |
5e5f07e0 | 1483 | ptb = &tcg_ctx.tb_ctx.tb_phys_hash[h]; |
5b6dd868 BS |
1484 | tb->phys_hash_next = *ptb; |
1485 | *ptb = tb; | |
1486 | ||
1487 | /* add in the page list */ | |
1488 | tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK); | |
1489 | if (phys_page2 != -1) { | |
1490 | tb_alloc_page(tb, 1, phys_page2); | |
1491 | } else { | |
1492 | tb->page_addr[1] = -1; | |
1493 | } | |
1494 | ||
1495 | tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); | |
1496 | tb->jmp_next[0] = NULL; | |
1497 | tb->jmp_next[1] = NULL; | |
1498 | ||
1499 | /* init original jump addresses */ | |
1500 | if (tb->tb_next_offset[0] != 0xffff) { | |
1501 | tb_reset_jump(tb, 0); | |
1502 | } | |
1503 | if (tb->tb_next_offset[1] != 0xffff) { | |
1504 | tb_reset_jump(tb, 1); | |
1505 | } | |
1506 | ||
1507 | #ifdef DEBUG_TB_CHECK | |
1508 | tb_page_check(); | |
1509 | #endif | |
5b6dd868 BS |
1510 | } |
1511 | ||
5b6dd868 BS |
1512 | /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr < |
1513 | tb[1].tc_ptr. Return NULL if not found */ | |
a8a826a3 | 1514 | static TranslationBlock *tb_find_pc(uintptr_t tc_ptr) |
5b6dd868 BS |
1515 | { |
1516 | int m_min, m_max, m; | |
1517 | uintptr_t v; | |
1518 | TranslationBlock *tb; | |
1519 | ||
5e5f07e0 | 1520 | if (tcg_ctx.tb_ctx.nb_tbs <= 0) { |
5b6dd868 BS |
1521 | return NULL; |
1522 | } | |
0b0d3320 EV |
1523 | if (tc_ptr < (uintptr_t)tcg_ctx.code_gen_buffer || |
1524 | tc_ptr >= (uintptr_t)tcg_ctx.code_gen_ptr) { | |
5b6dd868 BS |
1525 | return NULL; |
1526 | } | |
1527 | /* binary search (cf Knuth) */ | |
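    /* TBs are handed out sequentially from the tbs[] array and their host
       code is laid out at increasing addresses in the code buffer, so
       tc_ptr is sorted by construction and the array can be searched by
       index. */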
1528 | m_min = 0; | |
5e5f07e0 | 1529 | m_max = tcg_ctx.tb_ctx.nb_tbs - 1; |
5b6dd868 BS |
1530 | while (m_min <= m_max) { |
1531 | m = (m_min + m_max) >> 1; | |
5e5f07e0 | 1532 | tb = &tcg_ctx.tb_ctx.tbs[m]; |
5b6dd868 BS |
1533 | v = (uintptr_t)tb->tc_ptr; |
1534 | if (v == tc_ptr) { | |
1535 | return tb; | |
1536 | } else if (tc_ptr < v) { | |
1537 | m_max = m - 1; | |
1538 | } else { | |
1539 | m_min = m + 1; | |
1540 | } | |
1541 | } | |
5e5f07e0 | 1542 | return &tcg_ctx.tb_ctx.tbs[m_max]; |
5b6dd868 BS |
1543 | } |
1544 | ||
ec53b45b | 1545 | #if !defined(CONFIG_USER_ONLY) |
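/* Invalidate any TB covering the guest-physical byte at 'addr' within
   address space 'as'.  The address is first translated to a RAM offset;
   regions that are neither RAM nor a ROM device cannot hold translated
   code and are ignored. */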
29d8ec7b | 1546 | void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr) |
5b6dd868 BS |
1547 | { |
1548 | ram_addr_t ram_addr; | |
5c8a00ce | 1549 | MemoryRegion *mr; |
149f54b5 | 1550 | hwaddr l = 1; |
5b6dd868 | 1551 | |
41063e1e | 1552 | rcu_read_lock(); |
29d8ec7b | 1553 | mr = address_space_translate(as, addr, &addr, &l, false); |
5c8a00ce PB |
1554 | if (!(memory_region_is_ram(mr) |
1555 | || memory_region_is_romd(mr))) { | |
41063e1e | 1556 | rcu_read_unlock(); |
5b6dd868 BS |
1557 | return; |
1558 | } | |
5c8a00ce | 1559 | ram_addr = (memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK) |
149f54b5 | 1560 | + addr; |
5b6dd868 | 1561 | tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0); |
41063e1e | 1562 | rcu_read_unlock(); |
5b6dd868 | 1563 | } |
ec53b45b | 1564 | #endif /* !defined(CONFIG_USER_ONLY) */ |
5b6dd868 | 1565 | |
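/* Called when a watchpoint fires while executing generated code: restore the
   guest state for the faulting instruction and invalidate the TB containing
   it so it will be retranslated.  If the access came from a helper, the CPU
   state was saved before the call, so the PC is taken from there and the
   code at that address is invalidated instead. */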
239c51a5 | 1566 | void tb_check_watchpoint(CPUState *cpu) |
5b6dd868 BS |
1567 | { |
1568 | TranslationBlock *tb; | |
1569 | ||
93afeade | 1570 | tb = tb_find_pc(cpu->mem_io_pc); |
8d302e76 AJ |
1571 | if (tb) { |
1572 | /* We can use retranslation to find the PC. */ | |
1573 | cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc); | |
1574 | tb_phys_invalidate(tb, -1); | |
1575 | } else { | |
1576 | /* The exception probably happened in a helper. The CPU state should | |
1577 | have been saved before calling it. Fetch the PC from there. */ | |
1578 | CPUArchState *env = cpu->env_ptr; | |
1579 | target_ulong pc, cs_base; | |
1580 | tb_page_addr_t addr; | |
1581 | int flags; | |
1582 | ||
1583 | cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags); | |
1584 | addr = get_page_addr_code(env, pc); | |
1585 | tb_invalidate_phys_range(addr, addr + 1); | |
5b6dd868 | 1586 | } |
5b6dd868 BS |
1587 | } |
1588 | ||
1589 | #ifndef CONFIG_USER_ONLY | |
5b6dd868 BS |
1590 | /* In deterministic execution (icount) mode, an instruction that performs | |
1591 | device I/O must be the last instruction of its TB. */ | |
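/* The instruction budget charged to a TB is fixed at translation time, so an
   I/O access in the middle of a TB would be replayed with a wrong count.
   This routine works out how many instructions of the current TB have
   already executed, invalidates the TB, regenerates one that ends exactly
   on the I/O instruction (CF_LAST_IO), and restarts execution there. */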
90b40a69 | 1592 | void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr) |
5b6dd868 | 1593 | { |
a47dddd7 | 1594 | #if defined(TARGET_MIPS) || defined(TARGET_SH4) |
90b40a69 | 1595 | CPUArchState *env = cpu->env_ptr; |
a47dddd7 | 1596 | #endif |
5b6dd868 BS |
1597 | TranslationBlock *tb; |
1598 | uint32_t n, cflags; | |
1599 | target_ulong pc, cs_base; | |
1600 | uint64_t flags; | |
1601 | ||
1602 | tb = tb_find_pc(retaddr); | |
1603 | if (!tb) { | |
a47dddd7 | 1604 | cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p", |
5b6dd868 BS |
1605 | (void *)retaddr); |
1606 | } | |
28ecfd7a | 1607 | n = cpu->icount_decr.u16.low + tb->icount; |
74f10515 | 1608 | cpu_restore_state_from_tb(cpu, tb, retaddr); |
5b6dd868 BS |
1609 | /* Calculate how many instructions had been executed before the fault |
1610 | occurred. */ | |
28ecfd7a | 1611 | n = n - cpu->icount_decr.u16.low; |
5b6dd868 BS |
1612 | /* Generate a new TB ending on the I/O insn. */ |
1613 | n++; | |
1614 | /* On MIPS and SH, delay slot instructions can only be restarted if | |
1615 | they were already the first instruction in the TB. If this is not | |
1616 | the first instruction in a TB then re-execute the preceding | |
1617 | branch. */ | |
1618 | #if defined(TARGET_MIPS) | |
1619 | if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) { | |
c3577479 | 1620 | env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4); |
28ecfd7a | 1621 | cpu->icount_decr.u16.low++; |
5b6dd868 BS |
1622 | env->hflags &= ~MIPS_HFLAG_BMASK; |
1623 | } | |
1624 | #elif defined(TARGET_SH4) | |
1625 | if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0 | |
1626 | && n > 1) { | |
1627 | env->pc -= 2; | |
28ecfd7a | 1628 | cpu->icount_decr.u16.low++; |
5b6dd868 BS |
1629 | env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL); |
1630 | } | |
1631 | #endif | |
1632 | /* This should never happen. */ | |
1633 | if (n > CF_COUNT_MASK) { | |
a47dddd7 | 1634 | cpu_abort(cpu, "TB too big during recompile"); |
5b6dd868 BS |
1635 | } |
1636 | ||
1637 | cflags = n | CF_LAST_IO; | |
1638 | pc = tb->pc; | |
1639 | cs_base = tb->cs_base; | |
1640 | flags = tb->flags; | |
1641 | tb_phys_invalidate(tb, -1); | |
02d57ea1 SF |
1642 | if (tb->cflags & CF_NOCACHE) { |
1643 | if (tb->orig_tb) { | |
1644 | /* Invalidate original TB if this TB was generated in | |
1645 | * cpu_exec_nocache() */ | |
1646 | tb_phys_invalidate(tb->orig_tb, -1); | |
1647 | } | |
1648 | tb_free(tb); | |
1649 | } | |
5b6dd868 BS |
1650 | /* FIXME: In theory this could raise an exception. In practice |
1651 | we have already translated the block once so it's probably ok. */ | |
648f034c | 1652 | tb_gen_code(cpu, pc, cs_base, flags, cflags); |
5b6dd868 BS |
1653 | /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not |
1654 | the first in the TB) then we end up generating a whole new TB and | |
1655 | repeating the fault, which is horribly inefficient. | |
1656 | Better would be to execute just this insn uncached, or generate a | |
1657 | second new TB. */ | |
0ea8cb88 | 1658 | cpu_resume_from_signal(cpu, NULL); |
5b6dd868 BS |
1659 | } |
1660 | ||
611d4f99 | 1661 | void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr) |
5b6dd868 BS |
1662 | { |
1663 | unsigned int i; | |
1664 | ||
1665 | /* Discard jump cache entries for any TB that might overlap the flushed | |
1666 | page; a TB can span two pages, so the preceding page is checked too. */ | |
1667 | i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE); | |
8cd70437 | 1668 | memset(&cpu->tb_jmp_cache[i], 0, |
5b6dd868 BS |
1669 | TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *)); |
1670 | ||
1671 | i = tb_jmp_cache_hash_page(addr); | |
8cd70437 | 1672 | memset(&cpu->tb_jmp_cache[i], 0, |
5b6dd868 BS |
1673 | TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *)); |
1674 | } | |
1675 | ||
1676 | void dump_exec_info(FILE *f, fprintf_function cpu_fprintf) | |
1677 | { | |
1678 | int i, target_code_size, max_target_code_size; | |
1679 | int direct_jmp_count, direct_jmp2_count, cross_page; | |
1680 | TranslationBlock *tb; | |
1681 | ||
1682 | target_code_size = 0; | |
1683 | max_target_code_size = 0; | |
1684 | cross_page = 0; | |
1685 | direct_jmp_count = 0; | |
1686 | direct_jmp2_count = 0; | |
5e5f07e0 EV |
1687 | for (i = 0; i < tcg_ctx.tb_ctx.nb_tbs; i++) { |
1688 | tb = &tcg_ctx.tb_ctx.tbs[i]; | |
5b6dd868 BS |
1689 | target_code_size += tb->size; |
1690 | if (tb->size > max_target_code_size) { | |
1691 | max_target_code_size = tb->size; | |
1692 | } | |
1693 | if (tb->page_addr[1] != -1) { | |
1694 | cross_page++; | |
1695 | } | |
1696 | if (tb->tb_next_offset[0] != 0xffff) { | |
1697 | direct_jmp_count++; | |
1698 | if (tb->tb_next_offset[1] != 0xffff) { | |
1699 | direct_jmp2_count++; | |
1700 | } | |
1701 | } | |
1702 | } | |
1703 | /* XXX: avoid using doubles ? */ | |
1704 | cpu_fprintf(f, "Translation buffer state:\n"); | |
1705 | cpu_fprintf(f, "gen code size %td/%zd\n", | |
0b0d3320 | 1706 | tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer, |
b125f9dc | 1707 | tcg_ctx.code_gen_highwater - tcg_ctx.code_gen_buffer); |
5b6dd868 | 1708 | cpu_fprintf(f, "TB count %d/%d\n", |
5e5f07e0 | 1709 | tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.code_gen_max_blocks); |
5b6dd868 | 1710 | cpu_fprintf(f, "TB avg target size %d max=%d bytes\n", |
5e5f07e0 EV |
1711 | tcg_ctx.tb_ctx.nb_tbs ? target_code_size / |
1712 | tcg_ctx.tb_ctx.nb_tbs : 0, | |
1713 | max_target_code_size); | |
5b6dd868 | 1714 | cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n", |
5e5f07e0 EV |
1715 | tcg_ctx.tb_ctx.nb_tbs ? (tcg_ctx.code_gen_ptr - |
1716 | tcg_ctx.code_gen_buffer) / | |
1717 | tcg_ctx.tb_ctx.nb_tbs : 0, | |
1718 | target_code_size ? (double) (tcg_ctx.code_gen_ptr - | |
1719 | tcg_ctx.code_gen_buffer) / | |
1720 | target_code_size : 0); | |
1721 | cpu_fprintf(f, "cross page TB count %d (%d%%)\n", cross_page, | |
1722 | tcg_ctx.tb_ctx.nb_tbs ? (cross_page * 100) / | |
1723 | tcg_ctx.tb_ctx.nb_tbs : 0); | |
5b6dd868 BS |
1724 | cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n", |
1725 | direct_jmp_count, | |
5e5f07e0 EV |
1726 | tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp_count * 100) / |
1727 | tcg_ctx.tb_ctx.nb_tbs : 0, | |
5b6dd868 | 1728 | direct_jmp2_count, |
5e5f07e0 EV |
1729 | tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp2_count * 100) / |
1730 | tcg_ctx.tb_ctx.nb_tbs : 0); | |
5b6dd868 | 1731 | cpu_fprintf(f, "\nStatistics:\n"); |
5e5f07e0 EV |
1732 | cpu_fprintf(f, "TB flush count %d\n", tcg_ctx.tb_ctx.tb_flush_count); |
1733 | cpu_fprintf(f, "TB invalidate count %d\n", | |
1734 | tcg_ctx.tb_ctx.tb_phys_invalidate_count); | |
5b6dd868 BS |
1735 | cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count); |
1736 | tcg_dump_info(f, cpu_fprintf); | |
1737 | } | |
1738 | ||
246ae24d MF |
1739 | void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf) |
1740 | { | |
1741 | tcg_dump_op_count(f, cpu_fprintf); | |
1742 | } | |
1743 | ||
5b6dd868 BS |
1744 | #else /* CONFIG_USER_ONLY */ |
1745 | ||
c3affe56 | 1746 | void cpu_interrupt(CPUState *cpu, int mask) |
5b6dd868 | 1747 | { |
259186a7 | 1748 | cpu->interrupt_request |= mask; |
378df4b2 | 1749 | cpu->tcg_exit_req = 1; |
5b6dd868 BS |
1750 | } |
1751 | ||
1752 | /* | |
1753 | * Walks guest process memory "regions" one by one | |
1754 | * and calls callback function 'fn' for each region. | |
1755 | */ | |
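/* A "region" is a maximal run of consecutive guest pages sharing the same
   protection flags; the walk recurses through the l1_map radix tree and
   reports each run to 'fn' as a single [start, end) interval. */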
1756 | struct walk_memory_regions_data { | |
1757 | walk_memory_regions_fn fn; | |
1758 | void *priv; | |
1a1c4db9 | 1759 | target_ulong start; |
5b6dd868 BS |
1760 | int prot; |
1761 | }; | |
1762 | ||
1763 | static int walk_memory_regions_end(struct walk_memory_regions_data *data, | |
1a1c4db9 | 1764 | target_ulong end, int new_prot) |
5b6dd868 | 1765 | { |
1a1c4db9 | 1766 | if (data->start != -1u) { |
5b6dd868 BS |
1767 | int rc = data->fn(data->priv, data->start, end, data->prot); |
1768 | if (rc != 0) { | |
1769 | return rc; | |
1770 | } | |
1771 | } | |
1772 | ||
1a1c4db9 | 1773 | data->start = (new_prot ? end : -1u); |
5b6dd868 BS |
1774 | data->prot = new_prot; |
1775 | ||
1776 | return 0; | |
1777 | } | |
1778 | ||
1779 | static int walk_memory_regions_1(struct walk_memory_regions_data *data, | |
1a1c4db9 | 1780 | target_ulong base, int level, void **lp) |
5b6dd868 | 1781 | { |
1a1c4db9 | 1782 | target_ulong pa; |
5b6dd868 BS |
1783 | int i, rc; |
1784 | ||
1785 | if (*lp == NULL) { | |
1786 | return walk_memory_regions_end(data, base, 0); | |
1787 | } | |
1788 | ||
1789 | if (level == 0) { | |
1790 | PageDesc *pd = *lp; | |
1791 | ||
03f49957 | 1792 | for (i = 0; i < V_L2_SIZE; ++i) { |
5b6dd868 BS |
1793 | int prot = pd[i].flags; |
1794 | ||
1795 | pa = base | (i << TARGET_PAGE_BITS); | |
1796 | if (prot != data->prot) { | |
1797 | rc = walk_memory_regions_end(data, pa, prot); | |
1798 | if (rc != 0) { | |
1799 | return rc; | |
1800 | } | |
1801 | } | |
1802 | } | |
1803 | } else { | |
1804 | void **pp = *lp; | |
1805 | ||
03f49957 | 1806 | for (i = 0; i < V_L2_SIZE; ++i) { |
1a1c4db9 | 1807 | pa = base | ((target_ulong)i << |
03f49957 | 1808 | (TARGET_PAGE_BITS + V_L2_BITS * level)); |
5b6dd868 BS |
1809 | rc = walk_memory_regions_1(data, pa, level - 1, pp + i); |
1810 | if (rc != 0) { | |
1811 | return rc; | |
1812 | } | |
1813 | } | |
1814 | } | |
1815 | ||
1816 | return 0; | |
1817 | } | |
1818 | ||
1819 | int walk_memory_regions(void *priv, walk_memory_regions_fn fn) | |
1820 | { | |
1821 | struct walk_memory_regions_data data; | |
1822 | uintptr_t i; | |
1823 | ||
1824 | data.fn = fn; | |
1825 | data.priv = priv; | |
1a1c4db9 | 1826 | data.start = -1u; |
5b6dd868 BS |
1827 | data.prot = 0; |
1828 | ||
1829 | for (i = 0; i < V_L1_SIZE; i++) { | |
1a1c4db9 | 1830 | int rc = walk_memory_regions_1(&data, (target_ulong)i << (V_L1_SHIFT + TARGET_PAGE_BITS), |
03f49957 | 1831 | V_L1_SHIFT / V_L2_BITS - 1, l1_map + i); |
5b6dd868 BS |
1832 | if (rc != 0) { |
1833 | return rc; | |
1834 | } | |
1835 | } | |
1836 | ||
1837 | return walk_memory_regions_end(&data, 0, 0); | |
1838 | } | |
1839 | ||
1a1c4db9 MI |
1840 | static int dump_region(void *priv, target_ulong start, |
1841 | target_ulong end, unsigned long prot) | |
5b6dd868 BS |
1842 | { |
1843 | FILE *f = (FILE *)priv; | |
1844 | ||
1a1c4db9 MI |
1845 | (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx |
1846 | " "TARGET_FMT_lx" %c%c%c\n", | |
5b6dd868 BS |
1847 | start, end, end - start, |
1848 | ((prot & PAGE_READ) ? 'r' : '-'), | |
1849 | ((prot & PAGE_WRITE) ? 'w' : '-'), | |
1850 | ((prot & PAGE_EXEC) ? 'x' : '-')); | |
1851 | ||
1852 | return 0; | |
1853 | } | |
1854 | ||
1855 | /* dump memory mappings */ | |
1856 | void page_dump(FILE *f) | |
1857 | { | |
1a1c4db9 | 1858 | const int length = sizeof(target_ulong) * 2; |
227b8175 SW |
1859 | (void) fprintf(f, "%-*s %-*s %-*s %s\n", |
1860 | length, "start", length, "end", length, "size", "prot"); | |
5b6dd868 BS |
1861 | walk_memory_regions(f, dump_region); |
1862 | } | |
1863 | ||
1864 | int page_get_flags(target_ulong address) | |
1865 | { | |
1866 | PageDesc *p; | |
1867 | ||
1868 | p = page_find(address >> TARGET_PAGE_BITS); | |
1869 | if (!p) { | |
1870 | return 0; | |
1871 | } | |
1872 | return p->flags; | |
1873 | } | |
1874 | ||
1875 | /* Modify the flags of a page and invalidate the code if necessary. | |
1876 | The flag PAGE_WRITE_ORG is set automatically whenever PAGE_WRITE | |
1877 | is set. The mmap_lock should already be held. */ | |
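/* Example (hypothetical caller, for illustration only): the user-mode mmap
 * emulation would typically register a freshly mapped range with
 *
 *     page_set_flags(start, start + len, prot | PAGE_VALID);
 *
 * where 'prot' stands for the PAGE_READ/PAGE_WRITE/PAGE_EXEC bits of the new
 * mapping, and later call page_set_flags(start, start + len, 0) when the
 * range is unmapped, clearing PAGE_VALID again.
 */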
1878 | void page_set_flags(target_ulong start, target_ulong end, int flags) | |
1879 | { | |
1880 | target_ulong addr, len; | |
1881 | ||
1882 | /* This function should never be called with addresses outside the | |
1883 | guest address space. If this assert fires, it probably indicates | |
1884 | a missing call to h2g_valid. */ | |
1885 | #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS | |
1a1c4db9 | 1886 | assert(end < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS)); |
5b6dd868 BS |
1887 | #endif |
1888 | assert(start < end); | |
1889 | ||
1890 | start = start & TARGET_PAGE_MASK; | |
1891 | end = TARGET_PAGE_ALIGN(end); | |
1892 | ||
1893 | if (flags & PAGE_WRITE) { | |
1894 | flags |= PAGE_WRITE_ORG; | |
1895 | } | |
1896 | ||
1897 | for (addr = start, len = end - start; | |
1898 | len != 0; | |
1899 | len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) { | |
1900 | PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1); | |
1901 | ||
1902 | /* If a page that contains code is being switched from read-only to | |
1903 | writable, invalidate the translated code in it first. */ | |
1904 | if (!(p->flags & PAGE_WRITE) && | |
1905 | (flags & PAGE_WRITE) && | |
1906 | p->first_tb) { | |
d02532f0 | 1907 | tb_invalidate_phys_page(addr, 0, NULL, false); |
5b6dd868 BS |
1908 | } |
1909 | p->flags = flags; | |
1910 | } | |
1911 | } | |
1912 | ||
1913 | int page_check_range(target_ulong start, target_ulong len, int flags) | |
1914 | { | |
1915 | PageDesc *p; | |
1916 | target_ulong end; | |
1917 | target_ulong addr; | |
1918 | ||
1919 | /* This function should never be called with addresses outside the | |
1920 | guest address space. If this assert fires, it probably indicates | |
1921 | a missing call to h2g_valid. */ | |
1922 | #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS | |
1a1c4db9 | 1923 | assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS)); |
5b6dd868 BS |
1924 | #endif |
1925 | ||
1926 | if (len == 0) { | |
1927 | return 0; | |
1928 | } | |
1929 | if (start + len - 1 < start) { | |
1930 | /* We've wrapped around. */ | |
1931 | return -1; | |
1932 | } | |
1933 | ||
1934 | /* must do this before we lose bits in the next step */ | |
1935 | end = TARGET_PAGE_ALIGN(start + len); | |
1936 | start = start & TARGET_PAGE_MASK; | |
1937 | ||
1938 | for (addr = start, len = end - start; | |
1939 | len != 0; | |
1940 | len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) { | |
1941 | p = page_find(addr >> TARGET_PAGE_BITS); | |
1942 | if (!p) { | |
1943 | return -1; | |
1944 | } | |
1945 | if (!(p->flags & PAGE_VALID)) { | |
1946 | return -1; | |
1947 | } | |
1948 | ||
1949 | if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) { | |
1950 | return -1; | |
1951 | } | |
1952 | if (flags & PAGE_WRITE) { | |
1953 | if (!(p->flags & PAGE_WRITE_ORG)) { | |
1954 | return -1; | |
1955 | } | |
1956 | /* unprotect the page if it was made read-only because it | |
1957 | contains translated code */ | |
1958 | if (!(p->flags & PAGE_WRITE)) { | |
1959 | if (!page_unprotect(addr, 0, NULL)) { | |
1960 | return -1; | |
1961 | } | |
1962 | } | |
5b6dd868 BS |
1963 | } |
1964 | } | |
1965 | return 0; | |
1966 | } | |
1967 | ||
1968 | /* called from signal handler: invalidate the code and unprotect the | |
1969 | page. Return TRUE if the fault was successfully handled. */ | |
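/* This is the counterpart of the mprotect() done in tb_alloc_page(): once the
   TBs on the affected host page have been invalidated there is no reason to
   keep it read-only, so PAGE_WRITE is restored for every target page it
   contains and the faulting write can be restarted. */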
1970 | int page_unprotect(target_ulong address, uintptr_t pc, void *puc) | |
1971 | { | |
1972 | unsigned int prot; | |
1973 | PageDesc *p; | |
1974 | target_ulong host_start, host_end, addr; | |
1975 | ||
1976 | /* Technically this isn't safe inside a signal handler. However, we | |
1977 | know this only ever happens in a synchronous SEGV handler, so in | |
1978 | practice it seems to be OK. */ | |
1979 | mmap_lock(); | |
1980 | ||
1981 | p = page_find(address >> TARGET_PAGE_BITS); | |
1982 | if (!p) { | |
1983 | mmap_unlock(); | |
1984 | return 0; | |
1985 | } | |
1986 | ||
1987 | /* if the page was really writable, then we change its | |
1988 | protection back to writable */ | |
1989 | if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) { | |
1990 | host_start = address & qemu_host_page_mask; | |
1991 | host_end = host_start + qemu_host_page_size; | |
1992 | ||
1993 | prot = 0; | |
1994 | for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) { | |
1995 | p = page_find(addr >> TARGET_PAGE_BITS); | |
1996 | p->flags |= PAGE_WRITE; | |
1997 | prot |= p->flags; | |
1998 | ||
1999 | /* and since the content will be modified, we must invalidate | |
2000 | the corresponding translated code. */ | |
d02532f0 | 2001 | tb_invalidate_phys_page(addr, pc, puc, true); |
5b6dd868 BS |
2002 | #ifdef DEBUG_TB_CHECK |
2003 | tb_invalidate_check(addr); | |
2004 | #endif | |
2005 | } | |
2006 | mprotect((void *)g2h(host_start), qemu_host_page_size, | |
2007 | prot & PAGE_BITS); | |
2008 | ||
2009 | mmap_unlock(); | |
2010 | return 1; | |
2011 | } | |
2012 | mmap_unlock(); | |
2013 | return 0; | |
2014 | } | |
2015 | #endif /* CONFIG_USER_ONLY */ |