Commit | Line | Data |
---|---|---|
d19893da FB |
1 | /* |
2 | * Host code generation | |
5fafdf24 | 3 | * |
d19893da FB |
4 | * Copyright (c) 2003 Fabrice Bellard |
5 | * | |
6 | * This library is free software; you can redistribute it and/or | |
7 | * modify it under the terms of the GNU Lesser General Public | |
8 | * License as published by the Free Software Foundation; either | |
9 | * version 2 of the License, or (at your option) any later version. | |
10 | * | |
11 | * This library is distributed in the hope that it will be useful, | |
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
14 | * Lesser General Public License for more details. | |
15 | * | |
16 | * You should have received a copy of the GNU Lesser General Public | |
8167ee88 | 17 | * License along with this library; if not, see <http://www.gnu.org/licenses/>. |
d19893da | 18 | */ |
5b6dd868 BS |
19 | #ifdef _WIN32 |
20 | #include <windows.h> | |
21 | #else | |
5b6dd868 BS |
22 | #include <sys/mman.h> |
23 | #endif | |
7b31bbc2 | 24 | #include "qemu/osdep.h" |
d19893da | 25 | |
2054396a | 26 | |
5b6dd868 | 27 | #include "qemu-common.h" |
af5ad107 | 28 | #define NO_CPU_IO_DEFS |
d3eead2e | 29 | #include "cpu.h" |
6db8b538 | 30 | #include "trace.h" |
76cad711 | 31 | #include "disas/disas.h" |
57fec1fe | 32 | #include "tcg.h" |
5b6dd868 BS |
33 | #if defined(CONFIG_USER_ONLY) |
34 | #include "qemu.h" | |
35 | #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) | |
36 | #include <sys/param.h> | |
37 | #if __FreeBSD_version >= 700104 | |
38 | #define HAVE_KINFO_GETVMMAP | |
39 | #define sigqueue sigqueue_freebsd /* avoid redefinition */ | |
5b6dd868 BS |
40 | #include <sys/proc.h> |
41 | #include <machine/profile.h> | |
42 | #define _KERNEL | |
43 | #include <sys/user.h> | |
44 | #undef _KERNEL | |
45 | #undef sigqueue | |
46 | #include <libutil.h> | |
47 | #endif | |
48 | #endif | |
0bc3cd62 PB |
49 | #else |
50 | #include "exec/address-spaces.h" | |
5b6dd868 BS |
51 | #endif |
52 | ||
022c62cb | 53 | #include "exec/cputlb.h" |
e1b89321 | 54 | #include "exec/tb-hash.h" |
5b6dd868 | 55 | #include "translate-all.h" |
510a647f | 56 | #include "qemu/bitmap.h" |
0aa09897 | 57 | #include "qemu/timer.h" |
508127e2 | 58 | #include "exec/log.h" |
5b6dd868 BS |
59 | |
60 | //#define DEBUG_TB_INVALIDATE | |
61 | //#define DEBUG_FLUSH | |
62 | /* make various TB consistency checks */ | |
63 | //#define DEBUG_TB_CHECK | |
64 | ||
65 | #if !defined(CONFIG_USER_ONLY) | |
66 | /* TB consistency checks only implemented for usermode emulation. */ | |
67 | #undef DEBUG_TB_CHECK | |
68 | #endif | |
69 | ||
70 | #define SMC_BITMAP_USE_THRESHOLD 10 | |
71 | ||
5b6dd868 BS |
72 | typedef struct PageDesc { |
73 | /* list of TBs intersecting this ram page */ | |
74 | TranslationBlock *first_tb; | |
75 | /* to optimize self-modifying code handling, we count the number of | |
76 | write accesses to a given page; past a threshold we use a bitmap */ | |
77 | unsigned int code_write_count; | |
510a647f | 78 | unsigned long *code_bitmap; |
5b6dd868 BS |
79 | #if defined(CONFIG_USER_ONLY) |
80 | unsigned long flags; | |
81 | #endif | |
82 | } PageDesc; | |
83 | ||
84 | /* In system mode we want L1_MAP to be based on ram offsets, | |
85 | while in user mode we want it to be based on virtual addresses. */ | |
86 | #if !defined(CONFIG_USER_ONLY) | |
87 | #if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS | |
88 | # define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS | |
89 | #else | |
90 | # define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS | |
91 | #endif | |
92 | #else | |
93 | # define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS | |
94 | #endif | |
95 | ||
03f49957 PB |
96 | /* Size of the L2 (and L3, etc) page tables. */ |
97 | #define V_L2_BITS 10 | |
98 | #define V_L2_SIZE (1 << V_L2_BITS) | |
99 | ||
5b6dd868 BS |
100 | /* The bits remaining after N lower levels of page tables. */ |
101 | #define V_L1_BITS_REM \ | |
03f49957 | 102 | ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS) |
5b6dd868 BS |
103 | |
104 | #if V_L1_BITS_REM < 4 | |
03f49957 | 105 | #define V_L1_BITS (V_L1_BITS_REM + V_L2_BITS) |
5b6dd868 BS |
106 | #else |
107 | #define V_L1_BITS V_L1_BITS_REM | |
108 | #endif | |
109 | ||
110 | #define V_L1_SIZE ((target_ulong)1 << V_L1_BITS) | |
111 | ||
112 | #define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS) | |
113 | ||
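/* [Editor's note: illustrative sketch, not part of the original file.]
 * How the macros above split a page index across the l1_map radix tree,
 * assuming L1_MAP_ADDR_SPACE_BITS == 32, TARGET_PAGE_BITS == 12 and
 * V_L2_BITS == 10: V_L1_BITS_REM = (32 - 12) % 10 = 0, so V_L1_BITS = 10
 * and V_L1_SHIFT = 10.  A page index then decomposes into a 10-bit L1
 * slot and a 10-bit leaf slot, which is the walk page_find_alloc()
 * performs further below. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const unsigned v_l1_shift = 10, v_l1_bits = 10, v_l2_bits = 10;
    uint32_t addr = 0xbfff2345;      /* arbitrary guest virtual address */
    uint32_t index = addr >> 12;     /* page index: 0xbfff2 */
    uint32_t l1 = (index >> v_l1_shift) & ((1u << v_l1_bits) - 1);
    uint32_t leaf = index & ((1u << v_l2_bits) - 1);
    printf("index=%#x l1=%#x leaf=%#x\n", index, l1, leaf);
    return 0;                        /* index=0xbfff2 l1=0x2ff leaf=0x3f2 */
}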
5b6dd868 | 114 | uintptr_t qemu_host_page_size; |
0c2d70c4 | 115 | intptr_t qemu_host_page_mask; |
5b6dd868 | 116 | |
d1142fb8 | 117 | /* The bottom level has pointers to PageDesc */ |
5b6dd868 BS |
118 | static void *l1_map[V_L1_SIZE]; |
119 | ||
57fec1fe FB |
120 | /* code generation context */ |
121 | TCGContext tcg_ctx; | |
d19893da | 122 | |
677ef623 FK |
123 | /* translation block context */ |
124 | #ifdef CONFIG_USER_ONLY | |
125 | __thread int have_tb_lock; | |
126 | #endif | |
127 | ||
128 | void tb_lock(void) | |
129 | { | |
130 | #ifdef CONFIG_USER_ONLY | |
131 | assert(!have_tb_lock); | |
132 | qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock); | |
133 | have_tb_lock++; | |
134 | #endif | |
135 | } | |
136 | ||
137 | void tb_unlock(void) | |
138 | { | |
139 | #ifdef CONFIG_USER_ONLY | |
140 | assert(have_tb_lock); | |
141 | have_tb_lock--; | |
142 | qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock); | |
143 | #endif | |
144 | } | |
145 | ||
146 | void tb_lock_reset(void) | |
147 | { | |
148 | #ifdef CONFIG_USER_ONLY | |
149 | if (have_tb_lock) { | |
150 | qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock); | |
151 | have_tb_lock = 0; | |
152 | } | |
153 | #endif | |
154 | } | |
155 | ||
5b6dd868 BS |
156 | static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc, |
157 | tb_page_addr_t phys_page2); | |
a8a826a3 | 158 | static TranslationBlock *tb_find_pc(uintptr_t tc_ptr); |
5b6dd868 | 159 | |
57fec1fe FB |
160 | void cpu_gen_init(void) |
161 | { | |
162 | tcg_context_init(&tcg_ctx); | |
57fec1fe FB |
163 | } |
164 | ||
fca8a500 RH |
165 | /* Encode VAL as a signed leb128 sequence at P. |
166 | Return P incremented past the encoded value. */ | |
167 | static uint8_t *encode_sleb128(uint8_t *p, target_long val) | |
168 | { | |
169 | int more, byte; | |
170 | ||
171 | do { | |
172 | byte = val & 0x7f; | |
173 | val >>= 7; | |
174 | more = !((val == 0 && (byte & 0x40) == 0) | |
175 | || (val == -1 && (byte & 0x40) != 0)); | |
176 | if (more) { | |
177 | byte |= 0x80; | |
178 | } | |
179 | *p++ = byte; | |
180 | } while (more); | |
181 | ||
182 | return p; | |
183 | } | |
184 | ||
185 | /* Decode a signed leb128 sequence at *PP; increment *PP past the | |
186 | decoded value. Return the decoded value. */ | |
187 | static target_long decode_sleb128(uint8_t **pp) | |
188 | { | |
189 | uint8_t *p = *pp; | |
190 | target_long val = 0; | |
191 | int byte, shift = 0; | |
192 | ||
193 | do { | |
194 | byte = *p++; | |
195 | val |= (target_ulong)(byte & 0x7f) << shift; | |
196 | shift += 7; | |
197 | } while (byte & 0x80); | |
198 | if (shift < TARGET_LONG_BITS && (byte & 0x40)) { | |
199 | val |= -(target_ulong)1 << shift; | |
200 | } | |
201 | ||
202 | *pp = p; | |
203 | return val; | |
204 | } | |
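/* [Editor's note: illustrative sketch, not part of the original file.]
 * A self-contained round-trip of the signed LEB128 scheme used by the two
 * helpers above, with int64_t standing in for target_long.  Small deltas
 * such as "pc_next - pc_prev" encode to a single byte, which is what keeps
 * the per-TB search table compact. */
#include <stdint.h>
#include <stdio.h>

static uint8_t *sleb128_put(uint8_t *p, int64_t val)
{
    int more, byte;
    do {
        byte = val & 0x7f;
        val >>= 7;
        more = !((val == 0 && (byte & 0x40) == 0)
                 || (val == -1 && (byte & 0x40) != 0));
        *p++ = byte | (more ? 0x80 : 0);
    } while (more);
    return p;
}

static int64_t sleb128_get(const uint8_t **pp)
{
    const uint8_t *p = *pp;
    int64_t val = 0;
    int byte, shift = 0;
    do {
        byte = *p++;
        val |= (int64_t)(byte & 0x7f) << shift;
        shift += 7;
    } while (byte & 0x80);
    if (shift < 64 && (byte & 0x40)) {
        val |= -(int64_t)1 << shift;
    }
    *pp = p;
    return val;
}

int main(void)
{
    uint8_t buf[16];
    const uint8_t *rp = buf;
    uint8_t *wp = sleb128_put(buf, -12345);
    printf("encoded in %d byte(s), decoded %lld\n",
           (int)(wp - buf), (long long)sleb128_get(&rp));
    return 0;                 /* encoded in 3 byte(s), decoded -12345 */
}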
205 | ||
206 | /* Encode the data collected about the instructions while compiling TB. | |
207 | Place the data at BLOCK, and return the number of bytes consumed. | |
208 | ||
209 | The logical table consists of TARGET_INSN_START_WORDS target_ulong's, | |
210 | which come from the target's insn_start data, followed by a uintptr_t | |
211 | which comes from the host pc of the end of the code implementing the insn. | |
212 | ||
213 | Each line of the table is encoded as sleb128 deltas from the previous | |
214 | line. The seed for the first line is { tb->pc, 0..., tb->tc_ptr }. | |
215 | That is, the first column is seeded with the guest pc, the last column | |
216 | with the host pc, and the middle columns with zeros. */ | |
217 | ||
218 | static int encode_search(TranslationBlock *tb, uint8_t *block) | |
219 | { | |
b125f9dc | 220 | uint8_t *highwater = tcg_ctx.code_gen_highwater; |
fca8a500 RH |
221 | uint8_t *p = block; |
222 | int i, j, n; | |
223 | ||
224 | tb->tc_search = block; | |
225 | ||
226 | for (i = 0, n = tb->icount; i < n; ++i) { | |
227 | target_ulong prev; | |
228 | ||
229 | for (j = 0; j < TARGET_INSN_START_WORDS; ++j) { | |
230 | if (i == 0) { | |
231 | prev = (j == 0 ? tb->pc : 0); | |
232 | } else { | |
233 | prev = tcg_ctx.gen_insn_data[i - 1][j]; | |
234 | } | |
235 | p = encode_sleb128(p, tcg_ctx.gen_insn_data[i][j] - prev); | |
236 | } | |
237 | prev = (i == 0 ? 0 : tcg_ctx.gen_insn_end_off[i - 1]); | |
238 | p = encode_sleb128(p, tcg_ctx.gen_insn_end_off[i] - prev); | |
b125f9dc RH |
239 | |
240 | /* Test for (pending) buffer overflow. The assumption is that any | |
241 | one row beginning below the high water mark cannot overrun | |
242 | the buffer completely. Thus we can test for overflow after | |
243 | encoding a row without having to check during encoding. */ | |
244 | if (unlikely(p > highwater)) { | |
245 | return -1; | |
246 | } | |
fca8a500 RH |
247 | } |
248 | ||
249 | return p - block; | |
250 | } | |
251 | ||
fec88f64 | 252 | /* The cpu state corresponding to 'searched_pc' is restored. */ |
74f10515 | 253 | static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb, |
a8a826a3 | 254 | uintptr_t searched_pc) |
d19893da | 255 | { |
fca8a500 RH |
256 | target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc }; |
257 | uintptr_t host_pc = (uintptr_t)tb->tc_ptr; | |
74f10515 | 258 | CPUArchState *env = cpu->env_ptr; |
fca8a500 RH |
259 | uint8_t *p = tb->tc_search; |
260 | int i, j, num_insns = tb->icount; | |
57fec1fe | 261 | #ifdef CONFIG_PROFILER |
fca8a500 | 262 | int64_t ti = profile_getclock(); |
57fec1fe FB |
263 | #endif |
264 | ||
fca8a500 RH |
265 | if (searched_pc < host_pc) { |
266 | return -1; | |
267 | } | |
d19893da | 268 | |
fca8a500 RH |
269 | /* Reconstruct the stored insn data while looking for the point at |
270 | which the end of the insn exceeds the searched_pc. */ | |
271 | for (i = 0; i < num_insns; ++i) { | |
272 | for (j = 0; j < TARGET_INSN_START_WORDS; ++j) { | |
273 | data[j] += decode_sleb128(&p); | |
274 | } | |
275 | host_pc += decode_sleb128(&p); | |
276 | if (host_pc > searched_pc) { | |
277 | goto found; | |
278 | } | |
279 | } | |
280 | return -1; | |
3b46e624 | 281 | |
fca8a500 | 282 | found: |
bd79255d | 283 | if (tb->cflags & CF_USE_ICOUNT) { |
414b15c9 | 284 | assert(use_icount); |
2e70f6ef | 285 | /* Reset the cycle counter to the start of the block. */ |
fca8a500 | 286 | cpu->icount_decr.u16.low += num_insns; |
2e70f6ef | 287 | /* Clear the IO flag. */ |
99df7dce | 288 | cpu->can_do_io = 0; |
2e70f6ef | 289 | } |
fca8a500 RH |
290 | cpu->icount_decr.u16.low -= i; |
291 | restore_state_to_opc(env, tb, data); | |
57fec1fe FB |
292 | |
293 | #ifdef CONFIG_PROFILER | |
fca8a500 RH |
294 | tcg_ctx.restore_time += profile_getclock() - ti; |
295 | tcg_ctx.restore_count++; | |
57fec1fe | 296 | #endif |
d19893da FB |
297 | return 0; |
298 | } | |
5b6dd868 | 299 | |
3f38f309 | 300 | bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr) |
a8a826a3 BS |
301 | { |
302 | TranslationBlock *tb; | |
303 | ||
304 | tb = tb_find_pc(retaddr); | |
305 | if (tb) { | |
74f10515 | 306 | cpu_restore_state_from_tb(cpu, tb, retaddr); |
d8a499f1 PD |
307 | if (tb->cflags & CF_NOCACHE) { |
308 | /* one-shot translation, invalidate it immediately */ | |
309 | cpu->current_tb = NULL; | |
310 | tb_phys_invalidate(tb, -1); | |
311 | tb_free(tb); | |
312 | } | |
a8a826a3 BS |
313 | return true; |
314 | } | |
315 | return false; | |
316 | } | |
317 | ||
47c16ed5 | 318 | void page_size_init(void) |
5b6dd868 BS |
319 | { |
320 | /* NOTE: we can always suppose that qemu_host_page_size >= | |
321 | TARGET_PAGE_SIZE */ | |
5b6dd868 | 322 | qemu_real_host_page_size = getpagesize(); |
0c2d70c4 | 323 | qemu_real_host_page_mask = -(intptr_t)qemu_real_host_page_size; |
5b6dd868 BS |
324 | if (qemu_host_page_size == 0) { |
325 | qemu_host_page_size = qemu_real_host_page_size; | |
326 | } | |
327 | if (qemu_host_page_size < TARGET_PAGE_SIZE) { | |
328 | qemu_host_page_size = TARGET_PAGE_SIZE; | |
329 | } | |
0c2d70c4 | 330 | qemu_host_page_mask = -(intptr_t)qemu_host_page_size; |
47c16ed5 | 331 | } |
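/* [Editor's note: illustrative sketch, not part of the original file.]
 * For a power-of-two page size, -(intptr_t)size is the usual round-down
 * mask computed above for qemu_host_page_mask; masking an address with it
 * drops the in-page offset: */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int64_t size = 4096;
    uint64_t mask = (uint64_t)-size;         /* 0xfffffffffffff000 */
    uint64_t addr = 0x7f0012345678ull;
    printf("%#llx -> %#llx\n", (unsigned long long)addr,
           (unsigned long long)(addr & mask));
    return 0;                 /* 0x7f0012345678 -> 0x7f0012345000 */
}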
5b6dd868 | 332 | |
47c16ed5 AK |
333 | static void page_init(void) |
334 | { | |
335 | page_size_init(); | |
5b6dd868 BS |
336 | #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY) |
337 | { | |
338 | #ifdef HAVE_KINFO_GETVMMAP | |
339 | struct kinfo_vmentry *freep; | |
340 | int i, cnt; | |
341 | ||
342 | freep = kinfo_getvmmap(getpid(), &cnt); | |
343 | if (freep) { | |
344 | mmap_lock(); | |
345 | for (i = 0; i < cnt; i++) { | |
346 | unsigned long startaddr, endaddr; | |
347 | ||
348 | startaddr = freep[i].kve_start; | |
349 | endaddr = freep[i].kve_end; | |
350 | if (h2g_valid(startaddr)) { | |
351 | startaddr = h2g(startaddr) & TARGET_PAGE_MASK; | |
352 | ||
353 | if (h2g_valid(endaddr)) { | |
354 | endaddr = h2g(endaddr); | |
355 | page_set_flags(startaddr, endaddr, PAGE_RESERVED); | |
356 | } else { | |
357 | #if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS | |
358 | endaddr = ~0ul; | |
359 | page_set_flags(startaddr, endaddr, PAGE_RESERVED); | |
360 | #endif | |
361 | } | |
362 | } | |
363 | } | |
364 | free(freep); | |
365 | mmap_unlock(); | |
366 | } | |
367 | #else | |
368 | FILE *f; | |
369 | ||
370 | last_brk = (unsigned long)sbrk(0); | |
371 | ||
372 | f = fopen("/compat/linux/proc/self/maps", "r"); | |
373 | if (f) { | |
374 | mmap_lock(); | |
375 | ||
376 | do { | |
377 | unsigned long startaddr, endaddr; | |
378 | int n; | |
379 | ||
380 | n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr); | |
381 | ||
382 | if (n == 2 && h2g_valid(startaddr)) { | |
383 | startaddr = h2g(startaddr) & TARGET_PAGE_MASK; | |
384 | ||
385 | if (h2g_valid(endaddr)) { | |
386 | endaddr = h2g(endaddr); | |
387 | } else { | |
388 | endaddr = ~0ul; | |
389 | } | |
390 | page_set_flags(startaddr, endaddr, PAGE_RESERVED); | |
391 | } | |
392 | } while (!feof(f)); | |
393 | ||
394 | fclose(f); | |
395 | mmap_unlock(); | |
396 | } | |
397 | #endif | |
398 | } | |
399 | #endif | |
400 | } | |
401 | ||
75692087 PB |
402 | /* If alloc=1: |
403 | * Called with mmap_lock held for user-mode emulation. | |
404 | */ | |
5b6dd868 BS |
405 | static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc) |
406 | { | |
407 | PageDesc *pd; | |
408 | void **lp; | |
409 | int i; | |
410 | ||
5b6dd868 BS |
411 | /* Level 1. Always allocated. */ |
412 | lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1)); | |
413 | ||
414 | /* Level 2..N-1. */ | |
03f49957 | 415 | for (i = V_L1_SHIFT / V_L2_BITS - 1; i > 0; i--) { |
6940fab8 | 416 | void **p = atomic_rcu_read(lp); |
5b6dd868 BS |
417 | |
418 | if (p == NULL) { | |
419 | if (!alloc) { | |
420 | return NULL; | |
421 | } | |
e3a0abfd | 422 | p = g_new0(void *, V_L2_SIZE); |
6940fab8 | 423 | atomic_rcu_set(lp, p); |
5b6dd868 BS |
424 | } |
425 | ||
03f49957 | 426 | lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1)); |
5b6dd868 BS |
427 | } |
428 | ||
6940fab8 | 429 | pd = atomic_rcu_read(lp); |
5b6dd868 BS |
430 | if (pd == NULL) { |
431 | if (!alloc) { | |
432 | return NULL; | |
433 | } | |
e3a0abfd | 434 | pd = g_new0(PageDesc, V_L2_SIZE); |
6940fab8 | 435 | atomic_rcu_set(lp, pd); |
5b6dd868 BS |
436 | } |
437 | ||
03f49957 | 438 | return pd + (index & (V_L2_SIZE - 1)); |
5b6dd868 BS |
439 | } |
440 | ||
441 | static inline PageDesc *page_find(tb_page_addr_t index) | |
442 | { | |
443 | return page_find_alloc(index, 0); | |
444 | } | |
445 | ||
5b6dd868 BS |
446 | #if defined(CONFIG_USER_ONLY) |
447 | /* Currently it is not recommended to allocate big chunks of data in | |
448 | user mode. This will change when a dedicated libc is used. */ | |
449 | /* ??? 64-bit hosts ought to have no problem mmaping data outside the | |
450 | region in which the guest needs to run. Revisit this. */ | |
451 | #define USE_STATIC_CODE_GEN_BUFFER | |
452 | #endif | |
453 | ||
5b6dd868 BS |
454 | /* Minimum size of the code gen buffer. This number is randomly chosen, |
455 | but not so small that we can't have a fair number of TB's live. */ | |
456 | #define MIN_CODE_GEN_BUFFER_SIZE (1024u * 1024) | |
457 | ||
458 | /* Maximum size of the code gen buffer we'd like to use. Unless otherwise | |
459 | indicated, this is constrained by the range of direct branches on the | |
460 | host cpu, as used by the TCG implementation of goto_tb. */ | |
461 | #if defined(__x86_64__) | |
462 | # define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024) | |
463 | #elif defined(__sparc__) | |
464 | # define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024) | |
5bfd75a3 RH |
465 | #elif defined(__powerpc64__) |
466 | # define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024) | |
4a136e0a CF |
467 | #elif defined(__aarch64__) |
468 | # define MAX_CODE_GEN_BUFFER_SIZE (128ul * 1024 * 1024) | |
5b6dd868 BS |
469 | #elif defined(__arm__) |
470 | # define MAX_CODE_GEN_BUFFER_SIZE (16u * 1024 * 1024) | |
471 | #elif defined(__s390x__) | |
472 | /* We have a +- 4GB range on the branches; leave some slop. */ | |
473 | # define MAX_CODE_GEN_BUFFER_SIZE (3ul * 1024 * 1024 * 1024) | |
479eb121 RH |
474 | #elif defined(__mips__) |
475 | /* We have a 256MB branch region, but leave room to make sure the | |
476 | main executable is also within that region. */ | |
477 | # define MAX_CODE_GEN_BUFFER_SIZE (128ul * 1024 * 1024) | |
5b6dd868 BS |
478 | #else |
479 | # define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1) | |
480 | #endif | |
481 | ||
482 | #define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024) | |
483 | ||
484 | #define DEFAULT_CODE_GEN_BUFFER_SIZE \ | |
485 | (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \ | |
486 | ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE) | |
487 | ||
488 | static inline size_t size_code_gen_buffer(size_t tb_size) | |
489 | { | |
490 | /* Size the buffer. */ | |
491 | if (tb_size == 0) { | |
492 | #ifdef USE_STATIC_CODE_GEN_BUFFER | |
493 | tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE; | |
494 | #else | |
495 | /* ??? Needs adjustments. */ | |
496 | /* ??? If we relax the requirement that CONFIG_USER_ONLY use the | |
497 | static buffer, we could size this on RESERVED_VA, on the text | |
498 | segment size of the executable, or continue to use the default. */ | |
499 | tb_size = (unsigned long)(ram_size / 4); | |
500 | #endif | |
501 | } | |
502 | if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) { | |
503 | tb_size = MIN_CODE_GEN_BUFFER_SIZE; | |
504 | } | |
505 | if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) { | |
506 | tb_size = MAX_CODE_GEN_BUFFER_SIZE; | |
507 | } | |
0b0d3320 | 508 | tcg_ctx.code_gen_buffer_size = tb_size; |
5b6dd868 BS |
509 | return tb_size; |
510 | } | |
511 | ||
483c76e1 RH |
512 | #ifdef __mips__ |
513 | /* In order to use J and JAL within the code_gen_buffer, we require | |
514 | that the buffer not cross a 256MB boundary. */ | |
515 | static inline bool cross_256mb(void *addr, size_t size) | |
516 | { | |
517 | return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & 0xf0000000; | |
518 | } | |
519 | ||
520 | /* We weren't able to allocate a buffer without crossing that boundary, | |
521 | so make do with the larger portion of the buffer that doesn't cross. | |
522 | Returns the new base of the buffer, and adjusts code_gen_buffer_size. */ | |
523 | static inline void *split_cross_256mb(void *buf1, size_t size1) | |
524 | { | |
525 | void *buf2 = (void *)(((uintptr_t)buf1 + size1) & 0xf0000000); | |
526 | size_t size2 = buf1 + size1 - buf2; | |
527 | ||
528 | size1 = buf2 - buf1; | |
529 | if (size1 < size2) { | |
530 | size1 = size2; | |
531 | buf1 = buf2; | |
532 | } | |
533 | ||
534 | tcg_ctx.code_gen_buffer_size = size1; | |
535 | return buf1; | |
536 | } | |
537 | #endif | |
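/* [Editor's note: illustrative sketch, not part of the original file.]
 * The cross_256mb() test above XORs the two end addresses of the buffer;
 * any difference in bits 28-31 means the region straddles a 256MB-aligned
 * boundary.  Reduced to a stand-alone check: */
#include <stdint.h>
#include <stdio.h>

static int crosses_256mb(uintptr_t addr, size_t size)
{
    return ((addr ^ (addr + size)) & 0xf0000000) != 0;
}

int main(void)
{
    printf("%d %d\n",
           crosses_256mb(0x0ff00000, 0x00200000),   /* 1: crosses 0x10000000 */
           crosses_256mb(0x10000000, 0x00200000));  /* 0: stays inside       */
    return 0;
}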
538 | ||
5b6dd868 BS |
539 | #ifdef USE_STATIC_CODE_GEN_BUFFER |
540 | static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE] | |
541 | __attribute__((aligned(CODE_GEN_ALIGN))); | |
542 | ||
f293709c RH |
543 | # ifdef _WIN32 |
544 | static inline void do_protect(void *addr, long size, int prot) | |
545 | { | |
546 | DWORD old_protect; | |
547 | VirtualProtect(addr, size, prot, &old_protect); | |
548 | } | |
549 | ||
550 | static inline void map_exec(void *addr, long size) | |
551 | { | |
552 | do_protect(addr, size, PAGE_EXECUTE_READWRITE); | |
553 | } | |
554 | ||
555 | static inline void map_none(void *addr, long size) | |
556 | { | |
557 | do_protect(addr, size, PAGE_NOACCESS); | |
558 | } | |
559 | # else | |
560 | static inline void do_protect(void *addr, long size, int prot) | |
561 | { | |
562 | uintptr_t start, end; | |
563 | ||
564 | start = (uintptr_t)addr; | |
565 | start &= qemu_real_host_page_mask; | |
566 | ||
567 | end = (uintptr_t)addr + size; | |
568 | end = ROUND_UP(end, qemu_real_host_page_size); | |
569 | ||
570 | mprotect((void *)start, end - start, prot); | |
571 | } | |
572 | ||
573 | static inline void map_exec(void *addr, long size) | |
574 | { | |
575 | do_protect(addr, size, PROT_READ | PROT_WRITE | PROT_EXEC); | |
576 | } | |
577 | ||
578 | static inline void map_none(void *addr, long size) | |
579 | { | |
580 | do_protect(addr, size, PROT_NONE); | |
581 | } | |
582 | # endif /* WIN32 */ | |
583 | ||
5b6dd868 BS |
584 | static inline void *alloc_code_gen_buffer(void) |
585 | { | |
483c76e1 | 586 | void *buf = static_code_gen_buffer; |
f293709c RH |
587 | size_t full_size, size; |
588 | ||
589 | /* The size of the buffer, rounded down to end on a page boundary. */ | |
590 | full_size = (((uintptr_t)buf + sizeof(static_code_gen_buffer)) | |
591 | & qemu_real_host_page_mask) - (uintptr_t)buf; | |
592 | ||
593 | /* Reserve a guard page. */ | |
594 | size = full_size - qemu_real_host_page_size; | |
595 | ||
596 | /* Honor a command-line option limiting the size of the buffer. */ | |
597 | if (size > tcg_ctx.code_gen_buffer_size) { | |
598 | size = (((uintptr_t)buf + tcg_ctx.code_gen_buffer_size) | |
599 | & qemu_real_host_page_mask) - (uintptr_t)buf; | |
600 | } | |
601 | tcg_ctx.code_gen_buffer_size = size; | |
602 | ||
483c76e1 | 603 | #ifdef __mips__ |
f293709c RH |
604 | if (cross_256mb(buf, size)) { |
605 | buf = split_cross_256mb(buf, size); | |
606 | size = tcg_ctx.code_gen_buffer_size; | |
483c76e1 RH |
607 | } |
608 | #endif | |
f293709c RH |
609 | |
610 | map_exec(buf, size); | |
611 | map_none(buf + size, qemu_real_host_page_size); | |
612 | qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE); | |
613 | ||
483c76e1 | 614 | return buf; |
5b6dd868 | 615 | } |
f293709c RH |
616 | #elif defined(_WIN32) |
617 | static inline void *alloc_code_gen_buffer(void) | |
618 | { | |
619 | size_t size = tcg_ctx.code_gen_buffer_size; | |
620 | void *buf1, *buf2; | |
621 | ||
622 | /* Perform the allocation in two steps, so that the guard page | |
623 | is reserved but uncommitted. */ | |
624 | buf1 = VirtualAlloc(NULL, size + qemu_real_host_page_size, | |
625 | MEM_RESERVE, PAGE_NOACCESS); | |
626 | if (buf1 != NULL) { | |
627 | buf2 = VirtualAlloc(buf1, size, MEM_COMMIT, PAGE_EXECUTE_READWRITE); | |
628 | assert(buf1 == buf2); | |
629 | } | |
630 | ||
631 | return buf1; | |
632 | } | |
633 | #else | |
5b6dd868 BS |
634 | static inline void *alloc_code_gen_buffer(void) |
635 | { | |
636 | int flags = MAP_PRIVATE | MAP_ANONYMOUS; | |
637 | uintptr_t start = 0; | |
f293709c | 638 | size_t size = tcg_ctx.code_gen_buffer_size; |
5b6dd868 BS |
639 | void *buf; |
640 | ||
641 | /* Constrain the position of the buffer based on the host cpu. | |
642 | Note that these addresses are chosen in concert with the | |
643 | addresses assigned in the relevant linker script file. */ | |
644 | # if defined(__PIE__) || defined(__PIC__) | |
645 | /* Don't bother setting a preferred location if we're building | |
646 | a position-independent executable. We're more likely to get | |
647 | an address near the main executable if we let the kernel | |
648 | choose the address. */ | |
649 | # elif defined(__x86_64__) && defined(MAP_32BIT) | |
650 | /* Force the memory down into low memory with the executable. | |
651 | Leave the choice of exact location with the kernel. */ | |
652 | flags |= MAP_32BIT; | |
653 | /* Cannot expect to map more than 800MB in low memory. */ | |
f293709c RH |
654 | if (size > 800u * 1024 * 1024) { |
655 | tcg_ctx.code_gen_buffer_size = size = 800u * 1024 * 1024; | |
5b6dd868 BS |
656 | } |
657 | # elif defined(__sparc__) | |
658 | start = 0x40000000ul; | |
659 | # elif defined(__s390x__) | |
660 | start = 0x90000000ul; | |
479eb121 | 661 | # elif defined(__mips__) |
f293709c | 662 | # if _MIPS_SIM == _ABI64 |
479eb121 RH |
663 | start = 0x128000000ul; |
664 | # else | |
665 | start = 0x08000000ul; | |
666 | # endif | |
5b6dd868 BS |
667 | # endif |
668 | ||
f293709c RH |
669 | buf = mmap((void *)start, size + qemu_real_host_page_size, |
670 | PROT_NONE, flags, -1, 0); | |
483c76e1 RH |
671 | if (buf == MAP_FAILED) { |
672 | return NULL; | |
673 | } | |
674 | ||
675 | #ifdef __mips__ | |
f293709c | 676 | if (cross_256mb(buf, size)) { |
5d831be2 | 677 | /* Try again, with the original still mapped, to avoid re-acquiring |
483c76e1 | 678 | that 256mb crossing. This time don't specify an address. */ |
f293709c RH |
679 | size_t size2; |
680 | void *buf2 = mmap(NULL, size + qemu_real_host_page_size, | |
681 | PROT_NONE, flags, -1, 0); | |
682 | switch (buf2 != MAP_FAILED) { | |
683 | case 1: | |
684 | if (!cross_256mb(buf2, size)) { | |
483c76e1 | 685 | /* Success! Use the new buffer. */ |
f293709c RH |
686 | munmap(buf, size); |
687 | break; | |
483c76e1 RH |
688 | } |
689 | /* Failure. Work with what we had. */ | |
f293709c RH |
690 | munmap(buf2, size); |
691 | /* fallthru */ | |
692 | default: | |
693 | /* Split the original buffer. Free the smaller half. */ | |
694 | buf2 = split_cross_256mb(buf, size); | |
695 | size2 = tcg_ctx.code_gen_buffer_size; | |
696 | if (buf == buf2) { | |
697 | munmap(buf + size2 + qemu_real_host_page_size, size - size2); | |
698 | } else { | |
699 | munmap(buf, size - size2); | |
700 | } | |
701 | size = size2; | |
702 | break; | |
483c76e1 | 703 | } |
f293709c | 704 | buf = buf2; |
483c76e1 RH |
705 | } |
706 | #endif | |
707 | ||
f293709c RH |
708 | /* Make the final buffer accessible. The guard page at the end |
709 | will remain inaccessible with PROT_NONE. */ | |
710 | mprotect(buf, size, PROT_WRITE | PROT_READ | PROT_EXEC); | |
483c76e1 | 711 | |
f293709c RH |
712 | /* Request large pages for the buffer. */ |
713 | qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE); | |
483c76e1 | 714 | |
5b6dd868 BS |
715 | return buf; |
716 | } | |
f293709c | 717 | #endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */ |
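/* [Editor's note: illustrative sketch, not part of the original file.]
 * The guard-page arrangement used by the allocators above, reduced to the
 * bare POSIX calls: reserve size plus one page with PROT_NONE, then open
 * up only the first "size" bytes.  A write running off the end faults on
 * the guard page instead of silently corrupting whatever follows. */
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    size_t page = (size_t)sysconf(_SC_PAGESIZE);
    size_t size = 16 * page;
    void *buf = mmap(NULL, size + page, PROT_NONE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (buf == MAP_FAILED) {
        perror("mmap");
        return EXIT_FAILURE;
    }
    if (mprotect(buf, size, PROT_READ | PROT_WRITE | PROT_EXEC) != 0) {
        perror("mprotect");
        return EXIT_FAILURE;
    }
    printf("usable buffer at %p, guard page at %p\n",
           buf, (char *)buf + size);
    munmap(buf, size + page);
    return 0;
}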
5b6dd868 BS |
718 | |
719 | static inline void code_gen_alloc(size_t tb_size) | |
720 | { | |
0b0d3320 EV |
721 | tcg_ctx.code_gen_buffer_size = size_code_gen_buffer(tb_size); |
722 | tcg_ctx.code_gen_buffer = alloc_code_gen_buffer(); | |
723 | if (tcg_ctx.code_gen_buffer == NULL) { | |
5b6dd868 BS |
724 | fprintf(stderr, "Could not allocate dynamic translator buffer\n"); |
725 | exit(1); | |
726 | } | |
727 | ||
8163b749 RH |
728 | /* Estimate a good size for the number of TBs we can support. We |
729 | still haven't deducted the prologue from the buffer size here, | |
730 | but that's minimal and won't affect the estimate much. */ | |
731 | tcg_ctx.code_gen_max_blocks | |
732 | = tcg_ctx.code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE; | |
733 | tcg_ctx.tb_ctx.tbs = g_new(TranslationBlock, tcg_ctx.code_gen_max_blocks); | |
734 | ||
677ef623 | 735 | qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock); |
5b6dd868 BS |
736 | } |
737 | ||
738 | /* Must be called before using the QEMU cpus. 'tb_size' is the size | |
739 | (in bytes) allocated to the translation buffer. Zero means default | |
740 | size. */ | |
741 | void tcg_exec_init(unsigned long tb_size) | |
742 | { | |
743 | cpu_gen_init(); | |
5b6dd868 | 744 | page_init(); |
f293709c | 745 | code_gen_alloc(tb_size); |
4cbea598 | 746 | #if defined(CONFIG_SOFTMMU) |
5b6dd868 BS |
747 | /* There's no guest base to take into account, so go ahead and |
748 | initialize the prologue now. */ | |
749 | tcg_prologue_init(&tcg_ctx); | |
750 | #endif | |
751 | } | |
752 | ||
753 | bool tcg_enabled(void) | |
754 | { | |
0b0d3320 | 755 | return tcg_ctx.code_gen_buffer != NULL; |
5b6dd868 BS |
756 | } |
757 | ||
758 | /* Allocate a new translation block. Flush the translation buffer if | |
759 | too many translation blocks or too much generated code. */ | |
760 | static TranslationBlock *tb_alloc(target_ulong pc) | |
761 | { | |
762 | TranslationBlock *tb; | |
763 | ||
b125f9dc | 764 | if (tcg_ctx.tb_ctx.nb_tbs >= tcg_ctx.code_gen_max_blocks) { |
5b6dd868 BS |
765 | return NULL; |
766 | } | |
5e5f07e0 | 767 | tb = &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs++]; |
5b6dd868 BS |
768 | tb->pc = pc; |
769 | tb->cflags = 0; | |
770 | return tb; | |
771 | } | |
772 | ||
773 | void tb_free(TranslationBlock *tb) | |
774 | { | |
775 | /* In practice this is mostly used for single-use temporary TBs. | |
776 | Ignore the hard cases and just back up if this TB happens to | |
777 | be the last one generated. */ | |
5e5f07e0 EV |
778 | if (tcg_ctx.tb_ctx.nb_tbs > 0 && |
779 | tb == &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs - 1]) { | |
0b0d3320 | 780 | tcg_ctx.code_gen_ptr = tb->tc_ptr; |
5e5f07e0 | 781 | tcg_ctx.tb_ctx.nb_tbs--; |
5b6dd868 BS |
782 | } |
783 | } | |
784 | ||
785 | static inline void invalidate_page_bitmap(PageDesc *p) | |
786 | { | |
012aef07 MA |
787 | g_free(p->code_bitmap); |
788 | p->code_bitmap = NULL; | |
5b6dd868 BS |
789 | p->code_write_count = 0; |
790 | } | |
791 | ||
792 | /* Set to NULL all the 'first_tb' fields in all PageDescs. */ | |
793 | static void page_flush_tb_1(int level, void **lp) | |
794 | { | |
795 | int i; | |
796 | ||
797 | if (*lp == NULL) { | |
798 | return; | |
799 | } | |
800 | if (level == 0) { | |
801 | PageDesc *pd = *lp; | |
802 | ||
03f49957 | 803 | for (i = 0; i < V_L2_SIZE; ++i) { |
5b6dd868 BS |
804 | pd[i].first_tb = NULL; |
805 | invalidate_page_bitmap(pd + i); | |
806 | } | |
807 | } else { | |
808 | void **pp = *lp; | |
809 | ||
03f49957 | 810 | for (i = 0; i < V_L2_SIZE; ++i) { |
5b6dd868 BS |
811 | page_flush_tb_1(level - 1, pp + i); |
812 | } | |
813 | } | |
814 | } | |
815 | ||
816 | static void page_flush_tb(void) | |
817 | { | |
818 | int i; | |
819 | ||
820 | for (i = 0; i < V_L1_SIZE; i++) { | |
03f49957 | 821 | page_flush_tb_1(V_L1_SHIFT / V_L2_BITS - 1, l1_map + i); |
5b6dd868 BS |
822 | } |
823 | } | |
824 | ||
825 | /* flush all the translation blocks */ | |
826 | /* XXX: tb_flush is currently not thread safe */ | |
bbd77c18 | 827 | void tb_flush(CPUState *cpu) |
5b6dd868 | 828 | { |
5b6dd868 BS |
829 | #if defined(DEBUG_FLUSH) |
830 | printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n", | |
0b0d3320 | 831 | (unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer), |
5e5f07e0 | 832 | tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.tb_ctx.nb_tbs > 0 ? |
0b0d3320 | 833 | ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)) / |
5e5f07e0 | 834 | tcg_ctx.tb_ctx.nb_tbs : 0); |
5b6dd868 | 835 | #endif |
0b0d3320 EV |
836 | if ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer) |
837 | > tcg_ctx.code_gen_buffer_size) { | |
a47dddd7 | 838 | cpu_abort(cpu, "Internal error: code buffer overflow\n"); |
5b6dd868 | 839 | } |
5e5f07e0 | 840 | tcg_ctx.tb_ctx.nb_tbs = 0; |
5b6dd868 | 841 | |
bdc44640 | 842 | CPU_FOREACH(cpu) { |
8cd70437 | 843 | memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache)); |
5b6dd868 BS |
844 | } |
845 | ||
eb2535f4 | 846 | memset(tcg_ctx.tb_ctx.tb_phys_hash, 0, sizeof(tcg_ctx.tb_ctx.tb_phys_hash)); |
5b6dd868 BS |
847 | page_flush_tb(); |
848 | ||
0b0d3320 | 849 | tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer; |
5b6dd868 BS |
850 | /* XXX: flush processor icache at this point if cache flush is |
851 | expensive */ | |
5e5f07e0 | 852 | tcg_ctx.tb_ctx.tb_flush_count++; |
5b6dd868 BS |
853 | } |
854 | ||
855 | #ifdef DEBUG_TB_CHECK | |
856 | ||
857 | static void tb_invalidate_check(target_ulong address) | |
858 | { | |
859 | TranslationBlock *tb; | |
860 | int i; | |
861 | ||
862 | address &= TARGET_PAGE_MASK; | |
863 | for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) { | |
5e5f07e0 | 864 | for (tb = tcg_ctx.tb_ctx.tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) { |
5b6dd868 BS |
865 | if (!(address + TARGET_PAGE_SIZE <= tb->pc || |
866 | address >= tb->pc + tb->size)) { | |
867 | printf("ERROR invalidate: address=" TARGET_FMT_lx | |
868 | " PC=%08lx size=%04x\n", | |
869 | address, (long)tb->pc, tb->size); | |
870 | } | |
871 | } | |
872 | } | |
873 | } | |
874 | ||
875 | /* verify that all the pages have correct rights for code */ | |
876 | static void tb_page_check(void) | |
877 | { | |
878 | TranslationBlock *tb; | |
879 | int i, flags1, flags2; | |
880 | ||
881 | for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) { | |
5e5f07e0 EV |
882 | for (tb = tcg_ctx.tb_ctx.tb_phys_hash[i]; tb != NULL; |
883 | tb = tb->phys_hash_next) { | |
5b6dd868 BS |
884 | flags1 = page_get_flags(tb->pc); |
885 | flags2 = page_get_flags(tb->pc + tb->size - 1); | |
886 | if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) { | |
887 | printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n", | |
888 | (long)tb->pc, tb->size, flags1, flags2); | |
889 | } | |
890 | } | |
891 | } | |
892 | } | |
893 | ||
894 | #endif | |
895 | ||
0c884d16 | 896 | static inline void tb_hash_remove(TranslationBlock **ptb, TranslationBlock *tb) |
5b6dd868 BS |
897 | { |
898 | TranslationBlock *tb1; | |
899 | ||
900 | for (;;) { | |
901 | tb1 = *ptb; | |
902 | if (tb1 == tb) { | |
0c884d16 | 903 | *ptb = tb1->phys_hash_next; |
5b6dd868 BS |
904 | break; |
905 | } | |
0c884d16 | 906 | ptb = &tb1->phys_hash_next; |
5b6dd868 BS |
907 | } |
908 | } | |
909 | ||
910 | static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb) | |
911 | { | |
912 | TranslationBlock *tb1; | |
913 | unsigned int n1; | |
914 | ||
915 | for (;;) { | |
916 | tb1 = *ptb; | |
917 | n1 = (uintptr_t)tb1 & 3; | |
918 | tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3); | |
919 | if (tb1 == tb) { | |
920 | *ptb = tb1->page_next[n1]; | |
921 | break; | |
922 | } | |
923 | ptb = &tb1->page_next[n1]; | |
924 | } | |
925 | } | |
926 | ||
927 | static inline void tb_jmp_remove(TranslationBlock *tb, int n) | |
928 | { | |
929 | TranslationBlock *tb1, **ptb; | |
930 | unsigned int n1; | |
931 | ||
932 | ptb = &tb->jmp_next[n]; | |
933 | tb1 = *ptb; | |
934 | if (tb1) { | |
935 | /* find tb(n) in circular list */ | |
936 | for (;;) { | |
937 | tb1 = *ptb; | |
938 | n1 = (uintptr_t)tb1 & 3; | |
939 | tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3); | |
940 | if (n1 == n && tb1 == tb) { | |
941 | break; | |
942 | } | |
943 | if (n1 == 2) { | |
944 | ptb = &tb1->jmp_first; | |
945 | } else { | |
946 | ptb = &tb1->jmp_next[n1]; | |
947 | } | |
948 | } | |
949 | /* now we can suppress tb(n) from the list */ | |
950 | *ptb = tb->jmp_next[n]; | |
951 | ||
952 | tb->jmp_next[n] = NULL; | |
953 | } | |
954 | } | |
955 | ||
956 | /* reset the jump entry 'n' of a TB so that it is not chained to | |
957 | another TB */ | |
958 | static inline void tb_reset_jump(TranslationBlock *tb, int n) | |
959 | { | |
960 | tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n])); | |
961 | } | |
962 | ||
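/* [Editor's note: illustrative sketch, not part of the original file.]
 * The page_next[]/jmp_next[] lists walked above keep a small index in the
 * two low bits of each TranslationBlock pointer (possible because TB
 * structures are at least 4-byte aligned), which is why the loops mask
 * with ~3 and test "& 3".  The same tagging trick on a stand-in struct: */
#include <assert.h>
#include <stdint.h>

struct tb_stub {
    int dummy;
} __attribute__((aligned(4)));

static inline void *tb_tag(struct tb_stub *tb, unsigned n)
{
    assert(n < 4 && ((uintptr_t)tb & 3) == 0);
    return (void *)((uintptr_t)tb | n);
}

static inline struct tb_stub *tb_untag(void *p, unsigned *n)
{
    *n = (uintptr_t)p & 3;
    return (struct tb_stub *)((uintptr_t)p & ~(uintptr_t)3);
}

int main(void)
{
    static struct tb_stub tb;
    unsigned n;
    struct tb_stub *back = tb_untag(tb_tag(&tb, 2), &n);
    assert(back == &tb && n == 2);
    return 0;
}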
0c884d16 | 963 | /* invalidate one TB */ |
5b6dd868 BS |
964 | void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr) |
965 | { | |
182735ef | 966 | CPUState *cpu; |
5b6dd868 BS |
967 | PageDesc *p; |
968 | unsigned int h, n1; | |
969 | tb_page_addr_t phys_pc; | |
970 | TranslationBlock *tb1, *tb2; | |
971 | ||
972 | /* remove the TB from the hash list */ | |
973 | phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK); | |
974 | h = tb_phys_hash_func(phys_pc); | |
5e5f07e0 | 975 | tb_hash_remove(&tcg_ctx.tb_ctx.tb_phys_hash[h], tb); |
5b6dd868 BS |
976 | |
977 | /* remove the TB from the page list */ | |
978 | if (tb->page_addr[0] != page_addr) { | |
979 | p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS); | |
980 | tb_page_remove(&p->first_tb, tb); | |
981 | invalidate_page_bitmap(p); | |
982 | } | |
983 | if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) { | |
984 | p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS); | |
985 | tb_page_remove(&p->first_tb, tb); | |
986 | invalidate_page_bitmap(p); | |
987 | } | |
988 | ||
5e5f07e0 | 989 | tcg_ctx.tb_ctx.tb_invalidated_flag = 1; |
5b6dd868 BS |
990 | |
991 | /* remove the TB from the hash list */ | |
992 | h = tb_jmp_cache_hash_func(tb->pc); | |
bdc44640 | 993 | CPU_FOREACH(cpu) { |
8cd70437 AF |
994 | if (cpu->tb_jmp_cache[h] == tb) { |
995 | cpu->tb_jmp_cache[h] = NULL; | |
5b6dd868 BS |
996 | } |
997 | } | |
998 | ||
999 | /* suppress this TB from the two jump lists */ | |
1000 | tb_jmp_remove(tb, 0); | |
1001 | tb_jmp_remove(tb, 1); | |
1002 | ||
1003 | /* suppress any remaining jumps to this TB */ | |
1004 | tb1 = tb->jmp_first; | |
1005 | for (;;) { | |
1006 | n1 = (uintptr_t)tb1 & 3; | |
1007 | if (n1 == 2) { | |
1008 | break; | |
1009 | } | |
1010 | tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3); | |
1011 | tb2 = tb1->jmp_next[n1]; | |
1012 | tb_reset_jump(tb1, n1); | |
1013 | tb1->jmp_next[n1] = NULL; | |
1014 | tb1 = tb2; | |
1015 | } | |
1016 | tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */ | |
1017 | ||
5e5f07e0 | 1018 | tcg_ctx.tb_ctx.tb_phys_invalidate_count++; |
5b6dd868 BS |
1019 | } |
1020 | ||
5b6dd868 BS |
1021 | static void build_page_bitmap(PageDesc *p) |
1022 | { | |
1023 | int n, tb_start, tb_end; | |
1024 | TranslationBlock *tb; | |
1025 | ||
510a647f | 1026 | p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE); |
5b6dd868 BS |
1027 | |
1028 | tb = p->first_tb; | |
1029 | while (tb != NULL) { | |
1030 | n = (uintptr_t)tb & 3; | |
1031 | tb = (TranslationBlock *)((uintptr_t)tb & ~3); | |
1032 | /* NOTE: this is subtle as a TB may span two physical pages */ | |
1033 | if (n == 0) { | |
1034 | /* NOTE: tb_end may be after the end of the page, but | |
1035 | it is not a problem */ | |
1036 | tb_start = tb->pc & ~TARGET_PAGE_MASK; | |
1037 | tb_end = tb_start + tb->size; | |
1038 | if (tb_end > TARGET_PAGE_SIZE) { | |
1039 | tb_end = TARGET_PAGE_SIZE; | |
1040 | } | |
1041 | } else { | |
1042 | tb_start = 0; | |
1043 | tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK); | |
1044 | } | |
510a647f | 1045 | bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start); |
5b6dd868 BS |
1046 | tb = tb->page_next[n]; |
1047 | } | |
1048 | } | |
1049 | ||
75692087 | 1050 | /* Called with mmap_lock held for user mode emulation. */ |
648f034c | 1051 | TranslationBlock *tb_gen_code(CPUState *cpu, |
5b6dd868 BS |
1052 | target_ulong pc, target_ulong cs_base, |
1053 | int flags, int cflags) | |
1054 | { | |
648f034c | 1055 | CPUArchState *env = cpu->env_ptr; |
5b6dd868 | 1056 | TranslationBlock *tb; |
5b6dd868 BS |
1057 | tb_page_addr_t phys_pc, phys_page2; |
1058 | target_ulong virt_page2; | |
fec88f64 | 1059 | tcg_insn_unit *gen_code_buf; |
fca8a500 | 1060 | int gen_code_size, search_size; |
fec88f64 RH |
1061 | #ifdef CONFIG_PROFILER |
1062 | int64_t ti; | |
1063 | #endif | |
5b6dd868 BS |
1064 | |
1065 | phys_pc = get_page_addr_code(env, pc); | |
56c0269a | 1066 | if (use_icount && !(cflags & CF_IGNORE_ICOUNT)) { |
0266359e PB |
1067 | cflags |= CF_USE_ICOUNT; |
1068 | } | |
b125f9dc | 1069 | |
5b6dd868 | 1070 | tb = tb_alloc(pc); |
b125f9dc RH |
1071 | if (unlikely(!tb)) { |
1072 | buffer_overflow: | |
5b6dd868 | 1073 | /* flush must be done */ |
bbd77c18 | 1074 | tb_flush(cpu); |
5b6dd868 BS |
1075 | /* cannot fail at this point */ |
1076 | tb = tb_alloc(pc); | |
b125f9dc | 1077 | assert(tb != NULL); |
5b6dd868 | 1078 | /* Don't forget to invalidate previous TB info. */ |
5e5f07e0 | 1079 | tcg_ctx.tb_ctx.tb_invalidated_flag = 1; |
5b6dd868 | 1080 | } |
fec88f64 RH |
1081 | |
1082 | gen_code_buf = tcg_ctx.code_gen_ptr; | |
1083 | tb->tc_ptr = gen_code_buf; | |
5b6dd868 BS |
1084 | tb->cs_base = cs_base; |
1085 | tb->flags = flags; | |
1086 | tb->cflags = cflags; | |
fec88f64 RH |
1087 | |
1088 | #ifdef CONFIG_PROFILER | |
1089 | tcg_ctx.tb_count1++; /* includes aborted translations because of | |
1090 | exceptions */ | |
1091 | ti = profile_getclock(); | |
1092 | #endif | |
1093 | ||
1094 | tcg_func_start(&tcg_ctx); | |
1095 | ||
1096 | gen_intermediate_code(env, tb); | |
1097 | ||
1098 | trace_translate_block(tb, tb->pc, tb->tc_ptr); | |
1099 | ||
1100 | /* generate machine code */ | |
1101 | tb->tb_next_offset[0] = 0xffff; | |
1102 | tb->tb_next_offset[1] = 0xffff; | |
1103 | tcg_ctx.tb_next_offset = tb->tb_next_offset; | |
1104 | #ifdef USE_DIRECT_JUMP | |
1105 | tcg_ctx.tb_jmp_offset = tb->tb_jmp_offset; | |
1106 | tcg_ctx.tb_next = NULL; | |
1107 | #else | |
1108 | tcg_ctx.tb_jmp_offset = NULL; | |
1109 | tcg_ctx.tb_next = tb->tb_next; | |
1110 | #endif | |
1111 | ||
1112 | #ifdef CONFIG_PROFILER | |
1113 | tcg_ctx.tb_count++; | |
1114 | tcg_ctx.interm_time += profile_getclock() - ti; | |
1115 | tcg_ctx.code_time -= profile_getclock(); | |
1116 | #endif | |
1117 | ||
b125f9dc RH |
1118 | /* ??? Overflow could be handled better here. In particular, we |
1119 | don't need to re-do gen_intermediate_code, nor should we re-do | |
1120 | the tcg optimization currently hidden inside tcg_gen_code. All | |
1121 | that should be required is to flush the TBs, allocate a new TB, | |
1122 | re-initialize it per above, and re-do the actual code generation. */ | |
fec88f64 | 1123 | gen_code_size = tcg_gen_code(&tcg_ctx, gen_code_buf); |
b125f9dc RH |
1124 | if (unlikely(gen_code_size < 0)) { |
1125 | goto buffer_overflow; | |
1126 | } | |
fca8a500 | 1127 | search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size); |
b125f9dc RH |
1128 | if (unlikely(search_size < 0)) { |
1129 | goto buffer_overflow; | |
1130 | } | |
fec88f64 RH |
1131 | |
1132 | #ifdef CONFIG_PROFILER | |
1133 | tcg_ctx.code_time += profile_getclock(); | |
1134 | tcg_ctx.code_in_len += tb->size; | |
1135 | tcg_ctx.code_out_len += gen_code_size; | |
fca8a500 | 1136 | tcg_ctx.search_out_len += search_size; |
fec88f64 RH |
1137 | #endif |
1138 | ||
1139 | #ifdef DEBUG_DISAS | |
1140 | if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) { | |
1141 | qemu_log("OUT: [size=%d]\n", gen_code_size); | |
1142 | log_disas(tb->tc_ptr, gen_code_size); | |
1143 | qemu_log("\n"); | |
1144 | qemu_log_flush(); | |
1145 | } | |
1146 | #endif | |
1147 | ||
fca8a500 RH |
1148 | tcg_ctx.code_gen_ptr = (void *) |
1149 | ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size, | |
1150 | CODE_GEN_ALIGN); | |
5b6dd868 BS |
1151 | |
1152 | /* check next page if needed */ | |
1153 | virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK; | |
1154 | phys_page2 = -1; | |
1155 | if ((pc & TARGET_PAGE_MASK) != virt_page2) { | |
1156 | phys_page2 = get_page_addr_code(env, virt_page2); | |
1157 | } | |
1158 | tb_link_page(tb, phys_pc, phys_page2); | |
1159 | return tb; | |
1160 | } | |
1161 | ||
1162 | /* | |
1163 | * Invalidate all TBs which intersect with the target physical address range | |
1164 | * [start;end[. NOTE: start and end may refer to *different* physical pages. | |
1165 | * 'is_cpu_write_access' should be true if called from a real cpu write | |
1166 | * access: the virtual CPU will exit the current TB if code is modified inside | |
1167 | * this TB. | |
75692087 PB |
1168 | * |
1169 | * Called with mmap_lock held for user-mode emulation | |
5b6dd868 | 1170 | */ |
35865339 | 1171 | void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end) |
5b6dd868 BS |
1172 | { |
1173 | while (start < end) { | |
35865339 | 1174 | tb_invalidate_phys_page_range(start, end, 0); |
5b6dd868 BS |
1175 | start &= TARGET_PAGE_MASK; |
1176 | start += TARGET_PAGE_SIZE; | |
1177 | } | |
1178 | } | |
1179 | ||
1180 | /* | |
1181 | * Invalidate all TBs which intersect with the target physical address range | |
1182 | * [start;end[. NOTE: start and end must refer to the *same* physical page. | |
1183 | * 'is_cpu_write_access' should be true if called from a real cpu write | |
1184 | * access: the virtual CPU will exit the current TB if code is modified inside | |
1185 | * this TB. | |
75692087 PB |
1186 | * |
1187 | * Called with mmap_lock held for user-mode emulation | |
5b6dd868 BS |
1188 | */ |
1189 | void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end, | |
1190 | int is_cpu_write_access) | |
1191 | { | |
1192 | TranslationBlock *tb, *tb_next, *saved_tb; | |
4917cf44 | 1193 | CPUState *cpu = current_cpu; |
baea4fae | 1194 | #if defined(TARGET_HAS_PRECISE_SMC) |
4917cf44 AF |
1195 | CPUArchState *env = NULL; |
1196 | #endif | |
5b6dd868 BS |
1197 | tb_page_addr_t tb_start, tb_end; |
1198 | PageDesc *p; | |
1199 | int n; | |
1200 | #ifdef TARGET_HAS_PRECISE_SMC | |
1201 | int current_tb_not_found = is_cpu_write_access; | |
1202 | TranslationBlock *current_tb = NULL; | |
1203 | int current_tb_modified = 0; | |
1204 | target_ulong current_pc = 0; | |
1205 | target_ulong current_cs_base = 0; | |
1206 | int current_flags = 0; | |
1207 | #endif /* TARGET_HAS_PRECISE_SMC */ | |
1208 | ||
1209 | p = page_find(start >> TARGET_PAGE_BITS); | |
1210 | if (!p) { | |
1211 | return; | |
1212 | } | |
baea4fae | 1213 | #if defined(TARGET_HAS_PRECISE_SMC) |
4917cf44 AF |
1214 | if (cpu != NULL) { |
1215 | env = cpu->env_ptr; | |
d77953b9 | 1216 | } |
4917cf44 | 1217 | #endif |
5b6dd868 BS |
1218 | |
1219 | /* we remove all the TBs in the range [start, end[ */ | |
1220 | /* XXX: see if in some cases it could be faster to invalidate all | |
1221 | the code */ | |
1222 | tb = p->first_tb; | |
1223 | while (tb != NULL) { | |
1224 | n = (uintptr_t)tb & 3; | |
1225 | tb = (TranslationBlock *)((uintptr_t)tb & ~3); | |
1226 | tb_next = tb->page_next[n]; | |
1227 | /* NOTE: this is subtle as a TB may span two physical pages */ | |
1228 | if (n == 0) { | |
1229 | /* NOTE: tb_end may be after the end of the page, but | |
1230 | it is not a problem */ | |
1231 | tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK); | |
1232 | tb_end = tb_start + tb->size; | |
1233 | } else { | |
1234 | tb_start = tb->page_addr[1]; | |
1235 | tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK); | |
1236 | } | |
1237 | if (!(tb_end <= start || tb_start >= end)) { | |
1238 | #ifdef TARGET_HAS_PRECISE_SMC | |
1239 | if (current_tb_not_found) { | |
1240 | current_tb_not_found = 0; | |
1241 | current_tb = NULL; | |
93afeade | 1242 | if (cpu->mem_io_pc) { |
5b6dd868 | 1243 | /* now we have a real cpu fault */ |
93afeade | 1244 | current_tb = tb_find_pc(cpu->mem_io_pc); |
5b6dd868 BS |
1245 | } |
1246 | } | |
1247 | if (current_tb == tb && | |
1248 | (current_tb->cflags & CF_COUNT_MASK) != 1) { | |
1249 | /* If we are modifying the current TB, we must stop | |
1250 | its execution. We could be more precise by checking | |
1251 | that the modification is after the current PC, but it | |
1252 | would require a specialized function to partially | |
1253 | restore the CPU state */ | |
1254 | ||
1255 | current_tb_modified = 1; | |
74f10515 | 1256 | cpu_restore_state_from_tb(cpu, current_tb, cpu->mem_io_pc); |
5b6dd868 BS |
1257 | cpu_get_tb_cpu_state(env, ¤t_pc, ¤t_cs_base, |
1258 | ¤t_flags); | |
1259 | } | |
1260 | #endif /* TARGET_HAS_PRECISE_SMC */ | |
1261 | /* we need to do that to handle the case where a signal | |
1262 | occurs while doing tb_phys_invalidate() */ | |
1263 | saved_tb = NULL; | |
d77953b9 AF |
1264 | if (cpu != NULL) { |
1265 | saved_tb = cpu->current_tb; | |
1266 | cpu->current_tb = NULL; | |
5b6dd868 BS |
1267 | } |
1268 | tb_phys_invalidate(tb, -1); | |
d77953b9 AF |
1269 | if (cpu != NULL) { |
1270 | cpu->current_tb = saved_tb; | |
c3affe56 AF |
1271 | if (cpu->interrupt_request && cpu->current_tb) { |
1272 | cpu_interrupt(cpu, cpu->interrupt_request); | |
5b6dd868 BS |
1273 | } |
1274 | } | |
1275 | } | |
1276 | tb = tb_next; | |
1277 | } | |
1278 | #if !defined(CONFIG_USER_ONLY) | |
1279 | /* if no code remaining, no need to continue to use slow writes */ | |
1280 | if (!p->first_tb) { | |
1281 | invalidate_page_bitmap(p); | |
fc377bcf | 1282 | tlb_unprotect_code(start); |
5b6dd868 BS |
1283 | } |
1284 | #endif | |
1285 | #ifdef TARGET_HAS_PRECISE_SMC | |
1286 | if (current_tb_modified) { | |
1287 | /* we generate a block containing just the instruction | |
1288 | modifying the memory. It will ensure that it cannot modify | |
1289 | itself */ | |
d77953b9 | 1290 | cpu->current_tb = NULL; |
648f034c | 1291 | tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1); |
0ea8cb88 | 1292 | cpu_resume_from_signal(cpu, NULL); |
5b6dd868 BS |
1293 | } |
1294 | #endif | |
1295 | } | |
1296 | ||
1297 | /* len must be <= 8 and start must be a multiple of len */ | |
1298 | void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len) | |
1299 | { | |
1300 | PageDesc *p; | |
5b6dd868 BS |
1301 | |
1302 | #if 0 | |
1303 | if (1) { | |
1304 | qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n", | |
1305 | cpu_single_env->mem_io_vaddr, len, | |
1306 | cpu_single_env->eip, | |
1307 | cpu_single_env->eip + | |
1308 | (intptr_t)cpu_single_env->segs[R_CS].base); | |
1309 | } | |
1310 | #endif | |
1311 | p = page_find(start >> TARGET_PAGE_BITS); | |
1312 | if (!p) { | |
1313 | return; | |
1314 | } | |
fc377bcf PB |
1315 | if (!p->code_bitmap && |
1316 | ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) { | |
1317 | /* build code bitmap */ | |
1318 | build_page_bitmap(p); | |
1319 | } | |
5b6dd868 | 1320 | if (p->code_bitmap) { |
510a647f EC |
1321 | unsigned int nr; |
1322 | unsigned long b; | |
1323 | ||
1324 | nr = start & ~TARGET_PAGE_MASK; | |
1325 | b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1)); | |
5b6dd868 BS |
1326 | if (b & ((1 << len) - 1)) { |
1327 | goto do_invalidate; | |
1328 | } | |
1329 | } else { | |
1330 | do_invalidate: | |
1331 | tb_invalidate_phys_page_range(start, start + len, 1); | |
1332 | } | |
1333 | } | |
1334 | ||
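/* [Editor's note: illustrative sketch, not part of the original file.]
 * The code_bitmap test above reads the word containing bit "nr" and shifts
 * it so the bits for [start, start+len) land at the bottom; because start
 * is a multiple of len and len <= 8, the range never straddles a word.
 * A stand-alone version with a 64-bit word size: */
#include <stdint.h>
#include <stdio.h>

#define BITS_PER_WORD 64

static int any_bit_set(const uint64_t *bitmap, unsigned nr, unsigned len)
{
    uint64_t b = bitmap[nr / BITS_PER_WORD] >> (nr % BITS_PER_WORD);
    return (b & ((1ull << len) - 1)) != 0;   /* caller guarantees len <= 8 */
}

int main(void)
{
    uint64_t map[64] = { 0 };                /* one 4096-bit page bitmap */
    map[1] |= 1ull << 5;                     /* mark bit 69 (64 + 5) */
    printf("%d %d\n", any_bit_set(map, 68, 4), any_bit_set(map, 70, 4));
    return 0;                                /* prints: 1 0 */
}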
1335 | #if !defined(CONFIG_SOFTMMU) | |
75692087 | 1336 | /* Called with mmap_lock held. */ |
5b6dd868 | 1337 | static void tb_invalidate_phys_page(tb_page_addr_t addr, |
d02532f0 AG |
1338 | uintptr_t pc, void *puc, |
1339 | bool locked) | |
5b6dd868 BS |
1340 | { |
1341 | TranslationBlock *tb; | |
1342 | PageDesc *p; | |
1343 | int n; | |
1344 | #ifdef TARGET_HAS_PRECISE_SMC | |
1345 | TranslationBlock *current_tb = NULL; | |
4917cf44 AF |
1346 | CPUState *cpu = current_cpu; |
1347 | CPUArchState *env = NULL; | |
5b6dd868 BS |
1348 | int current_tb_modified = 0; |
1349 | target_ulong current_pc = 0; | |
1350 | target_ulong current_cs_base = 0; | |
1351 | int current_flags = 0; | |
1352 | #endif | |
1353 | ||
1354 | addr &= TARGET_PAGE_MASK; | |
1355 | p = page_find(addr >> TARGET_PAGE_BITS); | |
1356 | if (!p) { | |
1357 | return; | |
1358 | } | |
1359 | tb = p->first_tb; | |
1360 | #ifdef TARGET_HAS_PRECISE_SMC | |
1361 | if (tb && pc != 0) { | |
1362 | current_tb = tb_find_pc(pc); | |
1363 | } | |
4917cf44 AF |
1364 | if (cpu != NULL) { |
1365 | env = cpu->env_ptr; | |
d77953b9 | 1366 | } |
5b6dd868 BS |
1367 | #endif |
1368 | while (tb != NULL) { | |
1369 | n = (uintptr_t)tb & 3; | |
1370 | tb = (TranslationBlock *)((uintptr_t)tb & ~3); | |
1371 | #ifdef TARGET_HAS_PRECISE_SMC | |
1372 | if (current_tb == tb && | |
1373 | (current_tb->cflags & CF_COUNT_MASK) != 1) { | |
1374 | /* If we are modifying the current TB, we must stop | |
1375 | its execution. We could be more precise by checking | |
1376 | that the modification is after the current PC, but it | |
1377 | would require a specialized function to partially | |
1378 | restore the CPU state */ | |
1379 | ||
1380 | current_tb_modified = 1; | |
74f10515 | 1381 | cpu_restore_state_from_tb(cpu, current_tb, pc); |
5b6dd868 BS |
1382 | cpu_get_tb_cpu_state(env, ¤t_pc, ¤t_cs_base, |
1383 | ¤t_flags); | |
1384 | } | |
1385 | #endif /* TARGET_HAS_PRECISE_SMC */ | |
1386 | tb_phys_invalidate(tb, addr); | |
1387 | tb = tb->page_next[n]; | |
1388 | } | |
1389 | p->first_tb = NULL; | |
1390 | #ifdef TARGET_HAS_PRECISE_SMC | |
1391 | if (current_tb_modified) { | |
1392 | /* we generate a block containing just the instruction | |
1393 | modifying the memory. It will ensure that it cannot modify | |
1394 | itself */ | |
d77953b9 | 1395 | cpu->current_tb = NULL; |
648f034c | 1396 | tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1); |
d02532f0 AG |
1397 | if (locked) { |
1398 | mmap_unlock(); | |
1399 | } | |
0ea8cb88 | 1400 | cpu_resume_from_signal(cpu, puc); |
5b6dd868 BS |
1401 | } |
1402 | #endif | |
1403 | } | |
1404 | #endif | |
1405 | ||
75692087 PB |
1406 | /* add the tb in the target page and protect it if necessary |
1407 | * | |
1408 | * Called with mmap_lock held for user-mode emulation. | |
1409 | */ | |
5b6dd868 BS |
1410 | static inline void tb_alloc_page(TranslationBlock *tb, |
1411 | unsigned int n, tb_page_addr_t page_addr) | |
1412 | { | |
1413 | PageDesc *p; | |
1414 | #ifndef CONFIG_USER_ONLY | |
1415 | bool page_already_protected; | |
1416 | #endif | |
1417 | ||
1418 | tb->page_addr[n] = page_addr; | |
1419 | p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1); | |
1420 | tb->page_next[n] = p->first_tb; | |
1421 | #ifndef CONFIG_USER_ONLY | |
1422 | page_already_protected = p->first_tb != NULL; | |
1423 | #endif | |
1424 | p->first_tb = (TranslationBlock *)((uintptr_t)tb | n); | |
1425 | invalidate_page_bitmap(p); | |
1426 | ||
5b6dd868 BS |
1427 | #if defined(CONFIG_USER_ONLY) |
1428 | if (p->flags & PAGE_WRITE) { | |
1429 | target_ulong addr; | |
1430 | PageDesc *p2; | |
1431 | int prot; | |
1432 | ||
1433 | /* force the host page as non writable (writes will have a | |
1434 | page fault + mprotect overhead) */ | |
1435 | page_addr &= qemu_host_page_mask; | |
1436 | prot = 0; | |
1437 | for (addr = page_addr; addr < page_addr + qemu_host_page_size; | |
1438 | addr += TARGET_PAGE_SIZE) { | |
1439 | ||
1440 | p2 = page_find(addr >> TARGET_PAGE_BITS); | |
1441 | if (!p2) { | |
1442 | continue; | |
1443 | } | |
1444 | prot |= p2->flags; | |
1445 | p2->flags &= ~PAGE_WRITE; | |
1446 | } | |
1447 | mprotect(g2h(page_addr), qemu_host_page_size, | |
1448 | (prot & PAGE_BITS) & ~PAGE_WRITE); | |
1449 | #ifdef DEBUG_TB_INVALIDATE | |
1450 | printf("protecting code page: 0x" TARGET_FMT_lx "\n", | |
1451 | page_addr); | |
1452 | #endif | |
1453 | } | |
1454 | #else | |
1455 | /* if some code is already present, then the pages are already | |
1456 | protected. So we handle the case where only the first TB is | |
1457 | allocated in a physical page */ | |
1458 | if (!page_already_protected) { | |
1459 | tlb_protect_code(page_addr); | |
1460 | } | |
1461 | #endif | |
5b6dd868 BS |
1462 | } |
1463 | ||
1464 | /* add a new TB and link it to the physical page tables. phys_page2 is | |
75692087 | 1465 | * (-1) to indicate that only one page contains the TB. |
9fd1a948 PB |
1466 | * |
1467 | * Called with mmap_lock held for user-mode emulation. | |
75692087 | 1468 | */ |
5b6dd868 BS |
1469 | static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc, |
1470 | tb_page_addr_t phys_page2) | |
1471 | { | |
1472 | unsigned int h; | |
1473 | TranslationBlock **ptb; | |
1474 | ||
5b6dd868 BS |
1475 | /* add in the physical hash table */ |
1476 | h = tb_phys_hash_func(phys_pc); | |
5e5f07e0 | 1477 | ptb = &tcg_ctx.tb_ctx.tb_phys_hash[h]; |
5b6dd868 BS |
1478 | tb->phys_hash_next = *ptb; |
1479 | *ptb = tb; | |
1480 | ||
1481 | /* add in the page list */ | |
1482 | tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK); | |
1483 | if (phys_page2 != -1) { | |
1484 | tb_alloc_page(tb, 1, phys_page2); | |
1485 | } else { | |
1486 | tb->page_addr[1] = -1; | |
1487 | } | |
1488 | ||
1489 | tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); | |
1490 | tb->jmp_next[0] = NULL; | |
1491 | tb->jmp_next[1] = NULL; | |
1492 | ||
1493 | /* init original jump addresses */ | |
1494 | if (tb->tb_next_offset[0] != 0xffff) { | |
1495 | tb_reset_jump(tb, 0); | |
1496 | } | |
1497 | if (tb->tb_next_offset[1] != 0xffff) { | |
1498 | tb_reset_jump(tb, 1); | |
1499 | } | |
1500 | ||
1501 | #ifdef DEBUG_TB_CHECK | |
1502 | tb_page_check(); | |
1503 | #endif | |
5b6dd868 BS |
1504 | } |
1505 | ||
5b6dd868 BS |
1506 | /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr < |
1507 | tb[1].tc_ptr. Return NULL if not found */ | |
a8a826a3 | 1508 | static TranslationBlock *tb_find_pc(uintptr_t tc_ptr) |
5b6dd868 BS |
1509 | { |
1510 | int m_min, m_max, m; | |
1511 | uintptr_t v; | |
1512 | TranslationBlock *tb; | |
1513 | ||
5e5f07e0 | 1514 | if (tcg_ctx.tb_ctx.nb_tbs <= 0) { |
5b6dd868 BS |
1515 | return NULL; |
1516 | } | |
0b0d3320 EV |
1517 | if (tc_ptr < (uintptr_t)tcg_ctx.code_gen_buffer || |
1518 | tc_ptr >= (uintptr_t)tcg_ctx.code_gen_ptr) { | |
5b6dd868 BS |
1519 | return NULL; |
1520 | } | |
1521 | /* binary search (cf Knuth) */ | |
1522 | m_min = 0; | |
5e5f07e0 | 1523 | m_max = tcg_ctx.tb_ctx.nb_tbs - 1; |
5b6dd868 BS |
1524 | while (m_min <= m_max) { |
1525 | m = (m_min + m_max) >> 1; | |
5e5f07e0 | 1526 | tb = &tcg_ctx.tb_ctx.tbs[m]; |
5b6dd868 BS |
1527 | v = (uintptr_t)tb->tc_ptr; |
1528 | if (v == tc_ptr) { | |
1529 | return tb; | |
1530 | } else if (tc_ptr < v) { | |
1531 | m_max = m - 1; | |
1532 | } else { | |
1533 | m_min = m + 1; | |
1534 | } | |
1535 | } | |
5e5f07e0 | 1536 | return &tcg_ctx.tb_ctx.tbs[m_max]; |
5b6dd868 BS |
1537 | } |
1538 | ||
ec53b45b | 1539 | #if !defined(CONFIG_USER_ONLY) |
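| /* Invalidate any TB covering the guest physical address 'addr' in | |
| address space 'as'.  The address is translated to a ram_addr_t and | |
| handed to tb_invalidate_phys_page_range(); regions that are neither | |
| RAM nor ROMD cannot contain translated code and are skipped. */ | |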
29d8ec7b | 1540 | void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr) |
5b6dd868 BS |
1541 | { |
1542 | ram_addr_t ram_addr; | |
5c8a00ce | 1543 | MemoryRegion *mr; |
149f54b5 | 1544 | hwaddr l = 1; |
5b6dd868 | 1545 | |
41063e1e | 1546 | rcu_read_lock(); |
29d8ec7b | 1547 | mr = address_space_translate(as, addr, &addr, &l, false); |
5c8a00ce PB |
1548 | if (!(memory_region_is_ram(mr) |
1549 | || memory_region_is_romd(mr))) { | |
41063e1e | 1550 | rcu_read_unlock(); |
5b6dd868 BS |
1551 | return; |
1552 | } | |
5c8a00ce | 1553 | ram_addr = (memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK) |
149f54b5 | 1554 | + addr; |
5b6dd868 | 1555 | tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0); |
41063e1e | 1556 | rcu_read_unlock(); |
5b6dd868 | 1557 | } |
ec53b45b | 1558 | #endif /* !defined(CONFIG_USER_ONLY) */ |
5b6dd868 | 1559 | |
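| /* Invalidate the TB containing the memory access that triggered a | |
| watchpoint, so it can be retranslated.  If the access came from a | |
| helper rather than generated code, the PC is taken from the saved | |
| CPU state instead. */ | |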
239c51a5 | 1560 | void tb_check_watchpoint(CPUState *cpu) |
5b6dd868 BS |
1561 | { |
1562 | TranslationBlock *tb; | |
1563 | ||
93afeade | 1564 | tb = tb_find_pc(cpu->mem_io_pc); |
8d302e76 AJ |
1565 | if (tb) { |
1566 | /* We can use retranslation to find the PC. */ | |
1567 | cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc); | |
1568 | tb_phys_invalidate(tb, -1); | |
1569 | } else { | |
1570 | /* The exception probably happened in a helper. The CPU state should | |
1571 | have been saved before calling it. Fetch the PC from there. */ | |
1572 | CPUArchState *env = cpu->env_ptr; | |
1573 | target_ulong pc, cs_base; | |
1574 | tb_page_addr_t addr; | |
1575 | int flags; | |
1576 | ||
1577 | cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags); | |
1578 | addr = get_page_addr_code(env, pc); | |
1579 | tb_invalidate_phys_range(addr, addr + 1); | |
5b6dd868 | 1580 | } |
5b6dd868 BS |
1581 | } |
1582 | ||
1583 | #ifndef CONFIG_USER_ONLY | |
5b6dd868 BS |
1584 | /* in deterministic execution mode, instructions doing device I/Os |
1585 | must be at the end of the TB */ | |
90b40a69 | 1586 | void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr) |
5b6dd868 | 1587 | { |
a47dddd7 | 1588 | #if defined(TARGET_MIPS) || defined(TARGET_SH4) |
90b40a69 | 1589 | CPUArchState *env = cpu->env_ptr; |
a47dddd7 | 1590 | #endif |
5b6dd868 BS |
1591 | TranslationBlock *tb; |
1592 | uint32_t n, cflags; | |
1593 | target_ulong pc, cs_base; | |
1594 | uint64_t flags; | |
1595 | ||
1596 | tb = tb_find_pc(retaddr); | |
1597 | if (!tb) { | |
a47dddd7 | 1598 | cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p", |
5b6dd868 BS |
1599 | (void *)retaddr); |
1600 | } | |
28ecfd7a | 1601 | n = cpu->icount_decr.u16.low + tb->icount; |
74f10515 | 1602 | cpu_restore_state_from_tb(cpu, tb, retaddr); |
5b6dd868 BS |
1603 | /* Calculate how many instructions had been executed before the fault |
1604 | occurred. */ | |
28ecfd7a | 1605 | n = n - cpu->icount_decr.u16.low; |
5b6dd868 BS |
1606 | /* Generate a new TB ending on the I/O insn. */ |
1607 | n++; | |
1608 | /* On MIPS and SH, delay slot instructions can only be restarted if | |
1609 | they were already the first instruction in the TB. If this is not | |
1610 | the first instruction in a TB then re-execute the preceding | |
1611 | branch. */ | |
1612 | #if defined(TARGET_MIPS) | |
1613 | if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) { | |
c3577479 | 1614 | env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4); |
28ecfd7a | 1615 | cpu->icount_decr.u16.low++; |
5b6dd868 BS |
1616 | env->hflags &= ~MIPS_HFLAG_BMASK; |
1617 | } | |
1618 | #elif defined(TARGET_SH4) | |
1619 | if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0 | |
1620 | && n > 1) { | |
1621 | env->pc -= 2; | |
28ecfd7a | 1622 | cpu->icount_decr.u16.low++; |
5b6dd868 BS |
1623 | env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL); |
1624 | } | |
1625 | #endif | |
1626 | /* This should never happen. */ | |
1627 | if (n > CF_COUNT_MASK) { | |
a47dddd7 | 1628 | cpu_abort(cpu, "TB too big during recompile"); |
5b6dd868 BS |
1629 | } |
1630 | ||
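| /* Recompile the block with an instruction budget of 'n' and | |
| CF_LAST_IO set, so that the new TB ends on the I/O instruction and | |
| that instruction is allowed to perform the device access. */ | |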
1631 | cflags = n | CF_LAST_IO; | |
1632 | pc = tb->pc; | |
1633 | cs_base = tb->cs_base; | |
1634 | flags = tb->flags; | |
1635 | tb_phys_invalidate(tb, -1); | |
02d57ea1 SF |
1636 | if (tb->cflags & CF_NOCACHE) { |
1637 | if (tb->orig_tb) { | |
1638 | /* Invalidate original TB if this TB was generated in | |
1639 | * cpu_exec_nocache() */ | |
1640 | tb_phys_invalidate(tb->orig_tb, -1); | |
1641 | } | |
1642 | tb_free(tb); | |
1643 | } | |
5b6dd868 BS |
1644 | /* FIXME: In theory this could raise an exception. In practice |
1645 | we have already translated the block once so it's probably ok. */ | |
648f034c | 1646 | tb_gen_code(cpu, pc, cs_base, flags, cflags); |
5b6dd868 BS |
1647 | /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not |
1648 | the first in the TB) then we end up generating a whole new TB and | |
1649 | repeating the fault, which is horribly inefficient. | |
1650 | Better would be to execute just this insn uncached, or generate a | |
1651 | second new TB. */ | |
0ea8cb88 | 1652 | cpu_resume_from_signal(cpu, NULL); |
5b6dd868 BS |
1653 | } |
1654 | ||
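| /* Drop from this CPU's jump cache every entry that could refer to a | |
| TB overlapping the flushed page, including TBs starting on the | |
| preceding page and spilling into it. */ | |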
611d4f99 | 1655 | void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr) |
5b6dd868 BS |
1656 | { |
1657 | unsigned int i; | |
1658 | ||
1659 | /* Discard jump cache entries for any TB which might overlap | |
1660 | the flushed page. */ | |
1661 | i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE); | |
8cd70437 | 1662 | memset(&cpu->tb_jmp_cache[i], 0, |
5b6dd868 BS |
1663 | TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *)); |
1664 | ||
1665 | i = tb_jmp_cache_hash_page(addr); | |
8cd70437 | 1666 | memset(&cpu->tb_jmp_cache[i], 0, |
5b6dd868 BS |
1667 | TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *)); |
1668 | } | |
1669 | ||
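| /* Print translation buffer statistics (generated code size, TB | |
| counts and sizes, cross-page and direct-jump counts, flush counts) | |
| to 'f'. */ | |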
1670 | void dump_exec_info(FILE *f, fprintf_function cpu_fprintf) | |
1671 | { | |
1672 | int i, target_code_size, max_target_code_size; | |
1673 | int direct_jmp_count, direct_jmp2_count, cross_page; | |
1674 | TranslationBlock *tb; | |
1675 | ||
1676 | target_code_size = 0; | |
1677 | max_target_code_size = 0; | |
1678 | cross_page = 0; | |
1679 | direct_jmp_count = 0; | |
1680 | direct_jmp2_count = 0; | |
5e5f07e0 EV |
1681 | for (i = 0; i < tcg_ctx.tb_ctx.nb_tbs; i++) { |
1682 | tb = &tcg_ctx.tb_ctx.tbs[i]; | |
5b6dd868 BS |
1683 | target_code_size += tb->size; |
1684 | if (tb->size > max_target_code_size) { | |
1685 | max_target_code_size = tb->size; | |
1686 | } | |
1687 | if (tb->page_addr[1] != -1) { | |
1688 | cross_page++; | |
1689 | } | |
1690 | if (tb->tb_next_offset[0] != 0xffff) { | |
1691 | direct_jmp_count++; | |
1692 | if (tb->tb_next_offset[1] != 0xffff) { | |
1693 | direct_jmp2_count++; | |
1694 | } | |
1695 | } | |
1696 | } | |
1697 | /* XXX: avoid using doubles ? */ | |
1698 | cpu_fprintf(f, "Translation buffer state:\n"); | |
1699 | cpu_fprintf(f, "gen code size %td/%zd\n", | |
0b0d3320 | 1700 | tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer, |
b125f9dc | 1701 | tcg_ctx.code_gen_highwater - tcg_ctx.code_gen_buffer); |
5b6dd868 | 1702 | cpu_fprintf(f, "TB count %d/%d\n", |
5e5f07e0 | 1703 | tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.code_gen_max_blocks); |
5b6dd868 | 1704 | cpu_fprintf(f, "TB avg target size %d max=%d bytes\n", |
5e5f07e0 EV |
1705 | tcg_ctx.tb_ctx.nb_tbs ? target_code_size / |
1706 | tcg_ctx.tb_ctx.nb_tbs : 0, | |
1707 | max_target_code_size); | |
5b6dd868 | 1708 | cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n", |
5e5f07e0 EV |
1709 | tcg_ctx.tb_ctx.nb_tbs ? (tcg_ctx.code_gen_ptr - |
1710 | tcg_ctx.code_gen_buffer) / | |
1711 | tcg_ctx.tb_ctx.nb_tbs : 0, | |
1712 | target_code_size ? (double) (tcg_ctx.code_gen_ptr - | |
1713 | tcg_ctx.code_gen_buffer) / | |
1714 | target_code_size : 0); | |
1715 | cpu_fprintf(f, "cross page TB count %d (%d%%)\n", cross_page, | |
1716 | tcg_ctx.tb_ctx.nb_tbs ? (cross_page * 100) / | |
1717 | tcg_ctx.tb_ctx.nb_tbs : 0); | |
5b6dd868 BS |
1718 | cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n", |
1719 | direct_jmp_count, | |
5e5f07e0 EV |
1720 | tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp_count * 100) / |
1721 | tcg_ctx.tb_ctx.nb_tbs : 0, | |
5b6dd868 | 1722 | direct_jmp2_count, |
5e5f07e0 EV |
1723 | tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp2_count * 100) / |
1724 | tcg_ctx.tb_ctx.nb_tbs : 0); | |
5b6dd868 | 1725 | cpu_fprintf(f, "\nStatistics:\n"); |
5e5f07e0 EV |
1726 | cpu_fprintf(f, "TB flush count %d\n", tcg_ctx.tb_ctx.tb_flush_count); |
1727 | cpu_fprintf(f, "TB invalidate count %d\n", | |
1728 | tcg_ctx.tb_ctx.tb_phys_invalidate_count); | |
5b6dd868 BS |
1729 | cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count); |
1730 | tcg_dump_info(f, cpu_fprintf); | |
1731 | } | |
1732 | ||
246ae24d MF |
1733 | void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf) |
1734 | { | |
1735 | tcg_dump_op_count(f, cpu_fprintf); | |
1736 | } | |
1737 | ||
5b6dd868 BS |
1738 | #else /* CONFIG_USER_ONLY */ |
1739 | ||
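| /* User-mode variant of cpu_interrupt: record the pending interrupt | |
| mask and request that the TCG execution loop exit. */ | |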
c3affe56 | 1740 | void cpu_interrupt(CPUState *cpu, int mask) |
5b6dd868 | 1741 | { |
259186a7 | 1742 | cpu->interrupt_request |= mask; |
378df4b2 | 1743 | cpu->tcg_exit_req = 1; |
5b6dd868 BS |
1744 | } |
1745 | ||
1746 | /* | |
1747 | * Walks guest process memory "regions" one by one | |
1748 | * and calls callback function 'fn' for each region. | |
1749 | */ | |
1750 | struct walk_memory_regions_data { | |
1751 | walk_memory_regions_fn fn; | |
1752 | void *priv; | |
1a1c4db9 | 1753 | target_ulong start; |
5b6dd868 BS |
1754 | int prot; |
1755 | }; | |
1756 | ||
1757 | static int walk_memory_regions_end(struct walk_memory_regions_data *data, | |
1a1c4db9 | 1758 | target_ulong end, int new_prot) |
5b6dd868 | 1759 | { |
1a1c4db9 | 1760 | if (data->start != -1u) { |
5b6dd868 BS |
1761 | int rc = data->fn(data->priv, data->start, end, data->prot); |
1762 | if (rc != 0) { | |
1763 | return rc; | |
1764 | } | |
1765 | } | |
1766 | ||
1a1c4db9 | 1767 | data->start = (new_prot ? end : -1u); |
5b6dd868 BS |
1768 | data->prot = new_prot; |
1769 | ||
1770 | return 0; | |
1771 | } | |
1772 | ||
1773 | static int walk_memory_regions_1(struct walk_memory_regions_data *data, | |
1a1c4db9 | 1774 | target_ulong base, int level, void **lp) |
5b6dd868 | 1775 | { |
1a1c4db9 | 1776 | target_ulong pa; |
5b6dd868 BS |
1777 | int i, rc; |
1778 | ||
1779 | if (*lp == NULL) { | |
1780 | return walk_memory_regions_end(data, base, 0); | |
1781 | } | |
1782 | ||
1783 | if (level == 0) { | |
1784 | PageDesc *pd = *lp; | |
1785 | ||
03f49957 | 1786 | for (i = 0; i < V_L2_SIZE; ++i) { |
5b6dd868 BS |
1787 | int prot = pd[i].flags; |
1788 | ||
1789 | pa = base | (i << TARGET_PAGE_BITS); | |
1790 | if (prot != data->prot) { | |
1791 | rc = walk_memory_regions_end(data, pa, prot); | |
1792 | if (rc != 0) { | |
1793 | return rc; | |
1794 | } | |
1795 | } | |
1796 | } | |
1797 | } else { | |
1798 | void **pp = *lp; | |
1799 | ||
03f49957 | 1800 | for (i = 0; i < V_L2_SIZE; ++i) { |
1a1c4db9 | 1801 | pa = base | ((target_ulong)i << |
03f49957 | 1802 | (TARGET_PAGE_BITS + V_L2_BITS * level)); |
5b6dd868 BS |
1803 | rc = walk_memory_regions_1(data, pa, level - 1, pp + i); |
1804 | if (rc != 0) { | |
1805 | return rc; | |
1806 | } | |
1807 | } | |
1808 | } | |
1809 | ||
1810 | return 0; | |
1811 | } | |
1812 | ||
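| /* Walk the two-level page table, coalescing adjacent pages with the | |
| same protection into regions and reporting each region to 'fn'.  A | |
| non-zero return from 'fn' aborts the walk and is propagated to the | |
| caller. */ | |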
1813 | int walk_memory_regions(void *priv, walk_memory_regions_fn fn) | |
1814 | { | |
1815 | struct walk_memory_regions_data data; | |
1816 | uintptr_t i; | |
1817 | ||
1818 | data.fn = fn; | |
1819 | data.priv = priv; | |
1a1c4db9 | 1820 | data.start = -1u; |
5b6dd868 BS |
1821 | data.prot = 0; |
1822 | ||
1823 | for (i = 0; i < V_L1_SIZE; i++) { | |
1a1c4db9 | 1824 | int rc = walk_memory_regions_1(&data, (target_ulong)i << (V_L1_SHIFT + TARGET_PAGE_BITS), |
03f49957 | 1825 | V_L1_SHIFT / V_L2_BITS - 1, l1_map + i); |
5b6dd868 BS |
1826 | if (rc != 0) { |
1827 | return rc; | |
1828 | } | |
1829 | } | |
1830 | ||
1831 | return walk_memory_regions_end(&data, 0, 0); | |
1832 | } | |
1833 | ||
1a1c4db9 MI |
1834 | static int dump_region(void *priv, target_ulong start, |
1835 | target_ulong end, unsigned long prot) | |
5b6dd868 BS |
1836 | { |
1837 | FILE *f = (FILE *)priv; | |
1838 | ||
1a1c4db9 MI |
1839 | (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx |
1840 | " "TARGET_FMT_lx" %c%c%c\n", | |
5b6dd868 BS |
1841 | start, end, end - start, |
1842 | ((prot & PAGE_READ) ? 'r' : '-'), | |
1843 | ((prot & PAGE_WRITE) ? 'w' : '-'), | |
1844 | ((prot & PAGE_EXEC) ? 'x' : '-')); | |
1845 | ||
1846 | return 0; | |
1847 | } | |
1848 | ||
1849 | /* dump memory mappings */ | |
1850 | void page_dump(FILE *f) | |
1851 | { | |
1a1c4db9 | 1852 | const int length = sizeof(target_ulong) * 2; |
227b8175 SW |
1853 | (void) fprintf(f, "%-*s %-*s %-*s %s\n", |
1854 | length, "start", length, "end", length, "size", "prot"); | |
5b6dd868 BS |
1855 | walk_memory_regions(f, dump_region); |
1856 | } | |
1857 | ||
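| /* Return the PAGE_* flags recorded for a guest virtual address, or 0 | |
| if the page has no descriptor. */ | |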
1858 | int page_get_flags(target_ulong address) | |
1859 | { | |
1860 | PageDesc *p; | |
1861 | ||
1862 | p = page_find(address >> TARGET_PAGE_BITS); | |
1863 | if (!p) { | |
1864 | return 0; | |
1865 | } | |
1866 | return p->flags; | |
1867 | } | |
1868 | ||
1869 | /* Modify the flags of a page and invalidate the code if necessary. | |
1870 | The flag PAGE_WRITE_ORG is set automatically depending | |
1871 | on PAGE_WRITE. The mmap_lock should already be held. */ | |
1872 | void page_set_flags(target_ulong start, target_ulong end, int flags) | |
1873 | { | |
1874 | target_ulong addr, len; | |
1875 | ||
1876 | /* This function should never be called with addresses outside the | |
1877 | guest address space. If this assert fires, it probably indicates | |
1878 | a missing call to h2g_valid. */ | |
1879 | #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS | |
1a1c4db9 | 1880 | assert(end < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS)); |
5b6dd868 BS |
1881 | #endif |
1882 | assert(start < end); | |
1883 | ||
1884 | start = start & TARGET_PAGE_MASK; | |
1885 | end = TARGET_PAGE_ALIGN(end); | |
1886 | ||
1887 | if (flags & PAGE_WRITE) { | |
1888 | flags |= PAGE_WRITE_ORG; | |
1889 | } | |
1890 | ||
1891 | for (addr = start, len = end - start; | |
1892 | len != 0; | |
1893 | len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) { | |
1894 | PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1); | |
1895 | ||
1896 | /* If the page was not writable and is becoming writable, any code | |
1897 | it contains must be invalidated. */ | |
1898 | if (!(p->flags & PAGE_WRITE) && | |
1899 | (flags & PAGE_WRITE) && | |
1900 | p->first_tb) { | |
d02532f0 | 1901 | tb_invalidate_phys_page(addr, 0, NULL, false); |
5b6dd868 BS |
1902 | } |
1903 | p->flags = flags; | |
1904 | } | |
1905 | } | |
1906 | ||
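| /* Check that every page in [start, start + len) is valid and grants | |
| the access requested in 'flags', unprotecting pages that were made | |
| read-only because they contain translated code.  Returns 0 on | |
| success, -1 on failure. */ | |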
1907 | int page_check_range(target_ulong start, target_ulong len, int flags) | |
1908 | { | |
1909 | PageDesc *p; | |
1910 | target_ulong end; | |
1911 | target_ulong addr; | |
1912 | ||
1913 | /* This function should never be called with addresses outside the | |
1914 | guest address space. If this assert fires, it probably indicates | |
1915 | a missing call to h2g_valid. */ | |
1916 | #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS | |
1a1c4db9 | 1917 | assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS)); |
5b6dd868 BS |
1918 | #endif |
1919 | ||
1920 | if (len == 0) { | |
1921 | return 0; | |
1922 | } | |
1923 | if (start + len - 1 < start) { | |
1924 | /* We've wrapped around. */ | |
1925 | return -1; | |
1926 | } | |
1927 | ||
1928 | /* must do this before we lose bits in the next step */ | |
1929 | end = TARGET_PAGE_ALIGN(start + len); | |
1930 | start = start & TARGET_PAGE_MASK; | |
1931 | ||
1932 | for (addr = start, len = end - start; | |
1933 | len != 0; | |
1934 | len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) { | |
1935 | p = page_find(addr >> TARGET_PAGE_BITS); | |
1936 | if (!p) { | |
1937 | return -1; | |
1938 | } | |
1939 | if (!(p->flags & PAGE_VALID)) { | |
1940 | return -1; | |
1941 | } | |
1942 | ||
1943 | if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) { | |
1944 | return -1; | |
1945 | } | |
1946 | if (flags & PAGE_WRITE) { | |
1947 | if (!(p->flags & PAGE_WRITE_ORG)) { | |
1948 | return -1; | |
1949 | } | |
1950 | /* unprotect the page if it was made read-only because it | |
1951 | contains translated code */ | |
1952 | if (!(p->flags & PAGE_WRITE)) { | |
1953 | if (!page_unprotect(addr, 0, NULL)) { | |
1954 | return -1; | |
1955 | } | |
1956 | } | |
5b6dd868 BS |
1957 | } |
1958 | } | |
1959 | return 0; | |
1960 | } | |
1961 | ||
1962 | /* called from signal handler: invalidate the code and unprotect the | |
1963 | page. Return TRUE if the fault was successfully handled. */ | |
1964 | int page_unprotect(target_ulong address, uintptr_t pc, void *puc) | |
1965 | { | |
1966 | unsigned int prot; | |
1967 | PageDesc *p; | |
1968 | target_ulong host_start, host_end, addr; | |
1969 | ||
1970 | /* Technically this isn't safe inside a signal handler. However we | |
1971 | know this only ever happens in a synchronous SEGV handler, so in | |
1972 | practice it seems to be ok. */ | |
1973 | mmap_lock(); | |
1974 | ||
1975 | p = page_find(address >> TARGET_PAGE_BITS); | |
1976 | if (!p) { | |
1977 | mmap_unlock(); | |
1978 | return 0; | |
1979 | } | |
1980 | ||
1981 | /* if the page was really writable, then we change its | |
1982 | protection back to writable */ | |
1983 | if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) { | |
1984 | host_start = address & qemu_host_page_mask; | |
1985 | host_end = host_start + qemu_host_page_size; | |
1986 | ||
1987 | prot = 0; | |
1988 | for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) { | |
1989 | p = page_find(addr >> TARGET_PAGE_BITS); | |
1990 | p->flags |= PAGE_WRITE; | |
1991 | prot |= p->flags; | |
1992 | ||
1993 | /* and since the content will be modified, we must invalidate | |
1994 | the corresponding translated code. */ | |
d02532f0 | 1995 | tb_invalidate_phys_page(addr, pc, puc, true); |
5b6dd868 BS |
1996 | #ifdef DEBUG_TB_CHECK |
1997 | tb_invalidate_check(addr); | |
1998 | #endif | |
1999 | } | |
2000 | mprotect((void *)g2h(host_start), qemu_host_page_size, | |
2001 | prot & PAGE_BITS); | |
2002 | ||
2003 | mmap_unlock(); | |
2004 | return 1; | |
2005 | } | |
2006 | mmap_unlock(); | |
2007 | return 0; | |
2008 | } | |
2009 | #endif /* CONFIG_USER_ONLY */ |