/*
 * Memory region management for Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qapi/error.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#include "tcg-internal.h"


struct tcg_region_tree {
    QemuMutex lock;
    GTree *tree;
    /* padding to avoid false sharing is computed at run-time */
};

/*
 * We divide code_gen_buffer into equally-sized "regions" that TCG threads
 * dynamically allocate from as demand dictates. Given appropriate region
 * sizing, this minimizes flushes even when some TCG threads generate a lot
 * more code than others.
 */
struct tcg_region_state {
    QemuMutex lock;

    /* fields set at init time */
    void *start_aligned;
    void *after_prologue;
    size_t n;
    size_t size; /* size of one region */
    size_t stride; /* .size + guard size */
    size_t total_size; /* size of entire buffer, >= n * stride */

    /* fields protected by the lock */
    size_t current; /* current region index */
    size_t agg_size_full; /* aggregate size of full regions */
};
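
/*
 * Illustrative layout (hypothetical numbers, not the real defaults): with
 * n = 4, stride = 16 pages and size = 15 pages, the buffer looks like
 *
 *   start_aligned
 *   v
 *   [ region 0 |G][ region 1 |G][ region 2 |G][ region 3 + extra |G]
 *
 * where each G is a one-page guard, region 0 actually begins at
 * after_prologue, and the final region absorbs the pages left over when
 * the buffer is not an exact multiple of stride.
 */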

static struct tcg_region_state region;

/*
 * This is an array of struct tcg_region_tree's, with padding.
 * We use void * to simplify the computation of region_trees[i]; each
 * struct is found every tree_size bytes.
 */
static void *region_trees;
static size_t tree_size;

bool in_code_gen_buffer(const void *p)
{
    /*
     * Much like it is valid to have a pointer to the byte past the
     * end of an array (so long as you don't dereference it), allow
     * a pointer to the byte past the end of the code gen buffer.
     */
    return (size_t)(p - region.start_aligned) <= region.total_size;
}

#ifdef CONFIG_DEBUG_TCG
const void *tcg_splitwx_to_rx(void *rw)
{
    /* Pass NULL pointers unchanged. */
    if (rw) {
        g_assert(in_code_gen_buffer(rw));
        rw += tcg_splitwx_diff;
    }
    return rw;
}

void *tcg_splitwx_to_rw(const void *rx)
{
    /* Pass NULL pointers unchanged. */
    if (rx) {
        rx -= tcg_splitwx_diff;
        /* Assert that we end with a pointer in the rw region. */
        g_assert(in_code_gen_buffer(rx));
    }
    return (void *)rx;
}
#endif /* CONFIG_DEBUG_TCG */

/* compare a pointer @ptr and a tb_tc @s */
static int ptr_cmp_tb_tc(const void *ptr, const struct tb_tc *s)
{
    if (ptr >= s->ptr + s->size) {
        return 1;
    } else if (ptr < s->ptr) {
        return -1;
    }
    return 0;
}

static gint tb_tc_cmp(gconstpointer ap, gconstpointer bp, gpointer userdata)
{
    const struct tb_tc *a = ap;
    const struct tb_tc *b = bp;

    /*
     * When both sizes are set, we know this isn't a lookup.
     * This is the most likely case: every TB must be inserted; lookups
     * are a lot less frequent.
     */
    if (likely(a->size && b->size)) {
        if (a->ptr > b->ptr) {
            return 1;
        } else if (a->ptr < b->ptr) {
            return -1;
        }
        /* a->ptr == b->ptr should happen only on deletions */
        g_assert(a->size == b->size);
        return 0;
    }
    /*
     * All lookups have exactly one of the two .size fields set to 0.
     * From the glib sources we see that @ap is always the lookup key. However
     * the docs provide no guarantee, so we just mark this case as likely.
     */
    if (likely(a->size == 0)) {
        return ptr_cmp_tb_tc(a->ptr, b);
    }
    return ptr_cmp_tb_tc(b->ptr, a);
}
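
/*
 * For example (illustration only): inserting a TB records a key with both
 * .ptr and .size set, while tcg_tb_lookup() probes with { .ptr = tc_ptr,
 * .size = 0 }. Via ptr_cmp_tb_tc(), such a probe compares equal to the one
 * inserted entry whose half-open range [ptr, ptr + size) contains tc_ptr,
 * which is what lets g_tree_lookup() answer "which TB covers this host PC?".
 */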

static void tb_destroy(gpointer value)
{
    TranslationBlock *tb = value;
    qemu_spin_destroy(&tb->jmp_lock);
}

static void tcg_region_trees_init(void)
{
    size_t i;

    tree_size = ROUND_UP(sizeof(struct tcg_region_tree), qemu_dcache_linesize);
    region_trees = qemu_memalign(qemu_dcache_linesize, region.n * tree_size);
    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        qemu_mutex_init(&rt->lock);
        rt->tree = g_tree_new_full(tb_tc_cmp, NULL, NULL, tb_destroy);
    }
}

static struct tcg_region_tree *tc_ptr_to_region_tree(const void *p)
{
    size_t region_idx;

    /*
     * Like tcg_splitwx_to_rw, with no assert. The pc may come from
     * a signal handler over which the caller has no control.
     */
    if (!in_code_gen_buffer(p)) {
        p -= tcg_splitwx_diff;
        if (!in_code_gen_buffer(p)) {
            return NULL;
        }
    }

    if (p < region.start_aligned) {
        region_idx = 0;
    } else {
        ptrdiff_t offset = p - region.start_aligned;

        if (offset > region.stride * (region.n - 1)) {
            region_idx = region.n - 1;
        } else {
            region_idx = offset / region.stride;
        }
    }
    return region_trees + region_idx * tree_size;
}
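
/*
 * Worked example (numbers are hypothetical): with stride = 32 MiB and n = 8,
 * a pointer 100 MiB past start_aligned yields offset / stride = 3, i.e. the
 * tree at region_trees + 3 * tree_size; any offset beyond stride * 7 is
 * clamped to the final tree, which also covers the extra pages assigned to
 * the last region.
 */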

void tcg_tb_insert(TranslationBlock *tb)
{
    struct tcg_region_tree *rt = tc_ptr_to_region_tree(tb->tc.ptr);

    g_assert(rt != NULL);
    qemu_mutex_lock(&rt->lock);
    g_tree_insert(rt->tree, &tb->tc, tb);
    qemu_mutex_unlock(&rt->lock);
}

void tcg_tb_remove(TranslationBlock *tb)
{
    struct tcg_region_tree *rt = tc_ptr_to_region_tree(tb->tc.ptr);

    g_assert(rt != NULL);
    qemu_mutex_lock(&rt->lock);
    g_tree_remove(rt->tree, &tb->tc);
    qemu_mutex_unlock(&rt->lock);
}

/*
 * Find the TB 'tb' such that
 * tb->tc.ptr <= tc_ptr < tb->tc.ptr + tb->tc.size
 * Return NULL if not found.
 */
TranslationBlock *tcg_tb_lookup(uintptr_t tc_ptr)
{
    struct tcg_region_tree *rt = tc_ptr_to_region_tree((void *)tc_ptr);
    TranslationBlock *tb;
    struct tb_tc s = { .ptr = (void *)tc_ptr };

    if (rt == NULL) {
        return NULL;
    }

    qemu_mutex_lock(&rt->lock);
    tb = g_tree_lookup(rt->tree, &s);
    qemu_mutex_unlock(&rt->lock);
    return tb;
}

static void tcg_region_tree_lock_all(void)
{
    size_t i;

    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        qemu_mutex_lock(&rt->lock);
    }
}

static void tcg_region_tree_unlock_all(void)
{
    size_t i;

    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        qemu_mutex_unlock(&rt->lock);
    }
}

void tcg_tb_foreach(GTraverseFunc func, gpointer user_data)
{
    size_t i;

    tcg_region_tree_lock_all();
    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        g_tree_foreach(rt->tree, func, user_data);
    }
    tcg_region_tree_unlock_all();
}

size_t tcg_nb_tbs(void)
{
    size_t nb_tbs = 0;
    size_t i;

    tcg_region_tree_lock_all();
    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        nb_tbs += g_tree_nnodes(rt->tree);
    }
    tcg_region_tree_unlock_all();
    return nb_tbs;
}

static void tcg_region_tree_reset_all(void)
{
    size_t i;

    tcg_region_tree_lock_all();
    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        /* Increment the refcount first so that destroy acts as a reset */
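        /*
         * (Note: g_tree_destroy() removes every node, invoking the value
         * destroy function tb_destroy() on each, and drops one reference;
         * the g_tree_ref() just below provides that extra reference so the
         * now-empty tree itself survives for reuse.)
         */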
        g_tree_ref(rt->tree);
        g_tree_destroy(rt->tree);
    }
    tcg_region_tree_unlock_all();
}

static void tcg_region_bounds(size_t curr_region, void **pstart, void **pend)
{
    void *start, *end;

    start = region.start_aligned + curr_region * region.stride;
    end = start + region.size;

    if (curr_region == 0) {
        start = region.after_prologue;
    }
    /* The final region may have a few extra pages due to earlier rounding. */
    if (curr_region == region.n - 1) {
        end = region.start_aligned + region.total_size;
    }

    *pstart = start;
    *pend = end;
}
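
/*
 * In other words (summary, not additional logic): region 0 spans
 * [after_prologue, start_aligned + size), a middle region i spans
 * [start_aligned + i * stride, start_aligned + i * stride + size), and the
 * last region runs up to start_aligned + total_size, absorbing the rounding
 * slack; the gap between size and stride holds each region's guard page.
 */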

static void tcg_region_assign(TCGContext *s, size_t curr_region)
{
    void *start, *end;

    tcg_region_bounds(curr_region, &start, &end);

    s->code_gen_buffer = start;
    s->code_gen_ptr = start;
    s->code_gen_buffer_size = end - start;
    s->code_gen_highwater = end - TCG_HIGHWATER;
}

static bool tcg_region_alloc__locked(TCGContext *s)
{
    if (region.current == region.n) {
        return true;
    }
    tcg_region_assign(s, region.current);
    region.current++;
    return false;
}

/*
 * Request a new region once the one in use has filled up.
 * Returns true on error.
 */
bool tcg_region_alloc(TCGContext *s)
{
    bool err;
    /* read the region size now; alloc__locked will overwrite it on success */
    size_t size_full = s->code_gen_buffer_size;

    qemu_mutex_lock(&region.lock);
    err = tcg_region_alloc__locked(s);
    if (!err) {
        region.agg_size_full += size_full - TCG_HIGHWATER;
    }
    qemu_mutex_unlock(&region.lock);
    return err;
}

/*
 * Perform a context's first region allocation.
 * This function does _not_ increment region.agg_size_full.
 */
static void tcg_region_initial_alloc__locked(TCGContext *s)
{
    bool err = tcg_region_alloc__locked(s);
    g_assert(!err);
}

void tcg_region_initial_alloc(TCGContext *s)
{
    qemu_mutex_lock(&region.lock);
    tcg_region_initial_alloc__locked(s);
    qemu_mutex_unlock(&region.lock);
}

/* Call from a safe-work context */
void tcg_region_reset_all(void)
{
    unsigned int n_ctxs = qatomic_read(&tcg_cur_ctxs);
    unsigned int i;

    qemu_mutex_lock(&region.lock);
    region.current = 0;
    region.agg_size_full = 0;

    for (i = 0; i < n_ctxs; i++) {
        TCGContext *s = qatomic_read(&tcg_ctxs[i]);
        tcg_region_initial_alloc__locked(s);
    }
    qemu_mutex_unlock(&region.lock);

    tcg_region_tree_reset_all();
}

static size_t tcg_n_regions(size_t tb_size, unsigned max_cpus)
{
#ifdef CONFIG_USER_ONLY
    return 1;
#else
    size_t n_regions;

    /*
     * It is likely that some vCPUs will translate more code than others,
     * so we first try to set more regions than max_cpus, with those regions
     * being of reasonable size. If that's not possible we make do by evenly
     * dividing the code_gen_buffer among the vCPUs.
     */
    /* Use a single region if all we have is one vCPU thread */
    if (max_cpus == 1 || !qemu_tcg_mttcg_enabled()) {
        return 1;
    }

    /*
     * Try to have more regions than max_cpus, with each region being >= 2 MB.
     * If we can't, then just allocate one region per vCPU thread.
     */
    n_regions = tb_size / (2 * MiB);
    if (n_regions <= max_cpus) {
        return max_cpus;
    }
    return MIN(n_regions, max_cpus * 8);
#endif
}
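
/*
 * Worked example (hypothetical sizes): in MTTCG with max_cpus = 8 and
 * tb_size = 1 GiB, n_regions = 512 and the result is MIN(512, 64) = 64;
 * with tb_size = 8 MiB, n_regions = 4 <= max_cpus, so we fall back to one
 * region per vCPU thread, i.e. 8.
 */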

/*
 * Minimum size of the code gen buffer. This number is randomly chosen,
 * but not so small that we can't have a fair number of TB's live.
 *
 * Maximum size, MAX_CODE_GEN_BUFFER_SIZE, is defined in tcg-target.h.
 * Unless otherwise indicated, this is constrained by the range of
 * direct branches on the host cpu, as used by the TCG implementation
 * of goto_tb.
 */
#define MIN_CODE_GEN_BUFFER_SIZE (1 * MiB)

#if TCG_TARGET_REG_BITS == 32
#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32 * MiB)
#ifdef CONFIG_USER_ONLY
/*
 * For user mode on smaller 32 bit systems we may run into trouble
 * allocating big chunks of data in the right place. On these systems
 * we utilise a static code generation buffer directly in the binary.
 */
#define USE_STATIC_CODE_GEN_BUFFER
#endif
#else /* TCG_TARGET_REG_BITS == 64 */
#ifdef CONFIG_USER_ONLY
/*
 * As user-mode emulation typically means running multiple instances
 * of the translator, don't go too nuts with our default code gen
 * buffer lest we make things too hard for the OS.
 */
#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (128 * MiB)
#else
/*
 * We expect most system emulation to run one or two guests per host.
 * Users running large scale system emulation may want to tweak their
 * runtime setup via the tb-size control on the command line.
 */
#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (1 * GiB)
#endif
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)

#ifdef __mips__
/*
 * In order to use J and JAL within the code_gen_buffer, we require
 * that the buffer not cross a 256MB boundary.
 */
static inline bool cross_256mb(void *addr, size_t size)
{
    return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful;
}
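
/*
 * For instance (illustration only): addr = 0x0ff00000 with size = 2 MiB
 * ends at 0x10100000; addr ^ (addr + size) = 0x1fe00000, which has bits set
 * above the low 28, so the range straddles a 256MB boundary and
 * cross_256mb() returns true.
 */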

/*
 * We weren't able to allocate a buffer without crossing that boundary,
 * so make do with the larger portion of the buffer that doesn't cross.
 * Returns the new base and size of the buffer in *obuf and *osize.
 */
static inline void split_cross_256mb(void **obuf, size_t *osize,
                                     void *buf1, size_t size1)
{
    void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
    size_t size2 = buf1 + size1 - buf2;

    size1 = buf2 - buf1;
    if (size1 < size2) {
        size1 = size2;
        buf1 = buf2;
    }

    *obuf = buf1;
    *osize = size1;
}
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

static int alloc_code_gen_buffer(size_t tb_size, int splitwx, Error **errp)
{
    void *buf, *end;
    size_t size;

    if (splitwx > 0) {
        error_setg(errp, "jit split-wx not supported");
        return -1;
    }

    /* page-align the beginning and end of the buffer */
    buf = static_code_gen_buffer;
    end = static_code_gen_buffer + sizeof(static_code_gen_buffer);
    buf = QEMU_ALIGN_PTR_UP(buf, qemu_real_host_page_size);
    end = QEMU_ALIGN_PTR_DOWN(end, qemu_real_host_page_size);

    size = end - buf;

    /* Honor a command-line option limiting the size of the buffer. */
    if (size > tb_size) {
        size = QEMU_ALIGN_DOWN(tb_size, qemu_real_host_page_size);
    }

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        split_cross_256mb(&buf, &size, buf, size);
    }
#endif

    region.start_aligned = buf;
    region.total_size = size;

    return PROT_READ | PROT_WRITE;
}
#elif defined(_WIN32)
static int alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
{
    void *buf;

    if (splitwx > 0) {
        error_setg(errp, "jit split-wx not supported");
        return -1;
    }

    buf = VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT,
                       PAGE_EXECUTE_READWRITE);
    if (buf == NULL) {
        error_setg_win32(errp, GetLastError(),
                         "allocate %zu bytes for jit buffer", size);
        return -1;
    }

    region.start_aligned = buf;
    region.total_size = size;

    return PAGE_READ | PAGE_WRITE | PAGE_EXEC;
}
#else
static int alloc_code_gen_buffer_anon(size_t size, int prot,
                                      int flags, Error **errp)
{
    void *buf;

    buf = mmap(NULL, size, prot, flags, -1, 0);
    if (buf == MAP_FAILED) {
        error_setg_errno(errp, errno,
                         "allocate %zu bytes for jit buffer", size);
        return -1;
    }

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        /*
         * Try again, with the original still mapped, to avoid re-acquiring
         * the same 256mb crossing.
         */
        size_t size2;
        void *buf2 = mmap(NULL, size, prot, flags, -1, 0);
        switch ((int)(buf2 != MAP_FAILED)) {
        case 1:
            if (!cross_256mb(buf2, size)) {
                /* Success! Use the new buffer. */
                munmap(buf, size);
                break;
            }
            /* Failure. Work with what we had. */
            munmap(buf2, size);
            /* fallthru */
        default:
            /* Split the original buffer. Free the smaller half. */
            split_cross_256mb(&buf2, &size2, buf, size);
            if (buf == buf2) {
                munmap(buf + size2, size - size2);
            } else {
                munmap(buf, size - size2);
            }
            size = size2;
            break;
        }
        buf = buf2;
    }
#endif

    region.start_aligned = buf;
    region.total_size = size;
    return prot;
}

#ifndef CONFIG_TCG_INTERPRETER
#ifdef CONFIG_POSIX
#include "qemu/memfd.h"

static int alloc_code_gen_buffer_splitwx_memfd(size_t size, Error **errp)
{
    void *buf_rw = NULL, *buf_rx = MAP_FAILED;
    int fd = -1;

#ifdef __mips__
    /* Find space for the RX mapping, vs the 256MiB regions. */
    if (alloc_code_gen_buffer_anon(size, PROT_NONE,
                                   MAP_PRIVATE | MAP_ANONYMOUS |
                                   MAP_NORESERVE, errp) < 0) {
        return -1;
    }
    /* The size of the mapping may have been adjusted. */
    buf_rx = region.start_aligned;
    size = region.total_size;
#endif

    buf_rw = qemu_memfd_alloc("tcg-jit", size, 0, &fd, errp);
    if (buf_rw == NULL) {
        goto fail;
    }

#ifdef __mips__
    void *tmp = mmap(buf_rx, size, PROT_READ | PROT_EXEC,
                     MAP_SHARED | MAP_FIXED, fd, 0);
    if (tmp != buf_rx) {
        goto fail_rx;
    }
#else
    buf_rx = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_SHARED, fd, 0);
    if (buf_rx == MAP_FAILED) {
        goto fail_rx;
    }
#endif

    close(fd);
    region.start_aligned = buf_rw;
    region.total_size = size;
    tcg_splitwx_diff = buf_rx - buf_rw;

    return PROT_READ | PROT_WRITE;

 fail_rx:
    error_setg_errno(errp, errno, "failed to map shared memory for execute");
 fail:
    if (buf_rx != MAP_FAILED) {
        munmap(buf_rx, size);
    }
    if (buf_rw) {
        munmap(buf_rw, size);
    }
    if (fd >= 0) {
        close(fd);
    }
    return -1;
}
#endif /* CONFIG_POSIX */

#ifdef CONFIG_DARWIN
#include <mach/mach.h>

extern kern_return_t mach_vm_remap(vm_map_t target_task,
                                   mach_vm_address_t *target_address,
                                   mach_vm_size_t size,
                                   mach_vm_offset_t mask,
                                   int flags,
                                   vm_map_t src_task,
                                   mach_vm_address_t src_address,
                                   boolean_t copy,
                                   vm_prot_t *cur_protection,
                                   vm_prot_t *max_protection,
                                   vm_inherit_t inheritance);

static int alloc_code_gen_buffer_splitwx_vmremap(size_t size, Error **errp)
{
    kern_return_t ret;
    mach_vm_address_t buf_rw, buf_rx;
    vm_prot_t cur_prot, max_prot;

    /* Map the read-write portion via normal anon memory. */
    if (alloc_code_gen_buffer_anon(size, PROT_READ | PROT_WRITE,
                                   MAP_PRIVATE | MAP_ANONYMOUS, errp) < 0) {
        return -1;
    }

    buf_rw = (mach_vm_address_t)region.start_aligned;
    buf_rx = 0;
    ret = mach_vm_remap(mach_task_self(),
                        &buf_rx,
                        size,
                        0,
                        VM_FLAGS_ANYWHERE,
                        mach_task_self(),
                        buf_rw,
                        false,
                        &cur_prot,
                        &max_prot,
                        VM_INHERIT_NONE);
    if (ret != KERN_SUCCESS) {
        /* TODO: Convert "ret" to a human readable error message. */
        error_setg(errp, "vm_remap for jit splitwx failed");
        munmap((void *)buf_rw, size);
        return -1;
    }

    if (mprotect((void *)buf_rx, size, PROT_READ | PROT_EXEC) != 0) {
        error_setg_errno(errp, errno, "mprotect for jit splitwx");
        munmap((void *)buf_rx, size);
        munmap((void *)buf_rw, size);
        return -1;
    }

    tcg_splitwx_diff = buf_rx - buf_rw;
    return PROT_READ | PROT_WRITE;
}
#endif /* CONFIG_DARWIN */
#endif /* CONFIG_TCG_INTERPRETER */

static int alloc_code_gen_buffer_splitwx(size_t size, Error **errp)
{
#ifndef CONFIG_TCG_INTERPRETER
# ifdef CONFIG_DARWIN
    return alloc_code_gen_buffer_splitwx_vmremap(size, errp);
# endif
# ifdef CONFIG_POSIX
    return alloc_code_gen_buffer_splitwx_memfd(size, errp);
# endif
#endif
    error_setg(errp, "jit split-wx not supported");
    return -1;
}

static int alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
{
    ERRP_GUARD();
    int prot, flags;

    if (splitwx) {
        prot = alloc_code_gen_buffer_splitwx(size, errp);
        if (prot >= 0) {
            return prot;
        }
        /*
         * If splitwx force-on (1), fail;
         * if splitwx default-on (-1), fall through to splitwx off.
         */
        if (splitwx > 0) {
            return -1;
        }
        error_free_or_abort(errp);
    }

    /*
     * macOS 11.2 has a bug (Apple Feedback FB8994773) in which mprotect
     * rejects a permission change from RWX -> NONE when reserving the
     * guard pages later. We can go the other way with the same number
     * of syscalls, so always begin with PROT_NONE.
     */
    prot = PROT_NONE;
    flags = MAP_PRIVATE | MAP_ANONYMOUS;
#ifdef CONFIG_DARWIN
    /* Applicable to both iOS and macOS (Apple Silicon). */
    if (!splitwx) {
        flags |= MAP_JIT;
    }
#endif

    return alloc_code_gen_buffer_anon(size, prot, flags, errp);
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */
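
/*
 * Summary of the contract shared by the allocators above: on success they
 * record the buffer in region.start_aligned / region.total_size, set
 * tcg_splitwx_diff when a separate RX view exists, and return the page
 * protections the initial mapping was created with, so that tcg_region_init
 * can tell whether it still needs to mprotect each region; on failure they
 * set *errp and return -1.
 */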

/*
 * Initializes region partitioning.
 *
 * Called at init time from the parent thread (i.e. the one calling
 * tcg_context_init), after the target's TCG globals have been set.
 *
 * Region partitioning works by splitting code_gen_buffer into separate regions,
 * and then assigning regions to TCG threads so that the threads can translate
 * code in parallel without synchronization.
 *
 * In softmmu the number of TCG threads is bounded by max_cpus, so we use at
 * least max_cpus regions in MTTCG. In !MTTCG we use a single region.
 * Note that the TCG options from the command-line (i.e. -accel accel=tcg,[...])
 * must have been parsed before calling this function, since it calls
 * qemu_tcg_mttcg_enabled().
 *
 * In user-mode we use a single region. Having multiple regions in user-mode
 * is not supported, because the number of vCPU threads (recall that each thread
 * spawned by the guest corresponds to a vCPU thread) is only bounded by the
 * OS, and usually this number is huge (tens of thousands is not uncommon).
 * Thus, given this large bound on the number of vCPU threads and the fact
 * that code_gen_buffer is allocated at compile-time, we cannot guarantee
 * the availability of at least one region per vCPU thread.
 *
 * However, this user-mode limitation is unlikely to be a significant problem
 * in practice. Multi-threaded guests share most if not all of their translated
 * code, which makes parallel code generation less appealing than in softmmu.
 */
void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus)
{
    const size_t page_size = qemu_real_host_page_size;
    size_t region_size;
    int have_prot, need_prot;

    /* Size the buffer. */
    if (tb_size == 0) {
        size_t phys_mem = qemu_get_host_physmem();
        if (phys_mem == 0) {
            tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
        } else {
            tb_size = QEMU_ALIGN_DOWN(phys_mem / 8, page_size);
            tb_size = MIN(DEFAULT_CODE_GEN_BUFFER_SIZE, tb_size);
        }
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }
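    /*
     * Illustrative sizing (not a fixed rule): on a 64-bit softmmu host with
     * 16 GiB of RAM and no explicit tb-size, phys_mem / 8 = 2 GiB, which is
     * then clamped to DEFAULT_CODE_GEN_BUFFER_SIZE (1 GiB in that
     * configuration).
     */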

    have_prot = alloc_code_gen_buffer(tb_size, splitwx, &error_fatal);
    assert(have_prot >= 0);

    /* Request large pages for the buffer and the splitwx. */
    qemu_madvise(region.start_aligned, region.total_size, QEMU_MADV_HUGEPAGE);
    if (tcg_splitwx_diff) {
        qemu_madvise(region.start_aligned + tcg_splitwx_diff,
                     region.total_size, QEMU_MADV_HUGEPAGE);
    }

    /*
     * Make region_size a multiple of page_size, using aligned as the start.
     * As a result of this we might end up with a few extra pages at the end of
     * the buffer; we will assign those to the last region.
     */
    region.n = tcg_n_regions(tb_size, max_cpus);
    region_size = tb_size / region.n;
    region_size = QEMU_ALIGN_DOWN(region_size, page_size);

    /* A region must have at least 2 pages; one code, one guard */
    g_assert(region_size >= 2 * page_size);
    region.stride = region_size;

    /* Reserve space for guard pages. */
    region.size = region_size - page_size;
    region.total_size -= page_size;

    /*
     * The first region will be smaller than the others, via the prologue,
     * which has yet to be allocated. For now, the first region begins at
     * the page boundary.
     */
    region.after_prologue = region.start_aligned;

    /* init the region struct */
    qemu_mutex_init(&region.lock);

    /*
     * Set guard pages in the rw buffer, as that's the one into which
     * buffer overruns could occur. Do not set guard pages in the rx
     * buffer -- let that one use hugepages throughout.
     * Work with the page protections set up with the initial mapping.
     */
    need_prot = PAGE_READ | PAGE_WRITE;
#ifndef CONFIG_TCG_INTERPRETER
    if (tcg_splitwx_diff == 0) {
        need_prot |= PAGE_EXEC;
    }
#endif
    for (size_t i = 0, n = region.n; i < n; i++) {
        void *start, *end;

        tcg_region_bounds(i, &start, &end);
        if (have_prot != need_prot) {
            int rc;

            if (need_prot == (PAGE_READ | PAGE_WRITE | PAGE_EXEC)) {
                rc = qemu_mprotect_rwx(start, end - start);
            } else if (need_prot == (PAGE_READ | PAGE_WRITE)) {
                rc = qemu_mprotect_rw(start, end - start);
            } else {
                g_assert_not_reached();
            }
            if (rc) {
                error_setg_errno(&error_fatal, errno,
                                 "mprotect of jit buffer");
            }
        }
        if (have_prot != 0) {
            /* Guard pages are nice for bug detection but are not essential. */
            (void)qemu_mprotect_none(end, page_size);
        }
    }

    tcg_region_trees_init();

    /*
     * Leave the initial context initialized to the first region.
     * This will be the context into which we generate the prologue.
     * It is also the only context for CONFIG_USER_ONLY.
     */
    tcg_region_initial_alloc__locked(&tcg_init_ctx);
}

void tcg_region_prologue_set(TCGContext *s)
{
    /* Deduct the prologue from the first region. */
    g_assert(region.start_aligned == s->code_gen_buffer);
    region.after_prologue = s->code_ptr;

    /* Recompute boundaries of the first region. */
    tcg_region_assign(s, 0);

    /* Register the balance of the buffer with gdb. */
    tcg_register_jit(tcg_splitwx_to_rx(region.after_prologue),
                     region.start_aligned + region.total_size -
                     region.after_prologue);
}

/*
 * Returns the size (in bytes) of all translated code (i.e. from all regions)
 * currently in the cache.
 * See also: tcg_code_capacity()
 * Do not confuse with tcg_current_code_size(); that one applies to a single
 * TCG context.
 */
size_t tcg_code_size(void)
{
    unsigned int n_ctxs = qatomic_read(&tcg_cur_ctxs);
    unsigned int i;
    size_t total;

    qemu_mutex_lock(&region.lock);
    total = region.agg_size_full;
    for (i = 0; i < n_ctxs; i++) {
        const TCGContext *s = qatomic_read(&tcg_ctxs[i]);
        size_t size;

        size = qatomic_read(&s->code_gen_ptr) - s->code_gen_buffer;
        g_assert(size <= s->code_gen_buffer_size);
        total += size;
    }
    qemu_mutex_unlock(&region.lock);
    return total;
}

/*
 * Returns the code capacity (in bytes) of the entire cache, i.e. including all
 * regions.
 * See also: tcg_code_size()
 */
size_t tcg_code_capacity(void)
{
    size_t guard_size, capacity;

    /* no need for synchronization; these variables are set at init time */
    guard_size = region.stride - region.size;
    capacity = region.total_size;
    capacity -= (region.n - 1) * guard_size;
    capacity -= region.n * TCG_HIGHWATER;

    return capacity;
}

size_t tcg_tb_phys_invalidate_count(void)
{
    unsigned int n_ctxs = qatomic_read(&tcg_cur_ctxs);
    unsigned int i;
    size_t total = 0;

    for (i = 0; i < n_ctxs; i++) {
        const TCGContext *s = qatomic_read(&tcg_ctxs[i]);

        total += qatomic_read(&s->tb_phys_invalidate_count);
    }
    return total;
}