/*
 * Memory region management for Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qapi/error.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#include "tcg-internal.h"


struct tcg_region_tree {
    QemuMutex lock;
    GTree *tree;
    /* padding to avoid false sharing is computed at run-time */
};

/*
 * We divide code_gen_buffer into equally-sized "regions" that TCG threads
 * dynamically allocate from as demand dictates. Given appropriate region
 * sizing, this minimizes flushes even when some TCG threads generate a lot
 * more code than others.
 */
struct tcg_region_state {
    QemuMutex lock;

    /* fields set at init time */
    void *start;
    void *start_aligned;
    void *end;
    size_t n;
    size_t size; /* size of one region */
    size_t stride; /* .size + guard size */

    /* fields protected by the lock */
    size_t current; /* current region index */
    size_t agg_size_full; /* aggregate size of full regions */
};

static struct tcg_region_state region;
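/*
 * Illustrative note: after tcg_region_init(), code_gen_buffer is carved
 * into region.n slots of region.stride bytes starting at start_aligned,
 * where each slot is region.size bytes of code space followed by one
 * guard page. Region 0 additionally covers the unaligned bytes between
 * start and start_aligned, and the last region absorbs any leftover
 * pages before end (see tcg_region_bounds()).
 */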

/*
 * This is an array of struct tcg_region_tree's, with padding.
 * We use void * to simplify the computation of region_trees[i]; each
 * struct is found every tree_size bytes.
 */
static void *region_trees;
static size_t tree_size;

/* compare a pointer @ptr and a tb_tc @s */
static int ptr_cmp_tb_tc(const void *ptr, const struct tb_tc *s)
{
    if (ptr >= s->ptr + s->size) {
        return 1;
    } else if (ptr < s->ptr) {
        return -1;
    }
    return 0;
}

static gint tb_tc_cmp(gconstpointer ap, gconstpointer bp)
{
    const struct tb_tc *a = ap;
    const struct tb_tc *b = bp;

    /*
     * When both sizes are set, we know this isn't a lookup.
     * This is the most likely case: every TB must be inserted; lookups
     * are a lot less frequent.
     */
    if (likely(a->size && b->size)) {
        if (a->ptr > b->ptr) {
            return 1;
        } else if (a->ptr < b->ptr) {
            return -1;
        }
        /* a->ptr == b->ptr should happen only on deletions */
        g_assert(a->size == b->size);
        return 0;
    }
    /*
     * For lookups, one of the two .size fields is 0: the lookup key carries
     * only a pointer. From the glib sources we see that @ap is always the
     * lookup key. However the docs provide no guarantee, so we just mark
     * this case as likely.
     */
    if (likely(a->size == 0)) {
        return ptr_cmp_tb_tc(a->ptr, b);
    }
    return ptr_cmp_tb_tc(b->ptr, a);
}
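/*
 * Example: tcg_tb_lookup() probes with { .ptr = tc_ptr, .size = 0 }.
 * The comparator then defers to ptr_cmp_tb_tc(), which treats the stored
 * key as the half-open interval [tb->tc.ptr, tb->tc.ptr + tb->tc.size),
 * so the search lands on the TB whose generated code contains tc_ptr.
 */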

static void tcg_region_trees_init(void)
{
    size_t i;

    tree_size = ROUND_UP(sizeof(struct tcg_region_tree), qemu_dcache_linesize);
    region_trees = qemu_memalign(qemu_dcache_linesize, region.n * tree_size);
    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        qemu_mutex_init(&rt->lock);
        rt->tree = g_tree_new(tb_tc_cmp);
    }
}

static struct tcg_region_tree *tc_ptr_to_region_tree(const void *p)
{
    size_t region_idx;

    /*
     * Like tcg_splitwx_to_rw, with no assert. The pc may come from
     * a signal handler over which the caller has no control.
     */
    if (!in_code_gen_buffer(p)) {
        p -= tcg_splitwx_diff;
        if (!in_code_gen_buffer(p)) {
            return NULL;
        }
    }

    if (p < region.start_aligned) {
        region_idx = 0;
    } else {
        ptrdiff_t offset = p - region.start_aligned;

        if (offset > region.stride * (region.n - 1)) {
            region_idx = region.n - 1;
        } else {
            region_idx = offset / region.stride;
        }
    }
    return region_trees + region_idx * tree_size;
}
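/*
 * Since region 0 may begin before start_aligned and the last region may
 * extend past start_aligned + (n - 1) * stride, pointers outside the
 * aligned grid are clamped to the first or last tree; everything else
 * maps to tree offset / stride (e.g. with a 16 MiB stride, a pointer
 * 40 MiB past start_aligned lands in tree 2).
 */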

void tcg_tb_insert(TranslationBlock *tb)
{
    struct tcg_region_tree *rt = tc_ptr_to_region_tree(tb->tc.ptr);

    g_assert(rt != NULL);
    qemu_mutex_lock(&rt->lock);
    g_tree_insert(rt->tree, &tb->tc, tb);
    qemu_mutex_unlock(&rt->lock);
}

void tcg_tb_remove(TranslationBlock *tb)
{
    struct tcg_region_tree *rt = tc_ptr_to_region_tree(tb->tc.ptr);

    g_assert(rt != NULL);
    qemu_mutex_lock(&rt->lock);
    g_tree_remove(rt->tree, &tb->tc);
    qemu_mutex_unlock(&rt->lock);
}

/*
 * Find the TB 'tb' such that
 * tb->tc.ptr <= tc_ptr < tb->tc.ptr + tb->tc.size
 * Return NULL if not found.
 */
TranslationBlock *tcg_tb_lookup(uintptr_t tc_ptr)
{
    struct tcg_region_tree *rt = tc_ptr_to_region_tree((void *)tc_ptr);
    TranslationBlock *tb;
    struct tb_tc s = { .ptr = (void *)tc_ptr };

    if (rt == NULL) {
        return NULL;
    }

    qemu_mutex_lock(&rt->lock);
    tb = g_tree_lookup(rt->tree, &s);
    qemu_mutex_unlock(&rt->lock);
    return tb;
}
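/*
 * Callers typically have only a host code address (e.g. one captured in a
 * signal handler) and need the TB that generated it; tc_ptr_to_region_tree()
 * above tolerates such addresses, including split-wx RX aliases.
 */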

static void tcg_region_tree_lock_all(void)
{
    size_t i;

    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        qemu_mutex_lock(&rt->lock);
    }
}

static void tcg_region_tree_unlock_all(void)
{
    size_t i;

    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        qemu_mutex_unlock(&rt->lock);
    }
}

void tcg_tb_foreach(GTraverseFunc func, gpointer user_data)
{
    size_t i;

    tcg_region_tree_lock_all();
    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        g_tree_foreach(rt->tree, func, user_data);
    }
    tcg_region_tree_unlock_all();
}

size_t tcg_nb_tbs(void)
{
    size_t nb_tbs = 0;
    size_t i;

    tcg_region_tree_lock_all();
    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        nb_tbs += g_tree_nnodes(rt->tree);
    }
    tcg_region_tree_unlock_all();
    return nb_tbs;
}

static gboolean tcg_region_tree_traverse(gpointer k, gpointer v, gpointer data)
{
    TranslationBlock *tb = v;

    tb_destroy(tb);
    return FALSE;
}

static void tcg_region_tree_reset_all(void)
{
    size_t i;

    tcg_region_tree_lock_all();
    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        g_tree_foreach(rt->tree, tcg_region_tree_traverse, NULL);
        /* Increment the refcount first so that destroy acts as a reset */
        g_tree_ref(rt->tree);
        g_tree_destroy(rt->tree);
    }
    tcg_region_tree_unlock_all();
}

static void tcg_region_bounds(size_t curr_region, void **pstart, void **pend)
{
    void *start, *end;

    start = region.start_aligned + curr_region * region.stride;
    end = start + region.size;

    if (curr_region == 0) {
        start = region.start;
    }
    if (curr_region == region.n - 1) {
        end = region.end;
    }

    *pstart = start;
    *pend = end;
}

static void tcg_region_assign(TCGContext *s, size_t curr_region)
{
    void *start, *end;

    tcg_region_bounds(curr_region, &start, &end);

    s->code_gen_buffer = start;
    s->code_gen_ptr = start;
    s->code_gen_buffer_size = end - start;
    s->code_gen_highwater = end - TCG_HIGHWATER;
}
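/*
 * Note: code_gen_highwater sits TCG_HIGHWATER bytes below the region end,
 * leaving slack so that an in-progress translation can run slightly past
 * the check before tcg_region_alloc() is asked for a fresh region.
 */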

static bool tcg_region_alloc__locked(TCGContext *s)
{
    if (region.current == region.n) {
        return true;
    }
    tcg_region_assign(s, region.current);
    region.current++;
    return false;
}

/*
 * Request a new region once the one in use has filled up.
 * Returns true on error.
 */
bool tcg_region_alloc(TCGContext *s)
{
    bool err;
    /* read the region size now; alloc__locked will overwrite it on success */
    size_t size_full = s->code_gen_buffer_size;

    qemu_mutex_lock(&region.lock);
    err = tcg_region_alloc__locked(s);
    if (!err) {
        region.agg_size_full += size_full - TCG_HIGHWATER;
    }
    qemu_mutex_unlock(&region.lock);
    return err;
}

/*
 * Perform a context's first region allocation.
 * This function does _not_ increment region.agg_size_full.
 */
static void tcg_region_initial_alloc__locked(TCGContext *s)
{
    bool err = tcg_region_alloc__locked(s);
    g_assert(!err);
}

void tcg_region_initial_alloc(TCGContext *s)
{
    qemu_mutex_lock(&region.lock);
    tcg_region_initial_alloc__locked(s);
    qemu_mutex_unlock(&region.lock);
}

/* Call from a safe-work context */
void tcg_region_reset_all(void)
{
    unsigned int n_ctxs = qatomic_read(&tcg_cur_ctxs);
    unsigned int i;

    qemu_mutex_lock(&region.lock);
    region.current = 0;
    region.agg_size_full = 0;

    for (i = 0; i < n_ctxs; i++) {
        TCGContext *s = qatomic_read(&tcg_ctxs[i]);
        tcg_region_initial_alloc__locked(s);
    }
    qemu_mutex_unlock(&region.lock);

    tcg_region_tree_reset_all();
}

static size_t tcg_n_regions(unsigned max_cpus)
{
#ifdef CONFIG_USER_ONLY
    return 1;
#else
    /*
     * It is likely that some vCPUs will translate more code than others,
     * so we first try to set more regions than max_cpus, with those regions
     * being of reasonable size. If that's not possible we make do by evenly
     * dividing the code_gen_buffer among the vCPUs.
     */
    size_t i;

    /* Use a single region if all we have is one vCPU thread */
    if (max_cpus == 1 || !qemu_tcg_mttcg_enabled()) {
        return 1;
    }

    /* Try to have more regions than max_cpus, with each region being >= 2 MiB */
    for (i = 8; i > 0; i--) {
        size_t regions_per_thread = i;
        size_t region_size;

        region_size = tcg_init_ctx.code_gen_buffer_size;
        region_size /= max_cpus * regions_per_thread;

        if (region_size >= 2 * 1024u * 1024) {
            return max_cpus * regions_per_thread;
        }
    }
    /* If we can't, then just allocate one region per vCPU thread */
    return max_cpus;
#endif
}
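/*
 * Worked example: with 8 vCPUs in MTTCG and a 1 GiB code_gen_buffer,
 * the first iteration (i = 8) yields 1 GiB / 64 = 16 MiB per region,
 * which passes the 2 MiB check, so 64 regions are used.
 */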

/*
 * Minimum size of the code gen buffer. This number is arbitrarily chosen,
 * but not so small that we can't have a fair number of TBs live.
 *
 * Maximum size, MAX_CODE_GEN_BUFFER_SIZE, is defined in tcg-target.h.
 * Unless otherwise indicated, this is constrained by the range of
 * direct branches on the host cpu, as used by the TCG implementation
 * of goto_tb.
 */
#define MIN_CODE_GEN_BUFFER_SIZE (1 * MiB)

#if TCG_TARGET_REG_BITS == 32
#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32 * MiB)
#ifdef CONFIG_USER_ONLY
/*
 * For user mode on smaller 32 bit systems we may run into trouble
 * allocating big chunks of data in the right place. On these systems
 * we utilise a static code generation buffer directly in the binary.
 */
#define USE_STATIC_CODE_GEN_BUFFER
#endif
#else /* TCG_TARGET_REG_BITS == 64 */
#ifdef CONFIG_USER_ONLY
/*
 * As user-mode emulation typically means running multiple instances
 * of the translator, don't go too nuts with our default code gen
 * buffer lest we make things too hard for the OS.
 */
#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (128 * MiB)
#else
/*
 * We expect most system emulation to run one or two guests per host.
 * Users running large scale system emulation may want to tweak their
 * runtime setup via the tb-size control on the command line.
 */
#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (1 * GiB)
#endif
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)

static size_t size_code_gen_buffer(size_t tb_size)
{
    /* Size the buffer. */
    if (tb_size == 0) {
        size_t phys_mem = qemu_get_host_physmem();
        if (phys_mem == 0) {
            tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
        } else {
            tb_size = MIN(DEFAULT_CODE_GEN_BUFFER_SIZE, phys_mem / 8);
        }
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }
    return tb_size;
}
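/*
 * Example (assuming MAX_CODE_GEN_BUFFER_SIZE >= 1 GiB): on a 64-bit
 * softmmu host with 16 GiB of RAM and no explicit tb-size option, this
 * returns MIN(1 GiB, 16 GiB / 8) = 1 GiB; with 4 GiB of RAM it would be
 * 512 MiB instead.
 */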

#ifdef __mips__
/*
 * In order to use J and JAL within the code_gen_buffer, we require
 * that the buffer not cross a 256MB boundary.
 */
static inline bool cross_256mb(void *addr, size_t size)
{
    return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful;
}

/*
 * We weren't able to allocate a buffer without crossing that boundary,
 * so make do with the larger portion of the buffer that doesn't cross.
 * Returns the new base of the buffer, and adjusts code_gen_buffer_size.
 */
static inline void *split_cross_256mb(void *buf1, size_t size1)
{
    void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
    size_t size2 = buf1 + size1 - buf2;

    size1 = buf2 - buf1;
    if (size1 < size2) {
        size1 = size2;
        buf1 = buf2;
    }

    tcg_ctx->code_gen_buffer_size = size1;
    return buf1;
}
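/*
 * Worked example: buf1 = 0x0ff00000 with size1 = 32 MiB ends at 0x11f00000
 * and therefore crosses 0x10000000. buf2 becomes 0x10000000, giving a
 * 1 MiB lower half and a 31 MiB upper half, so the function returns
 * 0x10000000 and shrinks code_gen_buffer_size to 31 MiB.
 */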
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

static bool alloc_code_gen_buffer(size_t tb_size, int splitwx, Error **errp)
{
    void *buf, *end;
    size_t size;

    if (splitwx > 0) {
        error_setg(errp, "jit split-wx not supported");
        return false;
    }

    /* page-align the beginning and end of the buffer */
    buf = static_code_gen_buffer;
    end = static_code_gen_buffer + sizeof(static_code_gen_buffer);
    buf = QEMU_ALIGN_PTR_UP(buf, qemu_real_host_page_size);
    end = QEMU_ALIGN_PTR_DOWN(end, qemu_real_host_page_size);

    size = end - buf;

    /* Honor a command-line option limiting the size of the buffer. */
    if (size > tb_size) {
        size = QEMU_ALIGN_DOWN(tb_size, qemu_real_host_page_size);
    }
    tcg_ctx->code_gen_buffer_size = size;

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        buf = split_cross_256mb(buf, size);
        size = tcg_ctx->code_gen_buffer_size;
    }
#endif

    if (qemu_mprotect_rwx(buf, size)) {
        error_setg_errno(errp, errno, "mprotect of jit buffer");
        return false;
    }
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    tcg_ctx->code_gen_buffer = buf;
    return true;
}
#elif defined(_WIN32)
static bool alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
{
    void *buf;

    if (splitwx > 0) {
        error_setg(errp, "jit split-wx not supported");
        return false;
    }

    buf = VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT,
                       PAGE_EXECUTE_READWRITE);
    if (buf == NULL) {
        error_setg_win32(errp, GetLastError(),
                         "allocate %zu bytes for jit buffer", size);
        return false;
    }

    tcg_ctx->code_gen_buffer = buf;
    tcg_ctx->code_gen_buffer_size = size;
    return true;
}
#else
static bool alloc_code_gen_buffer_anon(size_t size, int prot,
                                       int flags, Error **errp)
{
    void *buf;

    buf = mmap(NULL, size, prot, flags, -1, 0);
    if (buf == MAP_FAILED) {
        error_setg_errno(errp, errno,
                         "allocate %zu bytes for jit buffer", size);
        return false;
    }
    tcg_ctx->code_gen_buffer_size = size;

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        /*
         * Try again, with the original still mapped, to avoid re-acquiring
         * the same 256mb crossing.
         */
        size_t size2;
        void *buf2 = mmap(NULL, size, prot, flags, -1, 0);
        switch ((int)(buf2 != MAP_FAILED)) {
        case 1:
            if (!cross_256mb(buf2, size)) {
                /* Success! Use the new buffer. */
                munmap(buf, size);
                break;
            }
            /* Failure. Work with what we had. */
            munmap(buf2, size);
            /* fallthru */
        default:
            /* Split the original buffer. Free the smaller half. */
            buf2 = split_cross_256mb(buf, size);
            size2 = tcg_ctx->code_gen_buffer_size;
            if (buf == buf2) {
                munmap(buf + size2, size - size2);
            } else {
                munmap(buf, size - size2);
            }
            size = size2;
            break;
        }
        buf = buf2;
    }
#endif

    /* Request large pages for the buffer. */
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    tcg_ctx->code_gen_buffer = buf;
    return true;
}

#ifndef CONFIG_TCG_INTERPRETER
#ifdef CONFIG_POSIX
#include "qemu/memfd.h"

static bool alloc_code_gen_buffer_splitwx_memfd(size_t size, Error **errp)
{
    void *buf_rw = NULL, *buf_rx = MAP_FAILED;
    int fd = -1;

#ifdef __mips__
    /* Find space for the RX mapping, vs the 256MiB regions. */
    if (!alloc_code_gen_buffer_anon(size, PROT_NONE,
                                    MAP_PRIVATE | MAP_ANONYMOUS |
                                    MAP_NORESERVE, errp)) {
        return false;
    }
    /* The size of the mapping may have been adjusted. */
    size = tcg_ctx->code_gen_buffer_size;
    buf_rx = tcg_ctx->code_gen_buffer;
#endif

    buf_rw = qemu_memfd_alloc("tcg-jit", size, 0, &fd, errp);
    if (buf_rw == NULL) {
        goto fail;
    }

#ifdef __mips__
    void *tmp = mmap(buf_rx, size, PROT_READ | PROT_EXEC,
                     MAP_SHARED | MAP_FIXED, fd, 0);
    if (tmp != buf_rx) {
        goto fail_rx;
    }
#else
    buf_rx = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_SHARED, fd, 0);
    if (buf_rx == MAP_FAILED) {
        goto fail_rx;
    }
#endif

    close(fd);
    tcg_ctx->code_gen_buffer = buf_rw;
    tcg_ctx->code_gen_buffer_size = size;
    tcg_splitwx_diff = buf_rx - buf_rw;

    /* Request large pages for the buffer and the splitwx. */
    qemu_madvise(buf_rw, size, QEMU_MADV_HUGEPAGE);
    qemu_madvise(buf_rx, size, QEMU_MADV_HUGEPAGE);
    return true;

 fail_rx:
    error_setg_errno(errp, errno, "failed to map shared memory for execute");
 fail:
    if (buf_rx != MAP_FAILED) {
        munmap(buf_rx, size);
    }
    if (buf_rw) {
        munmap(buf_rw, size);
    }
    if (fd >= 0) {
        close(fd);
    }
    return false;
}
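/*
 * On success the same memfd pages are mapped twice: buf_rw is the writable
 * view into which TCG emits code, and buf_rx is the executable alias the
 * CPU runs from; tcg_splitwx_diff records the constant offset used to
 * convert between the two views.
 */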
#endif /* CONFIG_POSIX */

#ifdef CONFIG_DARWIN
#include <mach/mach.h>

extern kern_return_t mach_vm_remap(vm_map_t target_task,
                                   mach_vm_address_t *target_address,
                                   mach_vm_size_t size,
                                   mach_vm_offset_t mask,
                                   int flags,
                                   vm_map_t src_task,
                                   mach_vm_address_t src_address,
                                   boolean_t copy,
                                   vm_prot_t *cur_protection,
                                   vm_prot_t *max_protection,
                                   vm_inherit_t inheritance);

static bool alloc_code_gen_buffer_splitwx_vmremap(size_t size, Error **errp)
{
    kern_return_t ret;
    mach_vm_address_t buf_rw, buf_rx;
    vm_prot_t cur_prot, max_prot;

    /* Map the read-write portion via normal anon memory. */
    if (!alloc_code_gen_buffer_anon(size, PROT_READ | PROT_WRITE,
                                    MAP_PRIVATE | MAP_ANONYMOUS, errp)) {
        return false;
    }

    buf_rw = (mach_vm_address_t)tcg_ctx->code_gen_buffer;
    buf_rx = 0;
    ret = mach_vm_remap(mach_task_self(),
                        &buf_rx,
                        size,
                        0,
                        VM_FLAGS_ANYWHERE,
                        mach_task_self(),
                        buf_rw,
                        false,
                        &cur_prot,
                        &max_prot,
                        VM_INHERIT_NONE);
    if (ret != KERN_SUCCESS) {
        /* TODO: Convert "ret" to a human readable error message. */
        error_setg(errp, "vm_remap for jit splitwx failed");
        munmap((void *)buf_rw, size);
        return false;
    }

    if (mprotect((void *)buf_rx, size, PROT_READ | PROT_EXEC) != 0) {
        error_setg_errno(errp, errno, "mprotect for jit splitwx");
        munmap((void *)buf_rx, size);
        munmap((void *)buf_rw, size);
        return false;
    }

    tcg_splitwx_diff = buf_rx - buf_rw;
    return true;
}
#endif /* CONFIG_DARWIN */
#endif /* CONFIG_TCG_INTERPRETER */

static bool alloc_code_gen_buffer_splitwx(size_t size, Error **errp)
{
#ifndef CONFIG_TCG_INTERPRETER
# ifdef CONFIG_DARWIN
    return alloc_code_gen_buffer_splitwx_vmremap(size, errp);
# endif
# ifdef CONFIG_POSIX
    return alloc_code_gen_buffer_splitwx_memfd(size, errp);
# endif
#endif
    error_setg(errp, "jit split-wx not supported");
    return false;
}

static bool alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
{
    ERRP_GUARD();
    int prot, flags;

    if (splitwx) {
        if (alloc_code_gen_buffer_splitwx(size, errp)) {
            return true;
        }
        /*
         * If splitwx force-on (1), fail;
         * if splitwx default-on (-1), fall through to splitwx off.
         */
        if (splitwx > 0) {
            return false;
        }
        error_free_or_abort(errp);
    }

    prot = PROT_READ | PROT_WRITE | PROT_EXEC;
    flags = MAP_PRIVATE | MAP_ANONYMOUS;
#ifdef CONFIG_TCG_INTERPRETER
    /* The tcg interpreter does not need execute permission. */
    prot = PROT_READ | PROT_WRITE;
#elif defined(CONFIG_DARWIN)
    /* Applicable to both iOS and macOS (Apple Silicon). */
    if (!splitwx) {
        flags |= MAP_JIT;
    }
#endif

    return alloc_code_gen_buffer_anon(size, prot, flags, errp);
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */

/*
 * Initializes region partitioning.
 *
 * Called at init time from the parent thread (i.e. the one calling
 * tcg_context_init), after the target's TCG globals have been set.
 *
 * Region partitioning works by splitting code_gen_buffer into separate regions,
 * and then assigning regions to TCG threads so that the threads can translate
 * code in parallel without synchronization.
 *
 * In softmmu the number of TCG threads is bounded by max_cpus, so we use at
 * least max_cpus regions in MTTCG. In !MTTCG we use a single region.
 * Note that the TCG options from the command-line (i.e. -accel accel=tcg,[...])
 * must have been parsed before calling this function, since it calls
 * qemu_tcg_mttcg_enabled().
 *
 * In user-mode we use a single region. Having multiple regions in user-mode
 * is not supported, because the number of vCPU threads (recall that each thread
 * spawned by the guest corresponds to a vCPU thread) is only bounded by the
 * OS, and usually this number is huge (tens of thousands is not uncommon).
 * Thus, given this large bound on the number of vCPU threads and the fact
 * that code_gen_buffer is allocated at compile-time, we cannot guarantee
 * the availability of at least one region per vCPU thread.
 *
 * However, this user-mode limitation is unlikely to be a significant problem
 * in practice. Multi-threaded guests share most if not all of their translated
 * code, which makes parallel code generation less appealing than in softmmu.
 */
void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus)
{
    void *buf, *aligned;
    size_t size;
    size_t page_size;
    size_t region_size;
    size_t n_regions;
    size_t i;
    bool ok;

    ok = alloc_code_gen_buffer(size_code_gen_buffer(tb_size),
                               splitwx, &error_fatal);
    assert(ok);

    buf = tcg_init_ctx.code_gen_buffer;
    size = tcg_init_ctx.code_gen_buffer_size;
    page_size = qemu_real_host_page_size;
    n_regions = tcg_n_regions(max_cpus);

    /* The first region will be 'aligned - buf' bytes larger than the others */
    aligned = QEMU_ALIGN_PTR_UP(buf, page_size);
    g_assert(aligned < tcg_init_ctx.code_gen_buffer + size);
    /*
     * Make region_size a multiple of page_size, using aligned as the start.
     * As a result of this we might end up with a few extra pages at the end of
     * the buffer; we will assign those to the last region.
     */
    region_size = (size - (aligned - buf)) / n_regions;
    region_size = QEMU_ALIGN_DOWN(region_size, page_size);

    /* A region must have at least 2 pages; one code, one guard */
    g_assert(region_size >= 2 * page_size);

    /* init the region struct */
    qemu_mutex_init(&region.lock);
    region.n = n_regions;
    region.size = region_size - page_size;
    region.stride = region_size;
    region.start = buf;
    region.start_aligned = aligned;
    /* page-align the end, since its last page will be a guard page */
    region.end = QEMU_ALIGN_PTR_DOWN(buf + size, page_size);
    /* account for that last guard page */
    region.end -= page_size;

    /*
     * Set guard pages in the rw buffer, as that's the one into which
     * buffer overruns could occur. Do not set guard pages in the rx
     * buffer -- let that one use hugepages throughout.
     */
    for (i = 0; i < region.n; i++) {
        void *start, *end;

        tcg_region_bounds(i, &start, &end);

        /*
         * macOS 11.2 has a bug (Apple Feedback FB8994773) in which mprotect
         * rejects a permission change from RWX -> NONE. Guard pages are
         * nice for bug detection but are not essential; ignore any failure.
         */
        (void)qemu_mprotect_none(end, page_size);
    }

    tcg_region_trees_init();

    /*
     * Leave the initial context initialized to the first region.
     * This will be the context into which we generate the prologue.
     * It is also the only context for CONFIG_USER_ONLY.
     */
    tcg_region_initial_alloc__locked(&tcg_init_ctx);
}

void tcg_region_prologue_set(TCGContext *s)
{
    /* Deduct the prologue from the first region. */
    g_assert(region.start == s->code_gen_buffer);
    region.start = s->code_ptr;

    /* Recompute boundaries of the first region. */
    tcg_region_assign(s, 0);

    /* Register the balance of the buffer with gdb. */
    tcg_register_jit(tcg_splitwx_to_rx(region.start),
                     region.end - region.start);
}

/*
 * Returns the size (in bytes) of all translated code (i.e. from all regions)
 * currently in the cache.
 * See also: tcg_code_capacity()
 * Do not confuse with tcg_current_code_size(); that one applies to a single
 * TCG context.
 */
size_t tcg_code_size(void)
{
    unsigned int n_ctxs = qatomic_read(&tcg_cur_ctxs);
    unsigned int i;
    size_t total;

    qemu_mutex_lock(&region.lock);
    total = region.agg_size_full;
    for (i = 0; i < n_ctxs; i++) {
        const TCGContext *s = qatomic_read(&tcg_ctxs[i]);
        size_t size;

        size = qatomic_read(&s->code_gen_ptr) - s->code_gen_buffer;
        g_assert(size <= s->code_gen_buffer_size);
        total += size;
    }
    qemu_mutex_unlock(&region.lock);
    return total;
}

/*
 * Returns the code capacity (in bytes) of the entire cache, i.e. including all
 * regions.
 * See also: tcg_code_size()
 */
size_t tcg_code_capacity(void)
{
    size_t guard_size, capacity;

    /* no need for synchronization; these variables are set at init time */
    guard_size = region.stride - region.size;
    capacity = region.end + guard_size - region.start;
    capacity -= region.n * (guard_size + TCG_HIGHWATER);
    return capacity;
}
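/*
 * Example of the arithmetic above: region.end excludes the final guard
 * page, so adding guard_size back yields the page-aligned end of the
 * buffer; subtracting one guard page plus TCG_HIGHWATER per region then
 * leaves only the bytes a context can fill before it requests a new
 * region (compare agg_size_full in tcg_region_alloc()).
 */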

size_t tcg_tb_phys_invalidate_count(void)
{
    unsigned int n_ctxs = qatomic_read(&tcg_cur_ctxs);
    unsigned int i;
    size_t total = 0;

    for (i = 0; i < n_ctxs; i++) {
        const TCGContext *s = qatomic_read(&tcg_ctxs[i]);

        total += qatomic_read(&s->tb_phys_invalidate_count);
    }
    return total;
}