/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <[email protected]>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "cpu.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "qapi/visitor.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/qemu-print.h"
#include "qom/object.h"
#include "trace-root.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "sysemu/kvm.h"
#include "sysemu/runstate.h"
#include "sysemu/tcg.h"
#include "sysemu/accel.h"
#include "hw/boards.h"
#include "migration/vmstate.h"

//#define DEBUG_UNASSIGNED

static unsigned memory_region_transaction_depth;
static bool memory_region_update_pending;
static bool ioeventfd_update_pending;
bool global_dirty_log;

static QTAILQ_HEAD(, MemoryListener) memory_listeners
    = QTAILQ_HEAD_INITIALIZER(memory_listeners);

static QTAILQ_HEAD(, AddressSpace) address_spaces
    = QTAILQ_HEAD_INITIALIZER(address_spaces);

static GHashTable *flat_views;

typedef struct AddrRange AddrRange;

/*
 * Note that signed integers are needed for negative offsetting in aliases
 * (large MemoryRegion::alias_offset).
 */
struct AddrRange {
    Int128 start;
    Int128 size;
};

static AddrRange addrrange_make(Int128 start, Int128 size)
{
    return (AddrRange) { start, size };
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
}

static Int128 addrrange_end(AddrRange r)
{
    return int128_add(r.start, r.size);
}

static AddrRange addrrange_shift(AddrRange range, Int128 delta)
{
    int128_addto(&range.start, delta);
    return range;
}

static bool addrrange_contains(AddrRange range, Int128 addr)
{
    return int128_ge(addr, range.start)
        && int128_lt(addr, addrrange_end(range));
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return addrrange_contains(r1, r2.start)
        || addrrange_contains(r2, r1.start);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    Int128 start = int128_max(r1.start, r2.start);
    Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
    return addrrange_make(start, int128_sub(end, start));
}

enum ListenerDirection { Forward, Reverse };

#define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...)    \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners, link) { \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

#define MEMORY_LISTENER_CALL(_as, _callback, _direction, _section, _args...) \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &(_as)->listeners, link_as) {     \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &(_as)->listeners, link_as) { \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

/* No need to ref/unref .mr, the FlatRange keeps it alive. */
#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...)  \
    do {                                                                \
        MemoryRegionSection mrs = section_from_flat_range(fr,           \
                address_space_to_flatview(as));                         \
        MEMORY_LISTENER_CALL(as, callback, dir, &mrs, ##_args);         \
    } while (0)

struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

struct MemoryRegionIoeventfd {
    AddrRange addr;
    bool match_data;
    uint64_t data;
    EventNotifier *e;
};

static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd *a,
                                           MemoryRegionIoeventfd *b)
{
    if (int128_lt(a->addr.start, b->addr.start)) {
        return true;
    } else if (int128_gt(a->addr.start, b->addr.start)) {
        return false;
    } else if (int128_lt(a->addr.size, b->addr.size)) {
        return true;
    } else if (int128_gt(a->addr.size, b->addr.size)) {
        return false;
    } else if (a->match_data < b->match_data) {
        return true;
    } else if (a->match_data > b->match_data) {
        return false;
    } else if (a->match_data) {
        if (a->data < b->data) {
            return true;
        } else if (a->data > b->data) {
            return false;
        }
    }
    if (a->e < b->e) {
        return true;
    } else if (a->e > b->e) {
        return false;
    }
    return false;
}

static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd *a,
                                          MemoryRegionIoeventfd *b)
{
    return !memory_region_ioeventfd_before(a, b)
        && !memory_region_ioeventfd_before(b, a);
}

/* Range of memory in the global map.  Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    hwaddr offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
    bool romd_mode;
    bool readonly;
    bool nonvolatile;
};

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

static inline MemoryRegionSection
section_from_flat_range(FlatRange *fr, FlatView *fv)
{
    return (MemoryRegionSection) {
        .mr = fr->mr,
        .fv = fv,
        .offset_within_region = fr->offset_in_region,
        .size = fr->addr.size,
        .offset_within_address_space = int128_get64(fr->addr.start),
        .readonly = fr->readonly,
        .nonvolatile = fr->nonvolatile,
    };
}

static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region
        && a->romd_mode == b->romd_mode
        && a->readonly == b->readonly
        && a->nonvolatile == b->nonvolatile;
}

static FlatView *flatview_new(MemoryRegion *mr_root)
{
    FlatView *view;

    view = g_new0(FlatView, 1);
    view->ref = 1;
    view->root = mr_root;
    memory_region_ref(mr_root);
    trace_flatview_new(view, mr_root);

    return view;
}

/* Insert a range into a given position.  Caller is responsible for
 * maintaining sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = g_realloc(view->ranges,
                                 view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    memory_region_ref(range->mr);
    ++view->nr;
}

static void flatview_destroy(FlatView *view)
{
    int i;

    trace_flatview_destroy(view, view->root);
    if (view->dispatch) {
        address_space_dispatch_free(view->dispatch);
    }
    for (i = 0; i < view->nr; i++) {
        memory_region_unref(view->ranges[i].mr);
    }
    g_free(view->ranges);
    memory_region_unref(view->root);
    g_free(view);
}

static bool flatview_ref(FlatView *view)
{
    return atomic_fetch_inc_nonzero(&view->ref) > 0;
}

void flatview_unref(FlatView *view)
{
    if (atomic_fetch_dec(&view->ref) == 1) {
        trace_flatview_destroy_rcu(view, view->root);
        assert(view->root);
        call_rcu(view, flatview_destroy, rcu);
    }
}

static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return int128_eq(addrrange_end(r1->addr), r2->addr.start)
        && r1->mr == r2->mr
        && int128_eq(int128_add(int128_make64(r1->offset_in_region),
                                r1->addr.size),
                     int128_make64(r2->offset_in_region))
        && r1->dirty_log_mask == r2->dirty_log_mask
        && r1->romd_mode == r2->romd_mode
        && r1->readonly == r2->readonly
        && r1->nonvolatile == r2->nonvolatile;
}

/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j, k;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
            ++j;
        }
        ++i;
        for (k = i; k < j; k++) {
            memory_region_unref(view->ranges[k].mr);
        }
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}

static bool memory_region_big_endian(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness != DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static void adjust_endianness(MemoryRegion *mr, uint64_t *data, MemOp op)
{
    if ((op & MO_BSWAP) != devend_memop(mr->ops->endianness)) {
        switch (op & MO_SIZE) {
        case MO_8:
            break;
        case MO_16:
            *data = bswap16(*data);
            break;
        case MO_32:
            *data = bswap32(*data);
            break;
        case MO_64:
            *data = bswap64(*data);
            break;
        default:
            g_assert_not_reached();
        }
    }
}

static inline void memory_region_shift_read_access(uint64_t *value,
                                                   signed shift,
                                                   uint64_t mask,
                                                   uint64_t tmp)
{
    if (shift >= 0) {
        *value |= (tmp & mask) << shift;
    } else {
        *value |= (tmp & mask) >> -shift;
    }
}

static inline uint64_t memory_region_shift_write_access(uint64_t *value,
                                                        signed shift,
                                                        uint64_t mask)
{
    uint64_t tmp;

    if (shift >= 0) {
        tmp = (*value >> shift) & mask;
    } else {
        tmp = (*value << -shift) & mask;
    }

    return tmp;
}

static hwaddr memory_region_to_absolute_addr(MemoryRegion *mr, hwaddr offset)
{
    MemoryRegion *root;
    hwaddr abs_addr = offset;

    abs_addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        abs_addr += root->addr;
    }

    return abs_addr;
}

static int get_cpu_index(void)
{
    if (current_cpu) {
        return current_cpu->cpu_index;
    }
    return -1;
}

static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
                                               hwaddr addr,
                                               uint64_t *value,
                                               unsigned size,
                                               signed shift,
                                               uint64_t mask,
                                               MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->read(mr->opaque, addr, size);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    memory_region_shift_read_access(value, shift, mask, tmp);
    return MEMTX_OK;
}

static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          signed shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs)
{
    uint64_t tmp = 0;
    MemTxResult r;

    r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    memory_region_shift_read_access(value, shift, mask, tmp);
    return r;
}

static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *value,
                                                unsigned size,
                                                signed shift,
                                                uint64_t mask,
                                                MemTxAttrs attrs)
{
    uint64_t tmp = memory_region_shift_write_access(value, shift, mask);

    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->write(mr->opaque, addr, tmp, size);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
                                                           hwaddr addr,
                                                           uint64_t *value,
                                                           unsigned size,
                                                           signed shift,
                                                           uint64_t mask,
                                                           MemTxAttrs attrs)
{
    uint64_t tmp = memory_region_shift_write_access(value, shift, mask);

    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    return mr->ops->write_with_attrs(mr->opaque, addr, tmp, size, attrs);
}

static MemTxResult access_with_adjusted_size(hwaddr addr,
                                             uint64_t *value,
                                             unsigned size,
                                             unsigned access_size_min,
                                             unsigned access_size_max,
                                             MemTxResult (*access_fn)
                                                         (MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          signed shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs),
                                             MemoryRegion *mr,
                                             MemTxAttrs attrs)
{
    uint64_t access_mask;
    unsigned access_size;
    unsigned i;
    MemTxResult r = MEMTX_OK;

    if (!access_size_min) {
        access_size_min = 1;
    }
    if (!access_size_max) {
        access_size_max = 4;
    }

    /* FIXME: support unaligned access? */
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = MAKE_64BIT_MASK(0, access_size * 8);
    if (memory_region_big_endian(mr)) {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size,
                           (size - access_size - i) * 8, access_mask, attrs);
        }
    } else {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size, i * 8,
                           access_mask, attrs);
        }
    }
    return r;
}

static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
{
    AddressSpace *as;

    while (mr->container) {
        mr = mr->container;
    }
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        if (mr == as->root) {
            return as;
        }
    }
    return NULL;
}

/* Render a memory region into the global view.  Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 Int128 base,
                                 AddrRange clip,
                                 bool readonly,
                                 bool nonvolatile)
{
    MemoryRegion *subregion;
    unsigned i;
    hwaddr offset_in_region;
    Int128 remain;
    Int128 now;
    FlatRange fr;
    AddrRange tmp;

    if (!mr->enabled) {
        return;
    }

    int128_addto(&base, int128_make64(mr->addr));
    readonly |= mr->readonly;
    nonvolatile |= mr->nonvolatile;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        int128_subfrom(&base, int128_make64(mr->alias->addr));
        int128_subfrom(&base, int128_make64(mr->alias_offset));
        render_memory_region(view, mr->alias, base, clip,
                             readonly, nonvolatile);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip,
                             readonly, nonvolatile);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = int128_get64(int128_sub(clip.start, base));
    base = clip.start;
    remain = clip.size;

    fr.mr = mr;
    fr.dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    fr.romd_mode = mr->romd_mode;
    fr.readonly = readonly;
    fr.nonvolatile = nonvolatile;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && int128_nz(remain); ++i) {
        if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
            continue;
        }
        if (int128_lt(base, view->ranges[i].addr.start)) {
            now = int128_min(remain,
                             int128_sub(view->ranges[i].addr.start, base));
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            flatview_insert(view, i, &fr);
            ++i;
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
        now = int128_sub(int128_min(int128_add(base, remain),
                                    addrrange_end(view->ranges[i].addr)),
                         base);
        int128_addto(&base, now);
        offset_in_region += int128_get64(now);
        int128_subfrom(&remain, now);
    }
    if (int128_nz(remain)) {
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        flatview_insert(view, i, &fr);
    }
}

static MemoryRegion *memory_region_get_flatview_root(MemoryRegion *mr)
{
    while (mr->enabled) {
        if (mr->alias) {
            if (!mr->alias_offset && int128_ge(mr->size, mr->alias->size)) {
                /* The alias is included in its entirety.  Use it as
                 * the "real" root, so that we can share more FlatViews.
                 */
                mr = mr->alias;
                continue;
            }
        } else if (!mr->terminates) {
            unsigned int found = 0;
            MemoryRegion *child, *next = NULL;
            QTAILQ_FOREACH(child, &mr->subregions, subregions_link) {
                if (child->enabled) {
                    if (++found > 1) {
                        next = NULL;
                        break;
                    }
                    if (!child->addr && int128_ge(mr->size, child->size)) {
                        /* A child is included in its entirety.  If it's the
                         * only enabled one, use it in the hope of finding
                         * an alias down the way.  This will also let us
                         * share FlatViews.
                         */
                        next = child;
                    }
                }
            }
            if (found == 0) {
                return NULL;
            }
            if (next) {
                mr = next;
                continue;
            }
        }

        return mr;
    }

    return NULL;
}

/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView *generate_memory_topology(MemoryRegion *mr)
{
    int i;
    FlatView *view;

    view = flatview_new(mr);

    if (mr) {
        render_memory_region(view, mr, int128_zero(),
                             addrrange_make(int128_zero(), int128_2_64()),
                             false, false);
    }
    flatview_simplify(view);

    view->dispatch = address_space_dispatch_new(view);
    for (i = 0; i < view->nr; i++) {
        MemoryRegionSection mrs =
            section_from_flat_range(&view->ranges[i], view);
        flatview_add_to_dispatch(view, &mrs);
    }
    address_space_dispatch_compact(view->dispatch);
    g_hash_table_replace(flat_views, mr, view);

    return view;
}

static void address_space_add_del_ioeventfds(AddressSpace *as,
                                             MemoryRegionIoeventfd *fds_new,
                                             unsigned fds_new_nb,
                                             MemoryRegionIoeventfd *fds_old,
                                             unsigned fds_old_nb)
{
    unsigned iold, inew;
    MemoryRegionIoeventfd *fd;
    MemoryRegionSection section;

    /* Generate a symmetric difference of the old and new fd sets, adding
     * and deleting as necessary.
     */

    iold = inew = 0;
    while (iold < fds_old_nb || inew < fds_new_nb) {
        if (iold < fds_old_nb
            && (inew == fds_new_nb
                || memory_region_ioeventfd_before(&fds_old[iold],
                                                  &fds_new[inew]))) {
            fd = &fds_old[iold];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_del, Forward, &section,
                                 fd->match_data, fd->data, fd->e);
            ++iold;
        } else if (inew < fds_new_nb
                   && (iold == fds_old_nb
                       || memory_region_ioeventfd_before(&fds_new[inew],
                                                         &fds_old[iold]))) {
            fd = &fds_new[inew];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_add, Reverse, &section,
                                 fd->match_data, fd->data, fd->e);
            ++inew;
        } else {
            ++iold;
            ++inew;
        }
    }
}

FlatView *address_space_get_flatview(AddressSpace *as)
{
    FlatView *view;

    RCU_READ_LOCK_GUARD();
    do {
        view = address_space_to_flatview(as);
        /* If somebody has replaced as->current_map concurrently,
         * flatview_ref returns false.
         */
    } while (!flatview_ref(view));
    return view;
}

static void address_space_update_ioeventfds(AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    unsigned ioeventfd_nb = 0;
    MemoryRegionIoeventfd *ioeventfds = NULL;
    AddrRange tmp;
    unsigned i;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
            tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
                                  int128_sub(fr->addr.start,
                                             int128_make64(fr->offset_in_region)));
            if (addrrange_intersects(fr->addr, tmp)) {
                ++ioeventfd_nb;
                ioeventfds = g_realloc(ioeventfds,
                                       ioeventfd_nb * sizeof(*ioeventfds));
                ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
                ioeventfds[ioeventfd_nb-1].addr = tmp;
            }
        }
    }

    address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
                                     as->ioeventfds, as->ioeventfd_nb);

    g_free(as->ioeventfds);
    as->ioeventfds = ioeventfds;
    as->ioeventfd_nb = ioeventfd_nb;
    flatview_unref(view);
}

/*
 * Notify the memory listeners about the coalesced IO change events of
 * range `cmr'.  Only the part that has intersection of the specified
 * FlatRange will be sent.
 */
static void flat_range_coalesced_io_notify(FlatRange *fr, AddressSpace *as,
                                           CoalescedMemoryRange *cmr, bool add)
{
    AddrRange tmp;

    tmp = addrrange_shift(cmr->addr,
                          int128_sub(fr->addr.start,
                                     int128_make64(fr->offset_in_region)));
    if (!addrrange_intersects(tmp, fr->addr)) {
        return;
    }
    tmp = addrrange_intersection(tmp, fr->addr);

    if (add) {
        MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, coalesced_io_add,
                                      int128_get64(tmp.start),
                                      int128_get64(tmp.size));
    } else {
        MEMORY_LISTENER_UPDATE_REGION(fr, as, Reverse, coalesced_io_del,
                                      int128_get64(tmp.start),
                                      int128_get64(tmp.size));
    }
}

static void flat_range_coalesced_io_del(FlatRange *fr, AddressSpace *as)
{
    CoalescedMemoryRange *cmr;

    QTAILQ_FOREACH(cmr, &fr->mr->coalesced, link) {
        flat_range_coalesced_io_notify(fr, as, cmr, false);
    }
}

static void flat_range_coalesced_io_add(FlatRange *fr, AddressSpace *as)
{
    MemoryRegion *mr = fr->mr;
    CoalescedMemoryRange *cmr;

    if (QTAILQ_EMPTY(&mr->coalesced)) {
        return;
    }

    QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
        flat_range_coalesced_io_notify(fr, as, cmr, true);
    }
}

static void address_space_update_topology_pass(AddressSpace *as,
                                               const FlatView *old_view,
                                               const FlatView *new_view,
                                               bool adding)
{
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view->nr || inew < new_view->nr) {
        if (iold < old_view->nr) {
            frold = &old_view->ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view->nr) {
            frnew = &new_view->ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || int128_lt(frold->addr.start, frnew->addr.start)
                || (int128_eq(frold->addr.start, frnew->addr.start)
                    && !flatrange_equal(frold, frnew)))) {
            /* In old but not in new, or in both but attributes changed. */

            if (!adding) {
                flat_range_coalesced_io_del(frold, as);
                MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
            }

            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both and unchanged (except logging may have changed) */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
                if (frnew->dirty_log_mask & ~frold->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
                if (frold->dirty_log_mask & ~frnew->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
                flat_range_coalesced_io_add(frnew, as);
            }

            ++inew;
        }
    }
}

static void flatviews_init(void)
{
    static FlatView *empty_view;

    if (flat_views) {
        return;
    }

    flat_views = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL,
                                       (GDestroyNotify) flatview_unref);
    if (!empty_view) {
        empty_view = generate_memory_topology(NULL);
        /* We keep it alive forever in the global variable. */
        flatview_ref(empty_view);
    } else {
        g_hash_table_replace(flat_views, NULL, empty_view);
        flatview_ref(empty_view);
    }
}

static void flatviews_reset(void)
{
    AddressSpace *as;

    if (flat_views) {
        g_hash_table_unref(flat_views);
        flat_views = NULL;
    }
    flatviews_init();

    /* Render unique FVs */
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        MemoryRegion *physmr = memory_region_get_flatview_root(as->root);

        if (g_hash_table_lookup(flat_views, physmr)) {
            continue;
        }

        generate_memory_topology(physmr);
    }
}

static void address_space_set_flatview(AddressSpace *as)
{
    FlatView *old_view = address_space_to_flatview(as);
    MemoryRegion *physmr = memory_region_get_flatview_root(as->root);
    FlatView *new_view = g_hash_table_lookup(flat_views, physmr);

    assert(new_view);

    if (old_view == new_view) {
        return;
    }

    if (old_view) {
        flatview_ref(old_view);
    }

    flatview_ref(new_view);

    if (!QTAILQ_EMPTY(&as->listeners)) {
        FlatView tmpview = { .nr = 0 }, *old_view2 = old_view;

        if (!old_view2) {
            old_view2 = &tmpview;
        }
        address_space_update_topology_pass(as, old_view2, new_view, false);
        address_space_update_topology_pass(as, old_view2, new_view, true);
    }

    /* Writes are protected by the BQL. */
    atomic_rcu_set(&as->current_map, new_view);
    if (old_view) {
        flatview_unref(old_view);
    }

    /* Note that all the old MemoryRegions are still alive up to this
     * point.  This relieves most MemoryListeners from the need to
     * ref/unref the MemoryRegions they get---unless they use them
     * outside the iothread mutex, in which case precise reference
     * counting is necessary.
     */
    if (old_view) {
        flatview_unref(old_view);
    }
}

static void address_space_update_topology(AddressSpace *as)
{
    MemoryRegion *physmr = memory_region_get_flatview_root(as->root);

    flatviews_init();
    if (!g_hash_table_lookup(flat_views, physmr)) {
        generate_memory_topology(physmr);
    }
    address_space_set_flatview(as);
}

void memory_region_transaction_begin(void)
{
    qemu_flush_coalesced_mmio_buffer();
    ++memory_region_transaction_depth;
}

void memory_region_transaction_commit(void)
{
    AddressSpace *as;

    assert(memory_region_transaction_depth);
    assert(qemu_mutex_iothread_locked());

    --memory_region_transaction_depth;
    if (!memory_region_transaction_depth) {
        if (memory_region_update_pending) {
            flatviews_reset();

            MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);

            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_set_flatview(as);
                address_space_update_ioeventfds(as);
            }
            memory_region_update_pending = false;
            ioeventfd_update_pending = false;
            MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
        } else if (ioeventfd_update_pending) {
            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_ioeventfds(as);
            }
            ioeventfd_update_pending = false;
        }
    }
}

static void memory_region_destructor_none(MemoryRegion *mr)
{
}

static void memory_region_destructor_ram(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_block);
}

static bool memory_region_need_escape(char c)
{
    return c == '/' || c == '[' || c == '\\' || c == ']';
}

static char *memory_region_escape_name(const char *name)
{
    const char *p;
    char *escaped, *q;
    uint8_t c;
    size_t bytes = 0;

    for (p = name; *p; p++) {
        bytes += memory_region_need_escape(*p) ? 4 : 1;
    }
    if (bytes == p - name) {
        return g_memdup(name, bytes + 1);
    }

    escaped = g_malloc(bytes + 1);
    for (p = name, q = escaped; *p; p++) {
        c = *p;
        if (unlikely(memory_region_need_escape(c))) {
            *q++ = '\\';
            *q++ = 'x';
            *q++ = "0123456789abcdef"[c >> 4];
            c = "0123456789abcdef"[c & 15];
        }
        *q++ = c;
    }
    *q = 0;
    return escaped;
}

static void memory_region_do_init(MemoryRegion *mr,
                                  Object *owner,
                                  const char *name,
                                  uint64_t size)
{
    mr->size = int128_make64(size);
    if (size == UINT64_MAX) {
        mr->size = int128_2_64();
    }
    mr->name = g_strdup(name);
    mr->owner = owner;
    mr->ram_block = NULL;

    if (name) {
        char *escaped_name = memory_region_escape_name(name);
        char *name_array = g_strdup_printf("%s[*]", escaped_name);

        if (!owner) {
            owner = container_get(qdev_get_machine(), "/unattached");
        }

        object_property_add_child(owner, name_array, OBJECT(mr), &error_abort);
        object_unref(OBJECT(mr));
        g_free(name_array);
        g_free(escaped_name);
    }
}

void memory_region_init(MemoryRegion *mr,
                        Object *owner,
                        const char *name,
                        uint64_t size)
{
    object_initialize(mr, sizeof(*mr), TYPE_MEMORY_REGION);
    memory_region_do_init(mr, owner, name, size);
}

static void memory_region_get_addr(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = mr->addr;

    visit_type_uint64(v, name, &value, errp);
}

static void memory_region_get_container(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    gchar *path = (gchar *)"";

    if (mr->container) {
        path = object_get_canonical_path(OBJECT(mr->container));
    }
    visit_type_str(v, name, &path, errp);
    if (mr->container) {
        g_free(path);
    }
}

static Object *memory_region_resolve_container(Object *obj, void *opaque,
                                               const char *part)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    return OBJECT(mr->container);
}

static void memory_region_get_priority(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    int32_t value = mr->priority;

    visit_type_int32(v, name, &value, errp);
}

static void memory_region_get_size(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = memory_region_size(mr);

    visit_type_uint64(v, name, &value, errp);
}

static void memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    ObjectProperty *op;

    mr->ops = &unassigned_mem_ops;
    mr->enabled = true;
    mr->romd_mode = true;
    mr->global_locking = true;
    mr->destructor = memory_region_destructor_none;
    QTAILQ_INIT(&mr->subregions);
    QTAILQ_INIT(&mr->coalesced);

    op = object_property_add(OBJECT(mr), "container",
                             "link<" TYPE_MEMORY_REGION ">",
                             memory_region_get_container,
                             NULL, /* memory_region_set_container */
                             NULL, NULL, &error_abort);
    op->resolve = memory_region_resolve_container;

    object_property_add(OBJECT(mr), "addr", "uint64",
                        memory_region_get_addr,
                        NULL, /* memory_region_set_addr */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "priority", "uint32",
                        memory_region_get_priority,
                        NULL, /* memory_region_set_priority */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "size", "uint64",
                        memory_region_get_size,
                        NULL, /* memory_region_set_size, */
                        NULL, NULL, &error_abort);
}

static void iommu_memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    mr->is_iommu = true;
}

static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
}

static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
                                   unsigned size, bool is_write,
                                   MemTxAttrs attrs)
{
    return false;
}

const MemoryRegionOps unassigned_mem_ops = {
    .valid.accepts = unassigned_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t memory_region_ram_device_read(void *opaque,
                                              hwaddr addr, unsigned size)
{
    MemoryRegion *mr = opaque;
    uint64_t data = (uint64_t)~0;

    switch (size) {
    case 1:
        data = *(uint8_t *)(mr->ram_block->host + addr);
        break;
    case 2:
        data = *(uint16_t *)(mr->ram_block->host + addr);
        break;
    case 4:
        data = *(uint32_t *)(mr->ram_block->host + addr);
        break;
    case 8:
        data = *(uint64_t *)(mr->ram_block->host + addr);
        break;
    }

    trace_memory_region_ram_device_read(get_cpu_index(), mr, addr, data, size);

    return data;
}

static void memory_region_ram_device_write(void *opaque, hwaddr addr,
                                           uint64_t data, unsigned size)
{
    MemoryRegion *mr = opaque;

    trace_memory_region_ram_device_write(get_cpu_index(), mr, addr, data, size);

    switch (size) {
    case 1:
        *(uint8_t *)(mr->ram_block->host + addr) = (uint8_t)data;
        break;
    case 2:
        *(uint16_t *)(mr->ram_block->host + addr) = (uint16_t)data;
        break;
    case 4:
        *(uint32_t *)(mr->ram_block->host + addr) = (uint32_t)data;
        break;
    case 8:
        *(uint64_t *)(mr->ram_block->host + addr) = data;
        break;
    }
}

static const MemoryRegionOps ram_device_mem_ops = {
    .read = memory_region_ram_device_read,
    .write = memory_region_ram_device_write,
    .endianness = DEVICE_HOST_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = true,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = true,
    },
};

bool memory_region_access_valid(MemoryRegion *mr,
                                hwaddr addr,
                                unsigned size,
                                bool is_write,
                                MemTxAttrs attrs)
{
    int access_size_min, access_size_max;
    int access_size, i;

    if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
        return false;
    }

    if (!mr->ops->valid.accepts) {
        return true;
    }

    access_size_min = mr->ops->valid.min_access_size;
    if (!mr->ops->valid.min_access_size) {
        access_size_min = 1;
    }

    access_size_max = mr->ops->valid.max_access_size;
    if (!mr->ops->valid.max_access_size) {
        access_size_max = 4;
    }

    access_size = MAX(MIN(size, access_size_max), access_size_min);
    for (i = 0; i < size; i += access_size) {
        if (!mr->ops->valid.accepts(mr->opaque, addr + i, access_size,
                                    is_write, attrs)) {
            return false;
        }
    }

    return true;
}

static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *pval,
                                                unsigned size,
                                                MemTxAttrs attrs)
{
    *pval = 0;

    if (mr->ops->read) {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_accessor,
                                         mr, attrs);
    } else {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_with_attrs_accessor,
                                         mr, attrs);
    }
}

MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
                                        hwaddr addr,
                                        uint64_t *pval,
                                        MemOp op,
                                        MemTxAttrs attrs)
{
    unsigned size = memop_size(op);
    MemTxResult r;

    if (!memory_region_access_valid(mr, addr, size, false, attrs)) {
        *pval = unassigned_mem_read(mr, addr, size);
        return MEMTX_DECODE_ERROR;
    }

    r = memory_region_dispatch_read1(mr, addr, pval, size, attrs);
    adjust_endianness(mr, pval, op);
    return r;
}

/* Return true if an eventfd was signalled */
static bool memory_region_dispatch_write_eventfds(MemoryRegion *mr,
                                                  hwaddr addr,
                                                  uint64_t data,
                                                  unsigned size,
                                                  MemTxAttrs attrs)
{
    MemoryRegionIoeventfd ioeventfd = {
        .addr = addrrange_make(int128_make64(addr), int128_make64(size)),
        .data = data,
    };
    unsigned i;

    for (i = 0; i < mr->ioeventfd_nb; i++) {
        ioeventfd.match_data = mr->ioeventfds[i].match_data;
        ioeventfd.e = mr->ioeventfds[i].e;

        if (memory_region_ioeventfd_equal(&ioeventfd, &mr->ioeventfds[i])) {
            event_notifier_set(ioeventfd.e);
            return true;
        }
    }

    return false;
}

MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
                                         hwaddr addr,
                                         uint64_t data,
                                         MemOp op,
                                         MemTxAttrs attrs)
{
    unsigned size = memop_size(op);

    if (!memory_region_access_valid(mr, addr, size, true, attrs)) {
        unassigned_mem_write(mr, addr, data, size);
        return MEMTX_DECODE_ERROR;
    }

    adjust_endianness(mr, &data, op);

    if ((!kvm_eventfds_enabled()) &&
        memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) {
        return MEMTX_OK;
    }

    if (mr->ops->write) {
        return access_with_adjusted_size(addr, &data, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_write_accessor, mr,
                                         attrs);
    } else {
        return
            access_with_adjusted_size(addr, &data, size,
                                      mr->ops->impl.min_access_size,
                                      mr->ops->impl.max_access_size,
                                      memory_region_write_with_attrs_accessor,
                                      mr, attrs);
    }
}

void memory_region_init_io(MemoryRegion *mr,
                           Object *owner,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->ops = ops ? ops : &unassigned_mem_ops;
    mr->opaque = opaque;
    mr->terminates = true;
}

void memory_region_init_ram_nomigrate(MemoryRegion *mr,
                                      Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp)
{
    memory_region_init_ram_shared_nomigrate(mr, owner, name, size, false, errp);
}

void memory_region_init_ram_shared_nomigrate(MemoryRegion *mr,
                                             Object *owner,
                                             const char *name,
                                             uint64_t size,
                                             bool share,
                                             Error **errp)
{
    Error *err = NULL;
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, share, mr, &err);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
    if (err) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
    }
}

void memory_region_init_resizeable_ram(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       uint64_t max_size,
                                       void (*resized)(const char*,
                                                       uint64_t length,
                                                       void *host),
                                       Error **errp)
{
    Error *err = NULL;
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized,
                                              mr, &err);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
    if (err) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
    }
}

#ifdef CONFIG_POSIX
void memory_region_init_ram_from_file(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      uint64_t align,
                                      uint32_t ram_flags,
                                      const char *path,
                                      Error **errp)
{
    Error *err = NULL;
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->align = align;
    mr->ram_block = qemu_ram_alloc_from_file(size, mr, ram_flags, path, &err);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
    if (err) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
    }
}

void memory_region_init_ram_from_fd(MemoryRegion *mr,
                                    struct Object *owner,
                                    const char *name,
                                    uint64_t size,
                                    bool share,
                                    int fd,
                                    Error **errp)
{
    Error *err = NULL;
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_from_fd(size, mr,
                                           share ? RAM_SHARED : 0,
                                           fd, &err);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
    if (err) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
    }
}
#endif

void memory_region_init_ram_ptr(MemoryRegion *mr,
                                Object *owner,
                                const char *name,
                                uint64_t size,
                                void *ptr)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;

    /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL. */
    assert(ptr != NULL);
    mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
}

void memory_region_init_ram_device_ptr(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       void *ptr)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->ram_device = true;
    mr->ops = &ram_device_mem_ops;
    mr->opaque = mr;
    mr->destructor = memory_region_destructor_ram;
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
    /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL. */
    assert(ptr != NULL);
    mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
}

void memory_region_init_alias(MemoryRegion *mr,
                              Object *owner,
                              const char *name,
                              MemoryRegion *orig,
                              hwaddr offset,
                              uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->alias = orig;
    mr->alias_offset = offset;
}

void memory_region_init_rom_nomigrate(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp)
{
    Error *err = NULL;
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->readonly = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, false, mr, &err);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
    if (err) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
    }
}

void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
                                             Object *owner,
                                             const MemoryRegionOps *ops,
                                             void *opaque,
                                             const char *name,
                                             uint64_t size,
                                             Error **errp)
{
    Error *err = NULL;
    assert(ops);
    memory_region_init(mr, owner, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->rom_device = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, false, mr, &err);
    if (err) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
    }
}

void memory_region_init_iommu(void *_iommu_mr,
                              size_t instance_size,
                              const char *mrtypename,
                              Object *owner,
                              const char *name,
                              uint64_t size)
{
    struct IOMMUMemoryRegion *iommu_mr;
    struct MemoryRegion *mr;

    object_initialize(_iommu_mr, instance_size, mrtypename);
    mr = MEMORY_REGION(_iommu_mr);
    memory_region_do_init(mr, owner, name, size);
    iommu_mr = IOMMU_MEMORY_REGION(mr);
    mr->terminates = true;  /* then re-forwards */
    QLIST_INIT(&iommu_mr->iommu_notify);
    iommu_mr->iommu_notify_flags = IOMMU_NOTIFIER_NONE;
}

static void memory_region_finalize(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    assert(!mr->container);

1726 | /* We know the region is not visible in any address space (it | |
1727 | * does not have a container and cannot be a root either because | |
1728 | * it has no references), so we can blindly clear mr->enabled. | |
1729 | * Calling memory_region_set_enabled instead could trigger a | |
1730 | * transaction and cause an infinite loop. | |
1731 | */ | |
1732 | mr->enabled = false; | |
1733 | memory_region_transaction_begin(); | |
1734 | while (!QTAILQ_EMPTY(&mr->subregions)) { | |
1735 | MemoryRegion *subregion = QTAILQ_FIRST(&mr->subregions); | |
1736 | memory_region_del_subregion(mr, subregion); | |
1737 | } | |
1738 | memory_region_transaction_commit(); | |
1739 | ||
1740 | mr->destructor(mr); | |
1741 | memory_region_clear_coalescing(mr); | |
1742 | g_free((char *)mr->name); | |
1743 | g_free(mr->ioeventfds); | |
1744 | } | |
1745 | ||
1746 | Object *memory_region_owner(MemoryRegion *mr) | |
1747 | { | |
1748 | Object *obj = OBJECT(mr); | |
1749 | return obj->parent; | |
1750 | } | |
1751 | ||
1752 | void memory_region_ref(MemoryRegion *mr) | |
1753 | { | |
1754 | /* MMIO callbacks will most likely access data that belongs | |
1755 | * to the owner, hence the need to ref/unref the owner whenever | |
1756 | * the memory region is in use. | |
1757 | * | |
1758 | * The memory region is a child of its owner. As long as the | |
1759 | * owner doesn't itself call unparent on the memory region, | |
1760 | * ref-ing the owner will also keep the memory region alive. | |
1761 | * Memory regions without an owner are supposed never to go away; | |
1762 | * we do not ref/unref them because it would slow down DMA noticeably. | |
1763 | */ | |
1764 | if (mr && mr->owner) { | |
1765 | object_ref(mr->owner); | |
1766 | } | |
1767 | } | |
1768 | ||
1769 | void memory_region_unref(MemoryRegion *mr) | |
1770 | { | |
1771 | if (mr && mr->owner) { | |
1772 | object_unref(mr->owner); | |
1773 | } | |
1774 | } | |
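| ||
| /* | |
| * Typical pairing, sketched under the assumption that another thread | |
| * could otherwise drop the last reference to mr's owner mid-access: | |
| * | |
| *     memory_region_ref(mr); | |
| *     ...access memory backed by mr; the owner stays alive... | |
| *     memory_region_unref(mr); | |
| */ | |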
1775 | ||
1776 | uint64_t memory_region_size(MemoryRegion *mr) | |
1777 | { | |
1778 | if (int128_eq(mr->size, int128_2_64())) { | |
1779 | return UINT64_MAX; | |
1780 | } | |
1781 | return int128_get64(mr->size); | |
1782 | } | |
1783 | ||
1784 | const char *memory_region_name(const MemoryRegion *mr) | |
1785 | { | |
1786 | if (!mr->name) { | |
1787 | ((MemoryRegion *)mr)->name = | |
1788 | object_get_canonical_path_component(OBJECT(mr)); | |
1789 | } | |
1790 | return mr->name; | |
1791 | } | |
1792 | ||
1793 | bool memory_region_is_ram_device(MemoryRegion *mr) | |
1794 | { | |
1795 | return mr->ram_device; | |
1796 | } | |
1797 | ||
1798 | uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr) | |
1799 | { | |
1800 | uint8_t mask = mr->dirty_log_mask; | |
1801 | if (global_dirty_log && mr->ram_block) { | |
1802 | mask |= (1 << DIRTY_MEMORY_MIGRATION); | |
1803 | } | |
1804 | return mask; | |
1805 | } | |
1806 | ||
1807 | bool memory_region_is_logging(MemoryRegion *mr, uint8_t client) | |
1808 | { | |
1809 | return memory_region_get_dirty_log_mask(mr) & (1 << client); | |
1810 | } | |
1811 | ||
1812 | static int memory_region_update_iommu_notify_flags(IOMMUMemoryRegion *iommu_mr, | |
1813 | Error **errp) | |
1814 | { | |
1815 | IOMMUNotifierFlag flags = IOMMU_NOTIFIER_NONE; | |
1816 | IOMMUNotifier *iommu_notifier; | |
1817 | IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr); | |
1818 | int ret = 0; | |
1819 | ||
1820 | IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) { | |
1821 | flags |= iommu_notifier->notifier_flags; | |
1822 | } | |
1823 | ||
1824 | if (flags != iommu_mr->iommu_notify_flags && imrc->notify_flag_changed) { | |
1825 | ret = imrc->notify_flag_changed(iommu_mr, | |
1826 | iommu_mr->iommu_notify_flags, | |
1827 | flags, errp); | |
1828 | } | |
1829 | ||
1830 | if (!ret) { | |
1831 | iommu_mr->iommu_notify_flags = flags; | |
1832 | } | |
1833 | return ret; | |
1834 | } | |
1835 | ||
1836 | int memory_region_register_iommu_notifier(MemoryRegion *mr, | |
1837 | IOMMUNotifier *n, Error **errp) | |
1838 | { | |
1839 | IOMMUMemoryRegion *iommu_mr; | |
1840 | int ret; | |
1841 | ||
1842 | if (mr->alias) { | |
1843 | return memory_region_register_iommu_notifier(mr->alias, n, errp); | |
1844 | } | |
1845 | ||
1846 | /* A notifier must request at least one notification flag bit */ | |
1847 | iommu_mr = IOMMU_MEMORY_REGION(mr); | |
1848 | assert(n->notifier_flags != IOMMU_NOTIFIER_NONE); | |
1849 | assert(n->start <= n->end); | |
1850 | assert(n->iommu_idx >= 0 && | |
1851 | n->iommu_idx < memory_region_iommu_num_indexes(iommu_mr)); | |
1852 | ||
1853 | QLIST_INSERT_HEAD(&iommu_mr->iommu_notify, n, node); | |
1854 | ret = memory_region_update_iommu_notify_flags(iommu_mr, errp); | |
1855 | if (ret) { | |
1856 | QLIST_REMOVE(n, node); | |
1857 | } | |
1858 | return ret; | |
1859 | } | |
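| ||
| /* | |
| * Registration sketch; "my_map_notify" is a hypothetical callback, | |
| * iommu_notifier_init() is the helper from exec/memory.h, and | |
| * iommu_idx 0 is assumed to be valid for the region: | |
| * | |
| *     IOMMUNotifier n; | |
| *     Error *err = NULL; | |
| * | |
| *     iommu_notifier_init(&n, my_map_notify, IOMMU_NOTIFIER_ALL, | |
| *                         0, HWADDR_MAX, 0); | |
| *     if (memory_region_register_iommu_notifier(mr, &n, &err)) { | |
| *         error_report_err(err); | |
| *     } | |
| */ | |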
1860 | ||
1861 | uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr) | |
1862 | { | |
1863 | IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr); | |
1864 | ||
1865 | if (imrc->get_min_page_size) { | |
1866 | return imrc->get_min_page_size(iommu_mr); | |
1867 | } | |
1868 | return TARGET_PAGE_SIZE; | |
1869 | } | |
1870 | ||
1871 | void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n) | |
1872 | { | |
1873 | MemoryRegion *mr = MEMORY_REGION(iommu_mr); | |
1874 | IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr); | |
1875 | hwaddr addr, granularity; | |
1876 | IOMMUTLBEntry iotlb; | |
1877 | ||
1878 | /* If the IOMMU implements its own replay callback, use that instead */ | |
1879 | if (imrc->replay) { | |
1880 | imrc->replay(iommu_mr, n); | |
1881 | return; | |
1882 | } | |
1883 | ||
1884 | granularity = memory_region_iommu_get_min_page_size(iommu_mr); | |
1885 | ||
1886 | for (addr = 0; addr < memory_region_size(mr); addr += granularity) { | |
1887 | iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, n->iommu_idx); | |
1888 | if (iotlb.perm != IOMMU_NONE) { | |
1889 | n->notify(n, &iotlb); | |
1890 | } | |
1891 | ||
1892 | /* If (2^64 - MR size) < granularity, addr + granularity can wrap | |
1893 | * around to zero and loop forever; catch that wraparound here. */ | |
1894 | if ((addr + granularity) < addr) { | |
1895 | break; | |
1896 | } | |
1897 | } | |
1898 | } | |
1899 | ||
1900 | void memory_region_unregister_iommu_notifier(MemoryRegion *mr, | |
1901 | IOMMUNotifier *n) | |
1902 | { | |
1903 | IOMMUMemoryRegion *iommu_mr; | |
1904 | ||
1905 | if (mr->alias) { | |
1906 | memory_region_unregister_iommu_notifier(mr->alias, n); | |
1907 | return; | |
1908 | } | |
1909 | QLIST_REMOVE(n, node); | |
1910 | iommu_mr = IOMMU_MEMORY_REGION(mr); | |
1911 | memory_region_update_iommu_notify_flags(iommu_mr, NULL); | |
1912 | } | |
1913 | ||
1914 | void memory_region_notify_one(IOMMUNotifier *notifier, | |
1915 | IOMMUTLBEntry *entry) | |
1916 | { | |
1917 | IOMMUNotifierFlag request_flags; | |
1918 | hwaddr entry_end = entry->iova + entry->addr_mask; | |
1919 | ||
1920 | /* | |
1921 | * Skip the notification if the notified range does not overlap | |
1922 | * with the range this notifier registered for. | |
1923 | */ | |
1924 | if (notifier->start > entry_end || notifier->end < entry->iova) { | |
1925 | return; | |
1926 | } | |
1927 | ||
1928 | assert(entry->iova >= notifier->start && entry_end <= notifier->end); | |
1929 | ||
1930 | if (entry->perm & IOMMU_RW) { | |
1931 | request_flags = IOMMU_NOTIFIER_MAP; | |
1932 | } else { | |
1933 | request_flags = IOMMU_NOTIFIER_UNMAP; | |
1934 | } | |
1935 | ||
1936 | if (notifier->notifier_flags & request_flags) { | |
1937 | notifier->notify(notifier, entry); | |
1938 | } | |
1939 | } | |
1940 | ||
1941 | void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr, | |
1942 | int iommu_idx, | |
1943 | IOMMUTLBEntry entry) | |
1944 | { | |
1945 | IOMMUNotifier *iommu_notifier; | |
1946 | ||
1947 | assert(memory_region_is_iommu(MEMORY_REGION(iommu_mr))); | |
1948 | ||
1949 | IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) { | |
1950 | if (iommu_notifier->iommu_idx == iommu_idx) { | |
1951 | memory_region_notify_one(iommu_notifier, &entry); | |
1952 | } | |
1953 | } | |
1954 | } | |
1955 | ||
1956 | int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr, | |
1957 | enum IOMMUMemoryRegionAttr attr, | |
1958 | void *data) | |
1959 | { | |
1960 | IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr); | |
1961 | ||
1962 | if (!imrc->get_attr) { | |
1963 | return -EINVAL; | |
1964 | } | |
1965 | ||
1966 | return imrc->get_attr(iommu_mr, attr, data); | |
1967 | } | |
1968 | ||
1969 | int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr, | |
1970 | MemTxAttrs attrs) | |
1971 | { | |
1972 | IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr); | |
1973 | ||
1974 | if (!imrc->attrs_to_index) { | |
1975 | return 0; | |
1976 | } | |
1977 | ||
1978 | return imrc->attrs_to_index(iommu_mr, attrs); | |
1979 | } | |
1980 | ||
1981 | int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr) | |
1982 | { | |
1983 | IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr); | |
1984 | ||
1985 | if (!imrc->num_indexes) { | |
1986 | return 1; | |
1987 | } | |
1988 | ||
1989 | return imrc->num_indexes(iommu_mr); | |
1990 | } | |
1991 | ||
1992 | void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client) | |
1993 | { | |
1994 | uint8_t mask = 1 << client; | |
1995 | uint8_t old_logging; | |
1996 | ||
1997 | assert(client == DIRTY_MEMORY_VGA); | |
1998 | old_logging = mr->vga_logging_count; | |
1999 | mr->vga_logging_count += log ? 1 : -1; | |
2000 | if (!!old_logging == !!mr->vga_logging_count) { | |
2001 | return; | |
2002 | } | |
2003 | ||
2004 | memory_region_transaction_begin(); | |
2005 | mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask); | |
2006 | memory_region_update_pending |= mr->enabled; | |
2007 | memory_region_transaction_commit(); | |
2008 | } | |
2009 | ||
2010 | void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr, | |
2011 | hwaddr size) | |
2012 | { | |
2013 | assert(mr->ram_block); | |
2014 | cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr, | |
2015 | size, | |
2016 | memory_region_get_dirty_log_mask(mr)); | |
2017 | } | |
2018 | ||
2019 | static void memory_region_sync_dirty_bitmap(MemoryRegion *mr) | |
2020 | { | |
2021 | MemoryListener *listener; | |
2022 | AddressSpace *as; | |
2023 | FlatView *view; | |
2024 | FlatRange *fr; | |
2025 | ||
2026 | /* If the same address space has multiple log_sync listeners, we | |
2027 | * visit that address space's FlatView multiple times. But because | |
2028 | * log_sync listeners are rare, this is still cheaper than | |
2029 | * unconditionally walking every address space once. | |
2030 | */ | |
2031 | QTAILQ_FOREACH(listener, &memory_listeners, link) { | |
2032 | if (!listener->log_sync) { | |
2033 | continue; | |
2034 | } | |
2035 | as = listener->address_space; | |
2036 | view = address_space_get_flatview(as); | |
2037 | FOR_EACH_FLAT_RANGE(fr, view) { | |
2038 | if (fr->dirty_log_mask && (!mr || fr->mr == mr)) { | |
2039 | MemoryRegionSection mrs = section_from_flat_range(fr, view); | |
2040 | listener->log_sync(listener, &mrs); | |
2041 | } | |
2042 | } | |
2043 | flatview_unref(view); | |
2044 | } | |
2045 | } | |
2046 | ||
2047 | void memory_region_clear_dirty_bitmap(MemoryRegion *mr, hwaddr start, | |
2048 | hwaddr len) | |
2049 | { | |
2050 | MemoryRegionSection mrs; | |
2051 | MemoryListener *listener; | |
2052 | AddressSpace *as; | |
2053 | FlatView *view; | |
2054 | FlatRange *fr; | |
2055 | hwaddr sec_start, sec_end, sec_size; | |
2056 | ||
2057 | QTAILQ_FOREACH(listener, &memory_listeners, link) { | |
2058 | if (!listener->log_clear) { | |
2059 | continue; | |
2060 | } | |
2061 | as = listener->address_space; | |
2062 | view = address_space_get_flatview(as); | |
2063 | FOR_EACH_FLAT_RANGE(fr, view) { | |
2064 | if (!fr->dirty_log_mask || fr->mr != mr) { | |
2065 | /* | |
2066 | * The clear-dirty-bitmap operation only applies to regions | |
2067 | * that have dirty logging enabled in the first place. | |
2068 | */ | |
2069 | continue; | |
2070 | } | |
2071 | ||
2072 | mrs = section_from_flat_range(fr, view); | |
2073 | ||
2074 | sec_start = MAX(mrs.offset_within_region, start); | |
2075 | sec_end = mrs.offset_within_region + int128_get64(mrs.size); | |
2076 | sec_end = MIN(sec_end, start + len); | |
2077 | ||
2078 | if (sec_start >= sec_end) { | |
2079 | /* | |
2080 | * If this memory region section has no intersection | |
2081 | * with the requested range, skip. | |
2082 | */ | |
2083 | continue; | |
2084 | } | |
2085 | ||
2086 | /* Valid case; shrink the section if needed */ | |
2087 | mrs.offset_within_address_space += | |
2088 | sec_start - mrs.offset_within_region; | |
2089 | mrs.offset_within_region = sec_start; | |
2090 | sec_size = sec_end - sec_start; | |
2091 | mrs.size = int128_make64(sec_size); | |
2092 | listener->log_clear(listener, &mrs); | |
2093 | } | |
2094 | flatview_unref(view); | |
2095 | } | |
2096 | } | |
2097 | ||
2098 | DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr, | |
2099 | hwaddr addr, | |
2100 | hwaddr size, | |
2101 | unsigned client) | |
2102 | { | |
2103 | DirtyBitmapSnapshot *snapshot; | |
2104 | assert(mr->ram_block); | |
2105 | memory_region_sync_dirty_bitmap(mr); | |
2106 | snapshot = cpu_physical_memory_snapshot_and_clear_dirty(mr, addr, size, client); | |
2107 | memory_global_after_dirty_log_sync(); | |
2108 | return snapshot; | |
2109 | } | |
2110 | ||
2111 | bool memory_region_snapshot_get_dirty(MemoryRegion *mr, DirtyBitmapSnapshot *snap, | |
2112 | hwaddr addr, hwaddr size) | |
2113 | { | |
2114 | assert(mr->ram_block); | |
2115 | return cpu_physical_memory_snapshot_get_dirty(snap, | |
2116 | memory_region_get_ram_addr(mr) + addr, size); | |
2117 | } | |
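| ||
| /* | |
| * Display-style sketch, assuming a RAM-backed mr and hypothetical | |
| * ofs/len: snapshot-and-clear the VGA dirty bits once, then test | |
| * sub-ranges against the stable snapshot. | |
| * | |
| *     DirtyBitmapSnapshot *snap = | |
| *         memory_region_snapshot_and_clear_dirty(mr, 0, | |
| *                                                memory_region_size(mr), | |
| *                                                DIRTY_MEMORY_VGA); | |
| *     if (memory_region_snapshot_get_dirty(mr, snap, ofs, len)) { | |
| *         ...redraw that chunk... | |
| *     } | |
| *     g_free(snap); | |
| */ | |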
2118 | ||
2119 | void memory_region_set_readonly(MemoryRegion *mr, bool readonly) | |
2120 | { | |
2121 | if (mr->readonly != readonly) { | |
2122 | memory_region_transaction_begin(); | |
2123 | mr->readonly = readonly; | |
2124 | memory_region_update_pending |= mr->enabled; | |
2125 | memory_region_transaction_commit(); | |
2126 | } | |
2127 | } | |
2128 | ||
2129 | void memory_region_set_nonvolatile(MemoryRegion *mr, bool nonvolatile) | |
2130 | { | |
2131 | if (mr->nonvolatile != nonvolatile) { | |
2132 | memory_region_transaction_begin(); | |
2133 | mr->nonvolatile = nonvolatile; | |
2134 | memory_region_update_pending |= mr->enabled; | |
2135 | memory_region_transaction_commit(); | |
2136 | } | |
2137 | } | |
2138 | ||
2139 | void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode) | |
2140 | { | |
2141 | if (mr->romd_mode != romd_mode) { | |
2142 | memory_region_transaction_begin(); | |
2143 | mr->romd_mode = romd_mode; | |
2144 | memory_region_update_pending |= mr->enabled; | |
2145 | memory_region_transaction_commit(); | |
2146 | } | |
2147 | } | |
2148 | ||
2149 | void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr, | |
2150 | hwaddr size, unsigned client) | |
2151 | { | |
2152 | assert(mr->ram_block); | |
2153 | cpu_physical_memory_test_and_clear_dirty( | |
2154 | memory_region_get_ram_addr(mr) + addr, size, client); | |
2155 | } | |
2156 | ||
2157 | int memory_region_get_fd(MemoryRegion *mr) | |
2158 | { | |
2159 | int fd; | |
2160 | ||
2161 | RCU_READ_LOCK_GUARD(); | |
2162 | while (mr->alias) { | |
2163 | mr = mr->alias; | |
2164 | } | |
2165 | fd = mr->ram_block->fd; | |
2166 | ||
2167 | return fd; | |
2168 | } | |
2169 | ||
2170 | void *memory_region_get_ram_ptr(MemoryRegion *mr) | |
2171 | { | |
2172 | void *ptr; | |
2173 | uint64_t offset = 0; | |
2174 | ||
2175 | RCU_READ_LOCK_GUARD(); | |
2176 | while (mr->alias) { | |
2177 | offset += mr->alias_offset; | |
2178 | mr = mr->alias; | |
2179 | } | |
2180 | assert(mr->ram_block); | |
2181 | ptr = qemu_map_ram_ptr(mr->ram_block, offset); | |
2182 | ||
2183 | return ptr; | |
2184 | } | |
2185 | ||
2186 | MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset) | |
2187 | { | |
2188 | RAMBlock *block; | |
2189 | ||
2190 | block = qemu_ram_block_from_host(ptr, false, offset); | |
2191 | if (!block) { | |
2192 | return NULL; | |
2193 | } | |
2194 | ||
2195 | return block->mr; | |
2196 | } | |
2197 | ||
2198 | ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr) | |
2199 | { | |
2200 | return mr->ram_block ? mr->ram_block->offset : RAM_ADDR_INVALID; | |
2201 | } | |
2202 | ||
2203 | void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp) | |
2204 | { | |
2205 | assert(mr->ram_block); | |
2206 | ||
2207 | qemu_ram_resize(mr->ram_block, newsize, errp); | |
2208 | } | |
2209 | ||
2210 | ||
2211 | void memory_region_do_writeback(MemoryRegion *mr, hwaddr addr, hwaddr size) | |
2212 | { | |
2213 | /* | |
2214 | * This might need to be extended to cover | |
2215 | * other types of memory regions. | |
2216 | */ | |
2217 | if (mr->ram_block && mr->dirty_log_mask) { | |
2218 | qemu_ram_writeback(mr->ram_block, addr, size); | |
2219 | } | |
2220 | } | |
2221 | ||
2222 | /* | |
2223 | * Call proper memory listeners about the change on the newly | |
2224 | * added/removed CoalescedMemoryRange. | |
2225 | */ | |
2226 | static void memory_region_update_coalesced_range(MemoryRegion *mr, | |
2227 | CoalescedMemoryRange *cmr, | |
2228 | bool add) | |
2229 | { | |
2230 | AddressSpace *as; | |
2231 | FlatView *view; | |
2232 | FlatRange *fr; | |
2233 | ||
2234 | QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) { | |
2235 | view = address_space_get_flatview(as); | |
2236 | FOR_EACH_FLAT_RANGE(fr, view) { | |
2237 | if (fr->mr == mr) { | |
2238 | flat_range_coalesced_io_notify(fr, as, cmr, add); | |
2239 | } | |
2240 | } | |
2241 | flatview_unref(view); | |
2242 | } | |
2243 | } | |
2244 | ||
2245 | void memory_region_set_coalescing(MemoryRegion *mr) | |
2246 | { | |
2247 | memory_region_clear_coalescing(mr); | |
2248 | memory_region_add_coalescing(mr, 0, int128_get64(mr->size)); | |
2249 | } | |
2250 | ||
2251 | void memory_region_add_coalescing(MemoryRegion *mr, | |
2252 | hwaddr offset, | |
2253 | uint64_t size) | |
2254 | { | |
2255 | CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr)); | |
2256 | ||
2257 | cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size)); | |
2258 | QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link); | |
2259 | memory_region_update_coalesced_range(mr, cmr, true); | |
2260 | memory_region_set_flush_coalesced(mr); | |
2261 | } | |
2262 | ||
2263 | void memory_region_clear_coalescing(MemoryRegion *mr) | |
2264 | { | |
2265 | CoalescedMemoryRange *cmr; | |
2266 | ||
2267 | if (QTAILQ_EMPTY(&mr->coalesced)) { | |
2268 | return; | |
2269 | } | |
2270 | ||
2271 | qemu_flush_coalesced_mmio_buffer(); | |
2272 | mr->flush_coalesced_mmio = false; | |
2273 | ||
2274 | while (!QTAILQ_EMPTY(&mr->coalesced)) { | |
2275 | cmr = QTAILQ_FIRST(&mr->coalesced); | |
2276 | QTAILQ_REMOVE(&mr->coalesced, cmr, link); | |
2277 | memory_region_update_coalesced_range(mr, cmr, false); | |
2278 | g_free(cmr); | |
2279 | } | |
2280 | } | |
2281 | ||
2282 | void memory_region_set_flush_coalesced(MemoryRegion *mr) | |
2283 | { | |
2284 | mr->flush_coalesced_mmio = true; | |
2285 | } | |
2286 | ||
2287 | void memory_region_clear_flush_coalesced(MemoryRegion *mr) | |
2288 | { | |
2289 | qemu_flush_coalesced_mmio_buffer(); | |
2290 | if (QTAILQ_EMPTY(&mr->coalesced)) { | |
2291 | mr->flush_coalesced_mmio = false; | |
2292 | } | |
2293 | } | |
2294 | ||
2295 | void memory_region_clear_global_locking(MemoryRegion *mr) | |
2296 | { | |
2297 | mr->global_locking = false; | |
2298 | } | |
2299 | ||
2300 | static bool userspace_eventfd_warning; | |
2301 | ||
2302 | void memory_region_add_eventfd(MemoryRegion *mr, | |
2303 | hwaddr addr, | |
2304 | unsigned size, | |
2305 | bool match_data, | |
2306 | uint64_t data, | |
2307 | EventNotifier *e) | |
2308 | { | |
2309 | MemoryRegionIoeventfd mrfd = { | |
2310 | .addr.start = int128_make64(addr), | |
2311 | .addr.size = int128_make64(size), | |
2312 | .match_data = match_data, | |
2313 | .data = data, | |
2314 | .e = e, | |
2315 | }; | |
2316 | unsigned i; | |
2317 | ||
2318 | if (kvm_enabled() && (!(kvm_eventfds_enabled() || | |
2319 | userspace_eventfd_warning))) { | |
2320 | userspace_eventfd_warning = true; | |
2321 | error_report("Using eventfd without MMIO binding in KVM. " | |
2322 | "Suboptimal performance expected"); | |
2323 | } | |
2324 | ||
2325 | if (size) { | |
2326 | adjust_endianness(mr, &mrfd.data, size_memop(size) | MO_TE); | |
2327 | } | |
2328 | memory_region_transaction_begin(); | |
2329 | for (i = 0; i < mr->ioeventfd_nb; ++i) { | |
2330 | if (memory_region_ioeventfd_before(&mrfd, &mr->ioeventfds[i])) { | |
2331 | break; | |
2332 | } | |
2333 | } | |
2334 | ++mr->ioeventfd_nb; | |
2335 | mr->ioeventfds = g_realloc(mr->ioeventfds, | |
2336 | sizeof(*mr->ioeventfds) * mr->ioeventfd_nb); | |
2337 | memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i], | |
2338 | sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i)); | |
2339 | mr->ioeventfds[i] = mrfd; | |
2340 | ioeventfd_update_pending |= mr->enabled; | |
2341 | memory_region_transaction_commit(); | |
2342 | } | |
2343 | ||
2344 | void memory_region_del_eventfd(MemoryRegion *mr, | |
2345 | hwaddr addr, | |
2346 | unsigned size, | |
2347 | bool match_data, | |
2348 | uint64_t data, | |
2349 | EventNotifier *e) | |
2350 | { | |
2351 | MemoryRegionIoeventfd mrfd = { | |
2352 | .addr.start = int128_make64(addr), | |
2353 | .addr.size = int128_make64(size), | |
2354 | .match_data = match_data, | |
2355 | .data = data, | |
2356 | .e = e, | |
2357 | }; | |
2358 | unsigned i; | |
2359 | ||
2360 | if (size) { | |
2361 | adjust_endianness(mr, &mrfd.data, size_memop(size) | MO_TE); | |
2362 | } | |
2363 | memory_region_transaction_begin(); | |
2364 | for (i = 0; i < mr->ioeventfd_nb; ++i) { | |
2365 | if (memory_region_ioeventfd_equal(&mrfd, &mr->ioeventfds[i])) { | |
2366 | break; | |
2367 | } | |
2368 | } | |
2369 | assert(i != mr->ioeventfd_nb); | |
2370 | memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1], | |
2371 | sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1))); | |
2372 | --mr->ioeventfd_nb; | |
| /* the "+ 1" keeps the allocation non-zero-sized: g_realloc(ptr, 0) | |
| * behaves like g_free() and would return NULL */ | |
2373 | mr->ioeventfds = g_realloc(mr->ioeventfds, | |
2374 | sizeof(*mr->ioeventfds) * mr->ioeventfd_nb + 1); | |
2375 | ioeventfd_update_pending |= mr->enabled; | |
2376 | memory_region_transaction_commit(); | |
2377 | } | |
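| ||
| /* | |
| * Doorbell sketch, assuming a 4-byte register at offset 0x40 of an | |
| * MMIO region mr, with the notifier set up via event_notifier_init() | |
| * from qemu/event_notifier.h: a guest write of 1 then kicks the | |
| * notifier directly in the accelerator when MMIO binding is available. | |
| * | |
| *     EventNotifier notifier; | |
| * | |
| *     event_notifier_init(&notifier, 0); | |
| *     memory_region_add_eventfd(mr, 0x40, 4, true, 1, &notifier); | |
| *     ... | |
| *     memory_region_del_eventfd(mr, 0x40, 4, true, 1, &notifier); | |
| */ | |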
2378 | ||
2379 | static void memory_region_update_container_subregions(MemoryRegion *subregion) | |
2380 | { | |
2381 | MemoryRegion *mr = subregion->container; | |
2382 | MemoryRegion *other; | |
2383 | ||
2384 | memory_region_transaction_begin(); | |
2385 | ||
2386 | memory_region_ref(subregion); | |
2387 | QTAILQ_FOREACH(other, &mr->subregions, subregions_link) { | |
2388 | if (subregion->priority >= other->priority) { | |
2389 | QTAILQ_INSERT_BEFORE(other, subregion, subregions_link); | |
2390 | goto done; | |
2391 | } | |
2392 | } | |
2393 | QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link); | |
2394 | done: | |
2395 | memory_region_update_pending |= mr->enabled && subregion->enabled; | |
2396 | memory_region_transaction_commit(); | |
2397 | } | |
2398 | ||
2399 | static void memory_region_add_subregion_common(MemoryRegion *mr, | |
2400 | hwaddr offset, | |
2401 | MemoryRegion *subregion) | |
2402 | { | |
2403 | assert(!subregion->container); | |
2404 | subregion->container = mr; | |
2405 | subregion->addr = offset; | |
2406 | memory_region_update_container_subregions(subregion); | |
2407 | } | |
2408 | ||
2409 | void memory_region_add_subregion(MemoryRegion *mr, | |
2410 | hwaddr offset, | |
2411 | MemoryRegion *subregion) | |
2412 | { | |
2413 | subregion->priority = 0; | |
2414 | memory_region_add_subregion_common(mr, offset, subregion); | |
2415 | } | |
2416 | ||
2417 | void memory_region_add_subregion_overlap(MemoryRegion *mr, | |
2418 | hwaddr offset, | |
2419 | MemoryRegion *subregion, | |
2420 | int priority) | |
2421 | { | |
2422 | subregion->priority = priority; | |
2423 | memory_region_add_subregion_common(mr, offset, subregion); | |
2424 | } | |
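| ||
| /* | |
| * Priority sketch with hypothetical "sysmem", "ram" and "bios": the | |
| * higher-priority BIOS region shadows RAM where the two intersect, | |
| * and RAM shows through wherever BIOS is absent or disabled. | |
| * | |
| *     memory_region_add_subregion_overlap(sysmem, 0x0, ram, 0); | |
| *     memory_region_add_subregion_overlap(sysmem, 0xe0000, bios, 1); | |
| */ | |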
2425 | ||
2426 | void memory_region_del_subregion(MemoryRegion *mr, | |
2427 | MemoryRegion *subregion) | |
2428 | { | |
2429 | memory_region_transaction_begin(); | |
2430 | assert(subregion->container == mr); | |
2431 | subregion->container = NULL; | |
2432 | QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link); | |
2433 | memory_region_unref(subregion); | |
2434 | memory_region_update_pending |= mr->enabled && subregion->enabled; | |
2435 | memory_region_transaction_commit(); | |
2436 | } | |
2437 | ||
2438 | void memory_region_set_enabled(MemoryRegion *mr, bool enabled) | |
2439 | { | |
2440 | if (enabled == mr->enabled) { | |
2441 | return; | |
2442 | } | |
2443 | memory_region_transaction_begin(); | |
2444 | mr->enabled = enabled; | |
2445 | memory_region_update_pending = true; | |
2446 | memory_region_transaction_commit(); | |
2447 | } | |
2448 | ||
2449 | void memory_region_set_size(MemoryRegion *mr, uint64_t size) | |
2450 | { | |
2451 | Int128 s = int128_make64(size); | |
2452 | ||
2453 | if (size == UINT64_MAX) { | |
2454 | s = int128_2_64(); | |
2455 | } | |
2456 | if (int128_eq(s, mr->size)) { | |
2457 | return; | |
2458 | } | |
2459 | memory_region_transaction_begin(); | |
2460 | mr->size = s; | |
2461 | memory_region_update_pending = true; | |
2462 | memory_region_transaction_commit(); | |
2463 | } | |
2464 | ||
2465 | static void memory_region_readd_subregion(MemoryRegion *mr) | |
2466 | { | |
2467 | MemoryRegion *container = mr->container; | |
2468 | ||
2469 | if (container) { | |
2470 | memory_region_transaction_begin(); | |
2471 | memory_region_ref(mr); | |
2472 | memory_region_del_subregion(container, mr); | |
2473 | mr->container = container; | |
2474 | memory_region_update_container_subregions(mr); | |
2475 | memory_region_unref(mr); | |
2476 | memory_region_transaction_commit(); | |
2477 | } | |
2478 | } | |
2479 | ||
2480 | void memory_region_set_address(MemoryRegion *mr, hwaddr addr) | |
2481 | { | |
2482 | if (addr != mr->addr) { | |
2483 | mr->addr = addr; | |
2484 | memory_region_readd_subregion(mr); | |
2485 | } | |
2486 | } | |
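| ||
| /* | |
| * Batching sketch with hypothetical "bank0"/"bank1": wrapping several | |
| * updates in one transaction makes listeners see a single commit | |
| * instead of three separate topology rebuilds. | |
| * | |
| *     memory_region_transaction_begin(); | |
| *     memory_region_set_enabled(bank0, false); | |
| *     memory_region_set_enabled(bank1, true); | |
| *     memory_region_set_address(bank1, 0x8000); | |
| *     memory_region_transaction_commit(); | |
| */ | |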
2487 | ||
2488 | void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset) | |
2489 | { | |
2490 | assert(mr->alias); | |
2491 | ||
2492 | if (offset == mr->alias_offset) { | |
2493 | return; | |
2494 | } | |
2495 | ||
2496 | memory_region_transaction_begin(); | |
2497 | mr->alias_offset = offset; | |
2498 | memory_region_update_pending |= mr->enabled; | |
2499 | memory_region_transaction_commit(); | |
2500 | } | |
2501 | ||
2502 | uint64_t memory_region_get_alignment(const MemoryRegion *mr) | |
2503 | { | |
2504 | return mr->align; | |
2505 | } | |
2506 | ||
2507 | static int cmp_flatrange_addr(const void *addr_, const void *fr_) | |
2508 | { | |
2509 | const AddrRange *addr = addr_; | |
2510 | const FlatRange *fr = fr_; | |
2511 | ||
2512 | if (int128_le(addrrange_end(*addr), fr->addr.start)) { | |
2513 | return -1; | |
2514 | } else if (int128_ge(addr->start, addrrange_end(fr->addr))) { | |
2515 | return 1; | |
2516 | } | |
2517 | return 0; | |
2518 | } | |
2519 | ||
2520 | static FlatRange *flatview_lookup(FlatView *view, AddrRange addr) | |
2521 | { | |
2522 | return bsearch(&addr, view->ranges, view->nr, | |
2523 | sizeof(FlatRange), cmp_flatrange_addr); | |
2524 | } | |
2525 | ||
2526 | bool memory_region_is_mapped(MemoryRegion *mr) | |
2527 | { | |
2528 | return mr->container != NULL; | |
2529 | } | |
2530 | ||
2531 | /* Same as memory_region_find, but it does not add a reference to the | |
2532 | * returned region. It must be called from an RCU critical section. | |
2533 | */ | |
2534 | static MemoryRegionSection memory_region_find_rcu(MemoryRegion *mr, | |
2535 | hwaddr addr, uint64_t size) | |
2536 | { | |
2537 | MemoryRegionSection ret = { .mr = NULL }; | |
2538 | MemoryRegion *root; | |
2539 | AddressSpace *as; | |
2540 | AddrRange range; | |
2541 | FlatView *view; | |
2542 | FlatRange *fr; | |
2543 | ||
2544 | addr += mr->addr; | |
2545 | for (root = mr; root->container; ) { | |
2546 | root = root->container; | |
2547 | addr += root->addr; | |
2548 | } | |
2549 | ||
2550 | as = memory_region_to_address_space(root); | |
2551 | if (!as) { | |
2552 | return ret; | |
2553 | } | |
2554 | range = addrrange_make(int128_make64(addr), int128_make64(size)); | |
2555 | ||
2556 | view = address_space_to_flatview(as); | |
2557 | fr = flatview_lookup(view, range); | |
2558 | if (!fr) { | |
2559 | return ret; | |
2560 | } | |
2561 | ||
2562 | while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) { | |
2563 | --fr; | |
2564 | } | |
2565 | ||
2566 | ret.mr = fr->mr; | |
2567 | ret.fv = view; | |
2568 | range = addrrange_intersection(range, fr->addr); | |
2569 | ret.offset_within_region = fr->offset_in_region; | |
2570 | ret.offset_within_region += int128_get64(int128_sub(range.start, | |
2571 | fr->addr.start)); | |
2572 | ret.size = range.size; | |
2573 | ret.offset_within_address_space = int128_get64(range.start); | |
2574 | ret.readonly = fr->readonly; | |
2575 | ret.nonvolatile = fr->nonvolatile; | |
2576 | return ret; | |
2577 | } | |
2578 | ||
2579 | MemoryRegionSection memory_region_find(MemoryRegion *mr, | |
2580 | hwaddr addr, uint64_t size) | |
2581 | { | |
2582 | MemoryRegionSection ret; | |
2583 | RCU_READ_LOCK_GUARD(); | |
2584 | ret = memory_region_find_rcu(mr, addr, size); | |
2585 | if (ret.mr) { | |
2586 | memory_region_ref(ret.mr); | |
2587 | } | |
2588 | return ret; | |
2589 | } | |
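| ||
| /* | |
| * Lookup sketch: memory_region_find() returns a referenced section, | |
| * so the caller must unref it when done ("sysmem" is hypothetical). | |
| * | |
| *     MemoryRegionSection sec = memory_region_find(sysmem, addr, 4); | |
| * | |
| *     if (sec.mr) { | |
| *         ...use sec.offset_within_region, sec.size... | |
| *         memory_region_unref(sec.mr); | |
| *     } | |
| */ | |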
2590 | ||
2591 | bool memory_region_present(MemoryRegion *container, hwaddr addr) | |
2592 | { | |
2593 | MemoryRegion *mr; | |
2594 | ||
2595 | RCU_READ_LOCK_GUARD(); | |
2596 | mr = memory_region_find_rcu(container, addr, 1).mr; | |
2597 | return mr && mr != container; | |
2598 | } | |
2599 | ||
2600 | void memory_global_dirty_log_sync(void) | |
2601 | { | |
2602 | memory_region_sync_dirty_bitmap(NULL); | |
2603 | } | |
2604 | ||
2605 | void memory_global_after_dirty_log_sync(void) | |
2606 | { | |
2607 | MEMORY_LISTENER_CALL_GLOBAL(log_global_after_sync, Forward); | |
2608 | } | |
2609 | ||
2610 | static VMChangeStateEntry *vmstate_change; | |
2611 | ||
2612 | void memory_global_dirty_log_start(void) | |
2613 | { | |
2614 | if (vmstate_change) { | |
2615 | qemu_del_vm_change_state_handler(vmstate_change); | |
2616 | vmstate_change = NULL; | |
2617 | } | |
2618 | ||
2619 | global_dirty_log = true; | |
2620 | ||
2621 | MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward); | |
2622 | ||
2623 | /* Refresh DIRTY_MEMORY_MIGRATION bit. */ | |
2624 | memory_region_transaction_begin(); | |
2625 | memory_region_update_pending = true; | |
2626 | memory_region_transaction_commit(); | |
2627 | } | |
2628 | ||
2629 | static void memory_global_dirty_log_do_stop(void) | |
2630 | { | |
2631 | global_dirty_log = false; | |
2632 | ||
2633 | /* Refresh DIRTY_MEMORY_MIGRATION bit. */ | |
2634 | memory_region_transaction_begin(); | |
2635 | memory_region_update_pending = true; | |
2636 | memory_region_transaction_commit(); | |
2637 | ||
2638 | MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse); | |
2639 | } | |
2640 | ||
2641 | static void memory_vm_change_state_handler(void *opaque, int running, | |
2642 | RunState state) | |
2643 | { | |
2644 | if (running) { | |
2645 | memory_global_dirty_log_do_stop(); | |
2646 | ||
2647 | if (vmstate_change) { | |
2648 | qemu_del_vm_change_state_handler(vmstate_change); | |
2649 | vmstate_change = NULL; | |
2650 | } | |
2651 | } | |
2652 | } | |
2653 | ||
2654 | void memory_global_dirty_log_stop(void) | |
2655 | { | |
2656 | if (!runstate_is_running()) { | |
2657 | if (vmstate_change) { | |
2658 | return; | |
2659 | } | |
2660 | vmstate_change = qemu_add_vm_change_state_handler( | |
2661 | memory_vm_change_state_handler, NULL); | |
2662 | return; | |
2663 | } | |
2664 | ||
2665 | memory_global_dirty_log_do_stop(); | |
2666 | } | |
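| ||
| /* | |
| * Migration-style sketch of the expected call sequence: | |
| * | |
| *     memory_global_dirty_log_start(); | |
| *     ...then repeatedly: | |
| *     memory_global_dirty_log_sync(); | |
| *     memory_global_after_dirty_log_sync(); | |
| *     ...and once done: | |
| *     memory_global_dirty_log_stop(); | |
| */ | |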
2667 | ||
2668 | static void listener_add_address_space(MemoryListener *listener, | |
2669 | AddressSpace *as) | |
2670 | { | |
2671 | FlatView *view; | |
2672 | FlatRange *fr; | |
2673 | ||
2674 | if (listener->begin) { | |
2675 | listener->begin(listener); | |
2676 | } | |
2677 | if (global_dirty_log) { | |
2678 | if (listener->log_global_start) { | |
2679 | listener->log_global_start(listener); | |
2680 | } | |
2681 | } | |
2682 | ||
2683 | view = address_space_get_flatview(as); | |
2684 | FOR_EACH_FLAT_RANGE(fr, view) { | |
2685 | MemoryRegionSection section = section_from_flat_range(fr, view); | |
2686 | ||
2687 | if (listener->region_add) { | |
2688 | listener->region_add(listener, §ion); | |
2689 | } | |
2690 | if (fr->dirty_log_mask && listener->log_start) { | |
2691 | listener->log_start(listener, §ion, 0, fr->dirty_log_mask); | |
2692 | } | |
2693 | } | |
2694 | if (listener->commit) { | |
2695 | listener->commit(listener); | |
2696 | } | |
2697 | flatview_unref(view); | |
2698 | } | |
2699 | ||
2700 | static void listener_del_address_space(MemoryListener *listener, | |
2701 | AddressSpace *as) | |
2702 | { | |
2703 | FlatView *view; | |
2704 | FlatRange *fr; | |
2705 | ||
2706 | if (listener->begin) { | |
2707 | listener->begin(listener); | |
2708 | } | |
2709 | view = address_space_get_flatview(as); | |
2710 | FOR_EACH_FLAT_RANGE(fr, view) { | |
2711 | MemoryRegionSection section = section_from_flat_range(fr, view); | |
2712 | ||
2713 | if (fr->dirty_log_mask && listener->log_stop) { | |
2714 | listener->log_stop(listener, §ion, fr->dirty_log_mask, 0); | |
2715 | } | |
2716 | if (listener->region_del) { | |
2717 | listener->region_del(listener, §ion); | |
2718 | } | |
2719 | } | |
2720 | if (listener->commit) { | |
2721 | listener->commit(listener); | |
2722 | } | |
2723 | flatview_unref(view); | |
2724 | } | |
2725 | ||
2726 | void memory_listener_register(MemoryListener *listener, AddressSpace *as) | |
2727 | { | |
2728 | MemoryListener *other = NULL; | |
2729 | ||
2730 | listener->address_space = as; | |
2731 | if (QTAILQ_EMPTY(&memory_listeners) | |
2732 | || listener->priority >= QTAILQ_LAST(&memory_listeners)->priority) { | |
2733 | QTAILQ_INSERT_TAIL(&memory_listeners, listener, link); | |
2734 | } else { | |
2735 | QTAILQ_FOREACH(other, &memory_listeners, link) { | |
2736 | if (listener->priority < other->priority) { | |
2737 | break; | |
2738 | } | |
2739 | } | |
2740 | QTAILQ_INSERT_BEFORE(other, listener, link); | |
2741 | } | |
2742 | ||
2743 | if (QTAILQ_EMPTY(&as->listeners) | |
2744 | || listener->priority >= QTAILQ_LAST(&as->listeners)->priority) { | |
2745 | QTAILQ_INSERT_TAIL(&as->listeners, listener, link_as); | |
2746 | } else { | |
2747 | QTAILQ_FOREACH(other, &as->listeners, link_as) { | |
2748 | if (listener->priority < other->priority) { | |
2749 | break; | |
2750 | } | |
2751 | } | |
2752 | QTAILQ_INSERT_BEFORE(other, listener, link_as); | |
2753 | } | |
2754 | ||
2755 | listener_add_address_space(listener, as); | |
2756 | } | |
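| ||
| /* | |
| * Minimal listener sketch; the callback name is hypothetical and | |
| * priority 10 merely places it after most platform listeners. | |
| * | |
| *     static void my_region_add(MemoryListener *l, | |
| *                               MemoryRegionSection *s) | |
| *     { | |
| *         qemu_printf("add: %s\n", memory_region_name(s->mr)); | |
| *     } | |
| * | |
| *     static MemoryListener my_listener = { | |
| *         .region_add = my_region_add, | |
| *         .priority = 10, | |
| *     }; | |
| * | |
| *     memory_listener_register(&my_listener, &address_space_memory); | |
| */ | |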
2757 | ||
2758 | void memory_listener_unregister(MemoryListener *listener) | |
2759 | { | |
2760 | if (!listener->address_space) { | |
2761 | return; | |
2762 | } | |
2763 | ||
2764 | listener_del_address_space(listener, listener->address_space); | |
2765 | QTAILQ_REMOVE(&memory_listeners, listener, link); | |
2766 | QTAILQ_REMOVE(&listener->address_space->listeners, listener, link_as); | |
2767 | listener->address_space = NULL; | |
2768 | } | |
2769 | ||
2770 | void address_space_remove_listeners(AddressSpace *as) | |
2771 | { | |
2772 | while (!QTAILQ_EMPTY(&as->listeners)) { | |
2773 | memory_listener_unregister(QTAILQ_FIRST(&as->listeners)); | |
2774 | } | |
2775 | } | |
2776 | ||
2777 | void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name) | |
2778 | { | |
2779 | memory_region_ref(root); | |
2780 | as->root = root; | |
2781 | as->current_map = NULL; | |
2782 | as->ioeventfd_nb = 0; | |
2783 | as->ioeventfds = NULL; | |
2784 | QTAILQ_INIT(&as->listeners); | |
2785 | QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link); | |
2786 | as->name = g_strdup(name ? name : "anonymous"); | |
2787 | address_space_update_topology(as); | |
2788 | address_space_update_ioeventfds(as); | |
2789 | } | |
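| ||
| /* | |
| * Sketch of a device-private address space, assuming a hypothetical | |
| * device state "dev" embedding the container region and AddressSpace: | |
| * | |
| *     memory_region_init(&dev->bus_mr, OBJECT(dev), "bus", UINT64_MAX); | |
| *     address_space_init(&dev->as, &dev->bus_mr, "dev-dma"); | |
| *     ... | |
| *     address_space_destroy(&dev->as); | |
| */ | |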
2790 | ||
2791 | static void do_address_space_destroy(AddressSpace *as) | |
2792 | { | |
2793 | assert(QTAILQ_EMPTY(&as->listeners)); | |
2794 | ||
2795 | flatview_unref(as->current_map); | |
2796 | g_free(as->name); | |
2797 | g_free(as->ioeventfds); | |
2798 | memory_region_unref(as->root); | |
2799 | } | |
2800 | ||
2801 | void address_space_destroy(AddressSpace *as) | |
2802 | { | |
2803 | MemoryRegion *root = as->root; | |
2804 | ||
2805 | /* Flush out anything from MemoryListeners listening in on this */ | |
2806 | memory_region_transaction_begin(); | |
2807 | as->root = NULL; | |
2808 | memory_region_transaction_commit(); | |
2809 | QTAILQ_REMOVE(&address_spaces, as, address_spaces_link); | |
2810 | ||
2811 | /* At this point, as->dispatch and as->current_map are dummy | |
2812 | * entries that the guest should never use. Wait for the old | |
2813 | * values to expire before freeing the data. | |
2814 | */ | |
2815 | as->root = root; | |
2816 | call_rcu(as, do_address_space_destroy, rcu); | |
2817 | } | |
2818 | ||
2819 | static const char *memory_region_type(MemoryRegion *mr) | |
2820 | { | |
2821 | if (memory_region_is_ram_device(mr)) { | |
2822 | return "ramd"; | |
2823 | } else if (memory_region_is_romd(mr)) { | |
2824 | return "romd"; | |
2825 | } else if (memory_region_is_rom(mr)) { | |
2826 | return "rom"; | |
2827 | } else if (memory_region_is_ram(mr)) { | |
2828 | return "ram"; | |
2829 | } else { | |
2830 | return "i/o"; | |
2831 | } | |
2832 | } | |
2833 | ||
2834 | typedef struct MemoryRegionList MemoryRegionList; | |
2835 | ||
2836 | struct MemoryRegionList { | |
2837 | const MemoryRegion *mr; | |
2838 | QTAILQ_ENTRY(MemoryRegionList) mrqueue; | |
2839 | }; | |
2840 | ||
2841 | typedef QTAILQ_HEAD(, MemoryRegionList) MemoryRegionListHead; | |
2842 | ||
2843 | #define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \ | |
2844 | int128_sub((size), int128_one())) : 0) | |
2845 | #define MTREE_INDENT " " | |
2846 | ||
2847 | static void mtree_expand_owner(const char *label, Object *obj) | |
2848 | { | |
2849 | DeviceState *dev = (DeviceState *) object_dynamic_cast(obj, TYPE_DEVICE); | |
2850 | ||
2851 | qemu_printf(" %s:{%s", label, dev ? "dev" : "obj"); | |
2852 | if (dev && dev->id) { | |
2853 | qemu_printf(" id=%s", dev->id); | |
2854 | } else { | |
2855 | gchar *canonical_path = object_get_canonical_path(obj); | |
2856 | if (canonical_path) { | |
2857 | qemu_printf(" path=%s", canonical_path); | |
2858 | g_free(canonical_path); | |
2859 | } else { | |
2860 | qemu_printf(" type=%s", object_get_typename(obj)); | |
2861 | } | |
2862 | } | |
2863 | qemu_printf("}"); | |
2864 | } | |
2865 | ||
2866 | static void mtree_print_mr_owner(const MemoryRegion *mr) | |
2867 | { | |
2868 | Object *owner = mr->owner; | |
2869 | Object *parent = memory_region_owner((MemoryRegion *)mr); | |
2870 | ||
2871 | if (!owner && !parent) { | |
2872 | qemu_printf(" orphan"); | |
2873 | return; | |
2874 | } | |
2875 | if (owner) { | |
2876 | mtree_expand_owner("owner", owner); | |
2877 | } | |
2878 | if (parent && parent != owner) { | |
2879 | mtree_expand_owner("parent", parent); | |
2880 | } | |
2881 | } | |
2882 | ||
2883 | static void mtree_print_mr(const MemoryRegion *mr, unsigned int level, | |
2884 | hwaddr base, | |
2885 | MemoryRegionListHead *alias_print_queue, | |
2886 | bool owner) | |
2887 | { | |
2888 | MemoryRegionList *new_ml, *ml, *next_ml; | |
2889 | MemoryRegionListHead submr_print_queue; | |
2890 | const MemoryRegion *submr; | |
2891 | unsigned int i; | |
2892 | hwaddr cur_start, cur_end; | |
2893 | ||
2894 | if (!mr) { | |
2895 | return; | |
2896 | } | |
2897 | ||
2898 | for (i = 0; i < level; i++) { | |
2899 | qemu_printf(MTREE_INDENT); | |
2900 | } | |
2901 | ||
2902 | cur_start = base + mr->addr; | |
2903 | cur_end = cur_start + MR_SIZE(mr->size); | |
2904 | ||
2905 | /* | |
2906 | * Try to detect overflow of the memory region's address range. | |
2907 | * This should never happen in practice; when it does, print a | |
2908 | * marker to warn whoever is reading the output. | |
2909 | */ | |
2909 | */ | |
2910 | if (cur_start < base || cur_end < cur_start) { | |
2911 | qemu_printf("[DETECTED OVERFLOW!] "); | |
2912 | } | |
2913 | ||
2914 | if (mr->alias) { | |
2915 | MemoryRegionList *ml; | |
2916 | bool found = false; | |
2917 | ||
2918 | /* check if the alias is already in the queue */ | |
2919 | QTAILQ_FOREACH(ml, alias_print_queue, mrqueue) { | |
2920 | if (ml->mr == mr->alias) { | |
2921 | found = true; | |
2922 | } | |
2923 | } | |
2924 | ||
2925 | if (!found) { | |
2926 | ml = g_new(MemoryRegionList, 1); | |
2927 | ml->mr = mr->alias; | |
2928 | QTAILQ_INSERT_TAIL(alias_print_queue, ml, mrqueue); | |
2929 | } | |
2930 | qemu_printf(TARGET_FMT_plx "-" TARGET_FMT_plx | |
2931 | " (prio %d, %s%s): alias %s @%s " TARGET_FMT_plx | |
2932 | "-" TARGET_FMT_plx "%s", | |
2933 | cur_start, cur_end, | |
2934 | mr->priority, | |
2935 | mr->nonvolatile ? "nv-" : "", | |
2936 | memory_region_type((MemoryRegion *)mr), | |
2937 | memory_region_name(mr), | |
2938 | memory_region_name(mr->alias), | |
2939 | mr->alias_offset, | |
2940 | mr->alias_offset + MR_SIZE(mr->size), | |
2941 | mr->enabled ? "" : " [disabled]"); | |
2942 | if (owner) { | |
2943 | mtree_print_mr_owner(mr); | |
2944 | } | |
2945 | } else { | |
2946 | qemu_printf(TARGET_FMT_plx "-" TARGET_FMT_plx | |
2947 | " (prio %d, %s%s): %s%s", | |
2948 | cur_start, cur_end, | |
2949 | mr->priority, | |
2950 | mr->nonvolatile ? "nv-" : "", | |
2951 | memory_region_type((MemoryRegion *)mr), | |
2952 | memory_region_name(mr), | |
2953 | mr->enabled ? "" : " [disabled]"); | |
2954 | if (owner) { | |
2955 | mtree_print_mr_owner(mr); | |
2956 | } | |
2957 | } | |
2958 | qemu_printf("\n"); | |
2959 | ||
2960 | QTAILQ_INIT(&submr_print_queue); | |
2961 | ||
2962 | QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) { | |
2963 | new_ml = g_new(MemoryRegionList, 1); | |
2964 | new_ml->mr = submr; | |
2965 | QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) { | |
2966 | if (new_ml->mr->addr < ml->mr->addr || | |
2967 | (new_ml->mr->addr == ml->mr->addr && | |
2968 | new_ml->mr->priority > ml->mr->priority)) { | |
2969 | QTAILQ_INSERT_BEFORE(ml, new_ml, mrqueue); | |
2970 | new_ml = NULL; | |
2971 | break; | |
2972 | } | |
2973 | } | |
2974 | if (new_ml) { | |
2975 | QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, mrqueue); | |
2976 | } | |
2977 | } | |
2978 | ||
2979 | QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) { | |
2980 | mtree_print_mr(ml->mr, level + 1, cur_start, | |
2981 | alias_print_queue, owner); | |
2982 | } | |
2983 | ||
2984 | QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, mrqueue, next_ml) { | |
2985 | g_free(ml); | |
2986 | } | |
2987 | } | |
2988 | ||
2989 | struct FlatViewInfo { | |
2990 | int counter; | |
2991 | bool dispatch_tree; | |
2992 | bool owner; | |
2993 | AccelClass *ac; | |
2994 | }; | |
2995 | ||
2996 | static void mtree_print_flatview(gpointer key, gpointer value, | |
2997 | gpointer user_data) | |
2998 | { | |
2999 | FlatView *view = key; | |
3000 | GArray *fv_address_spaces = value; | |
3001 | struct FlatViewInfo *fvi = user_data; | |
3002 | FlatRange *range = &view->ranges[0]; | |
3003 | MemoryRegion *mr; | |
3004 | int n = view->nr; | |
3005 | int i; | |
3006 | AddressSpace *as; | |
3007 | ||
3008 | qemu_printf("FlatView #%d\n", fvi->counter); | |
3009 | ++fvi->counter; | |
3010 | ||
3011 | for (i = 0; i < fv_address_spaces->len; ++i) { | |
3012 | as = g_array_index(fv_address_spaces, AddressSpace*, i); | |
3013 | qemu_printf(" AS \"%s\", root: %s", | |
3014 | as->name, memory_region_name(as->root)); | |
3015 | if (as->root->alias) { | |
3016 | qemu_printf(", alias %s", memory_region_name(as->root->alias)); | |
3017 | } | |
3018 | qemu_printf("\n"); | |
3019 | } | |
3020 | ||
3021 | qemu_printf(" Root memory region: %s\n", | |
3022 | view->root ? memory_region_name(view->root) : "(none)"); | |
3023 | ||
3024 | if (n <= 0) { | |
3025 | qemu_printf(MTREE_INDENT "No rendered FlatView\n\n"); | |
3026 | return; | |
3027 | } | |
3028 | ||
3029 | while (n--) { | |
3030 | mr = range->mr; | |
3031 | if (range->offset_in_region) { | |
3032 | qemu_printf(MTREE_INDENT TARGET_FMT_plx "-" TARGET_FMT_plx | |
3033 | " (prio %d, %s%s): %s @" TARGET_FMT_plx, | |
3034 | int128_get64(range->addr.start), | |
3035 | int128_get64(range->addr.start) | |
3036 | + MR_SIZE(range->addr.size), | |
3037 | mr->priority, | |
3038 | range->nonvolatile ? "nv-" : "", | |
3039 | range->readonly ? "rom" : memory_region_type(mr), | |
3040 | memory_region_name(mr), | |
3041 | range->offset_in_region); | |
3042 | } else { | |
3043 | qemu_printf(MTREE_INDENT TARGET_FMT_plx "-" TARGET_FMT_plx | |
3044 | " (prio %d, %s%s): %s", | |
3045 | int128_get64(range->addr.start), | |
3046 | int128_get64(range->addr.start) | |
3047 | + MR_SIZE(range->addr.size), | |
3048 | mr->priority, | |
3049 | range->nonvolatile ? "nv-" : "", | |
3050 | range->readonly ? "rom" : memory_region_type(mr), | |
3051 | memory_region_name(mr)); | |
3052 | } | |
3053 | if (fvi->owner) { | |
3054 | mtree_print_mr_owner(mr); | |
3055 | } | |
3056 | ||
3057 | if (fvi->ac) { | |
3058 | for (i = 0; i < fv_address_spaces->len; ++i) { | |
3059 | as = g_array_index(fv_address_spaces, AddressSpace*, i); | |
3060 | if (fvi->ac->has_memory(current_machine, as, | |
3061 | int128_get64(range->addr.start), | |
3062 | MR_SIZE(range->addr.size) + 1)) { | |
3063 | qemu_printf(" %s", fvi->ac->name); | |
3064 | } | |
3065 | } | |
3066 | } | |
3067 | qemu_printf("\n"); | |
3068 | range++; | |
3069 | } | |
3070 | ||
3071 | #if !defined(CONFIG_USER_ONLY) | |
3072 | if (fvi->dispatch_tree && view->root) { | |
3073 | mtree_print_dispatch(view->dispatch, view->root); | |
3074 | } | |
3075 | #endif | |
3076 | ||
3077 | qemu_printf("\n"); | |
3078 | } | |
3079 | ||
3080 | static gboolean mtree_info_flatview_free(gpointer key, gpointer value, | |
3081 | gpointer user_data) | |
3082 | { | |
3083 | FlatView *view = key; | |
3084 | GArray *fv_address_spaces = value; | |
3085 | ||
3086 | g_array_unref(fv_address_spaces); | |
3087 | flatview_unref(view); | |
3088 | ||
3089 | return true; | |
3090 | } | |
3091 | ||
3092 | void mtree_info(bool flatview, bool dispatch_tree, bool owner) | |
3093 | { | |
3094 | MemoryRegionListHead ml_head; | |
3095 | MemoryRegionList *ml, *ml2; | |
3096 | AddressSpace *as; | |
3097 | ||
3098 | if (flatview) { | |
3099 | FlatView *view; | |
3100 | struct FlatViewInfo fvi = { | |
3101 | .counter = 0, | |
3102 | .dispatch_tree = dispatch_tree, | |
3103 | .owner = owner, | |
3104 | }; | |
3105 | GArray *fv_address_spaces; | |
3106 | GHashTable *views = g_hash_table_new(g_direct_hash, g_direct_equal); | |
3107 | AccelClass *ac = ACCEL_GET_CLASS(current_machine->accelerator); | |
3108 | ||
3109 | if (ac->has_memory) { | |
3110 | fvi.ac = ac; | |
3111 | } | |
3112 | ||
3113 | /* Gather all FVs in one table */ | |
3114 | QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) { | |
3115 | view = address_space_get_flatview(as); | |
3116 | ||
3117 | fv_address_spaces = g_hash_table_lookup(views, view); | |
3118 | if (!fv_address_spaces) { | |
3119 | fv_address_spaces = g_array_new(false, false, sizeof(as)); | |
3120 | g_hash_table_insert(views, view, fv_address_spaces); | |
3121 | } | |
3122 | ||
3123 | g_array_append_val(fv_address_spaces, as); | |
3124 | } | |
3125 | ||
3126 | /* Print */ | |
3127 | g_hash_table_foreach(views, mtree_print_flatview, &fvi); | |
3128 | ||
3129 | /* Free */ | |
3130 | g_hash_table_foreach_remove(views, mtree_info_flatview_free, NULL); | |
3131 | g_hash_table_unref(views); | |
3132 | ||
3133 | return; | |
3134 | } | |
3135 | ||
3136 | QTAILQ_INIT(&ml_head); | |
3137 | ||
3138 | QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) { | |
3139 | qemu_printf("address-space: %s\n", as->name); | |
3140 | mtree_print_mr(as->root, 1, 0, &ml_head, owner); | |
3141 | qemu_printf("\n"); | |
3142 | } | |
3143 | ||
3144 | /* print aliased regions */ | |
3145 | QTAILQ_FOREACH(ml, &ml_head, mrqueue) { | |
3146 | qemu_printf("memory-region: %s\n", memory_region_name(ml->mr)); | |
3147 | mtree_print_mr(ml->mr, 1, 0, &ml_head, owner); | |
3148 | qemu_printf("\n"); | |
3149 | } | |
3150 | ||
3151 | QTAILQ_FOREACH_SAFE(ml, &ml_head, mrqueue, ml2) { | |
3152 | g_free(ml); | |
3153 | } | |
3154 | } | |
3155 | ||
3156 | void memory_region_init_ram(MemoryRegion *mr, | |
3157 | Object *owner, | |
3158 | const char *name, | |
3159 | uint64_t size, | |
3160 | Error **errp) | |
3161 | { | |
3162 | DeviceState *owner_dev; | |
3163 | Error *err = NULL; | |
3164 | ||
3165 | memory_region_init_ram_nomigrate(mr, owner, name, size, &err); | |
3166 | if (err) { | |
3167 | error_propagate(errp, err); | |
3168 | return; | |
3169 | } | |
3170 | /* This will assert if owner is neither NULL nor a DeviceState. | |
3171 | * We only want the owner here for the purposes of defining a | |
3172 | * unique name for migration. TODO: Ideally we should implement | |
3173 | * a naming scheme for Objects which are not DeviceStates, in | |
3174 | * which case we can relax this restriction. | |
3175 | */ | |
3176 | owner_dev = DEVICE(owner); | |
3177 | vmstate_register_ram(mr, owner_dev); | |
3178 | } | |
3179 | ||
3180 | void memory_region_init_rom(MemoryRegion *mr, | |
3181 | Object *owner, | |
3182 | const char *name, | |
3183 | uint64_t size, | |
3184 | Error **errp) | |
3185 | { | |
3186 | DeviceState *owner_dev; | |
3187 | Error *err = NULL; | |
3188 | ||
3189 | memory_region_init_rom_nomigrate(mr, owner, name, size, &err); | |
3190 | if (err) { | |
3191 | error_propagate(errp, err); | |
3192 | return; | |
3193 | } | |
3194 | /* This will assert if owner is neither NULL nor a DeviceState. | |
3195 | * We only want the owner here for the purposes of defining a | |
3196 | * unique name for migration. TODO: Ideally we should implement | |
3197 | * a naming scheme for Objects which are not DeviceStates, in | |
3198 | * which case we can relax this restriction. | |
3199 | */ | |
3200 | owner_dev = DEVICE(owner); | |
3201 | vmstate_register_ram(mr, owner_dev); | |
3202 | } | |
3203 | ||
3204 | void memory_region_init_rom_device(MemoryRegion *mr, | |
3205 | Object *owner, | |
3206 | const MemoryRegionOps *ops, | |
3207 | void *opaque, | |
3208 | const char *name, | |
3209 | uint64_t size, | |
3210 | Error **errp) | |
3211 | { | |
3212 | DeviceState *owner_dev; | |
3213 | Error *err = NULL; | |
3214 | ||
3215 | memory_region_init_rom_device_nomigrate(mr, owner, ops, opaque, | |
3216 | name, size, &err); | |
3217 | if (err) { | |
3218 | error_propagate(errp, err); | |
3219 | return; | |
3220 | } | |
3221 | /* This will assert if owner is neither NULL nor a DeviceState. | |
3222 | * We only want the owner here for the purposes of defining a | |
3223 | * unique name for migration. TODO: Ideally we should implement | |
3224 | * a naming scheme for Objects which are not DeviceStates, in | |
3225 | * which case we can relax this restriction. | |
3226 | */ | |
3227 | owner_dev = DEVICE(owner); | |
3228 | vmstate_register_ram(mr, owner_dev); | |
3229 | } | |
3230 | ||
3231 | static const TypeInfo memory_region_info = { | |
3232 | .parent = TYPE_OBJECT, | |
3233 | .name = TYPE_MEMORY_REGION, | |
3234 | .class_size = sizeof(MemoryRegionClass), | |
3235 | .instance_size = sizeof(MemoryRegion), | |
3236 | .instance_init = memory_region_initfn, | |
3237 | .instance_finalize = memory_region_finalize, | |
3238 | }; | |
3239 | ||
3240 | static const TypeInfo iommu_memory_region_info = { | |
3241 | .parent = TYPE_MEMORY_REGION, | |
3242 | .name = TYPE_IOMMU_MEMORY_REGION, | |
3243 | .class_size = sizeof(IOMMUMemoryRegionClass), | |
3244 | .instance_size = sizeof(IOMMUMemoryRegion), | |
3245 | .instance_init = iommu_memory_region_initfn, | |
3246 | .abstract = true, | |
3247 | }; | |
3248 | ||
3249 | static void memory_register_types(void) | |
3250 | { | |
3251 | type_register_static(&memory_region_info); | |
3252 | type_register_static(&iommu_memory_region_info); | |
3253 | } | |
3254 | ||
3255 | type_init(memory_register_types) |