/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "qapi/visitor.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/qemu-print.h"
#include "qom/object.h"
#include "trace-root.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "sysemu/tcg.h"
#include "sysemu/accel.h"
#include "hw/boards.h"
#include "migration/vmstate.h"
//#define DEBUG_UNASSIGNED

static unsigned memory_region_transaction_depth;
static bool memory_region_update_pending;
static bool ioeventfd_update_pending;
bool global_dirty_log;

static QTAILQ_HEAD(, MemoryListener) memory_listeners
    = QTAILQ_HEAD_INITIALIZER(memory_listeners);

static QTAILQ_HEAD(, AddressSpace) address_spaces
    = QTAILQ_HEAD_INITIALIZER(address_spaces);

static GHashTable *flat_views;
typedef struct AddrRange AddrRange;

/*
 * Note that signed integers are needed for negative offsetting in aliases
 * (large MemoryRegion::alias_offset).
 */
struct AddrRange {
    Int128 start;
    Int128 size;
};

static AddrRange addrrange_make(Int128 start, Int128 size)
{
    return (AddrRange) { start, size };
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
}

static Int128 addrrange_end(AddrRange r)
{
    return int128_add(r.start, r.size);
}

static AddrRange addrrange_shift(AddrRange range, Int128 delta)
{
    int128_addto(&range.start, delta);
    return range;
}

static bool addrrange_contains(AddrRange range, Int128 addr)
{
    return int128_ge(addr, range.start)
        && int128_lt(addr, addrrange_end(range));
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return addrrange_contains(r1, r2.start)
        || addrrange_contains(r2, r1.start);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    Int128 start = int128_max(r1.start, r2.start);
    Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
    return addrrange_make(start, int128_sub(end, start));
}
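
/*
 * Illustrative sketch (not part of the original file): how the AddrRange
 * helpers compose. [0x1000, 0x3000) and [0x2000, 0x4000) intersect in
 * [0x2000, 0x3000). All "example"/"check" names here are hypothetical.
 */
static inline bool addrrange_example_check(void)
{
    AddrRange a = addrrange_make(int128_make64(0x1000), int128_make64(0x2000));
    AddrRange b = addrrange_make(int128_make64(0x2000), int128_make64(0x2000));
    AddrRange i;

    if (!addrrange_intersects(a, b)) {
        return false;
    }
    i = addrrange_intersection(a, b);
    /* start == max of the starts, size == min of the ends minus that start */
    return int128_eq(i.start, int128_make64(0x2000))
        && int128_eq(i.size, int128_make64(0x1000));
}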
enum ListenerDirection { Forward, Reverse };

#define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...)    \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners, link) { \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

#define MEMORY_LISTENER_CALL(_as, _callback, _direction, _section, _args...) \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &(_as)->listeners, link_as) {     \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &(_as)->listeners, link_as) { \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

/* No need to ref/unref .mr, the FlatRange keeps it alive. */
#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...)  \
    do {                                                                \
        MemoryRegionSection mrs = section_from_flat_range(fr,           \
                address_space_to_flatview(as));                         \
        MEMORY_LISTENER_CALL(as, callback, dir, &mrs, ##_args);         \
    } while(0)
struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

struct MemoryRegionIoeventfd {
    AddrRange addr;
    bool match_data;
    uint64_t data;
    EventNotifier *e;
};

static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd *a,
                                           MemoryRegionIoeventfd *b)
{
    if (int128_lt(a->addr.start, b->addr.start)) {
        return true;
    } else if (int128_gt(a->addr.start, b->addr.start)) {
        return false;
    } else if (int128_lt(a->addr.size, b->addr.size)) {
        return true;
    } else if (int128_gt(a->addr.size, b->addr.size)) {
        return false;
    } else if (a->match_data < b->match_data) {
        return true;
    } else if (a->match_data > b->match_data) {
        return false;
    } else if (a->match_data) {
        if (a->data < b->data) {
            return true;
        } else if (a->data > b->data) {
            return false;
        }
    }
    if (a->e < b->e) {
        return true;
    } else if (a->e > b->e) {
        return false;
    }
    return false;
}

static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd *a,
                                          MemoryRegionIoeventfd *b)
{
    return !memory_region_ioeventfd_before(a, b)
        && !memory_region_ioeventfd_before(b, a);
}
/* Range of memory in the global map.  Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    hwaddr offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
    bool romd_mode;
    bool readonly;
    bool nonvolatile;
    int has_coalesced_range;
};

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

static inline MemoryRegionSection
section_from_flat_range(FlatRange *fr, FlatView *fv)
{
    return (MemoryRegionSection) {
        .mr = fr->mr,
        .fv = fv,
        .offset_within_region = fr->offset_in_region,
        .size = fr->addr.size,
        .offset_within_address_space = int128_get64(fr->addr.start),
        .readonly = fr->readonly,
        .nonvolatile = fr->nonvolatile,
    };
}
static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region
        && a->romd_mode == b->romd_mode
        && a->readonly == b->readonly
        && a->nonvolatile == b->nonvolatile;
}

static FlatView *flatview_new(MemoryRegion *mr_root)
{
    FlatView *view;

    view = g_new0(FlatView, 1);
    view->ref = 1;
    view->root = mr_root;
    memory_region_ref(mr_root);
    trace_flatview_new(view, mr_root);

    return view;
}
/* Insert a range into a given position.  Caller is responsible for maintaining
 * sorted order and making sure accessed ranges don't overlap.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = g_realloc(view->ranges,
                                 view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    memory_region_ref(range->mr);
    ++view->nr;
}
static void flatview_destroy(FlatView *view)
{
    int i;

    trace_flatview_destroy(view, view->root);
    if (view->dispatch) {
        address_space_dispatch_free(view->dispatch);
    }
    for (i = 0; i < view->nr; i++) {
        memory_region_unref(view->ranges[i].mr);
    }
    g_free(view->ranges);
    memory_region_unref(view->root);
    g_free(view);
}

static bool flatview_ref(FlatView *view)
{
    return atomic_fetch_inc_nonzero(&view->ref) > 0;
}

void flatview_unref(FlatView *view)
{
    if (atomic_fetch_dec(&view->ref) == 1) {
        trace_flatview_destroy_rcu(view, view->root);
        assert(view->root);
        call_rcu(view, flatview_destroy, rcu);
    }
}
static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return int128_eq(addrrange_end(r1->addr), r2->addr.start)
        && r1->mr == r2->mr
        && int128_eq(int128_add(int128_make64(r1->offset_in_region),
                                r1->addr.size),
                     int128_make64(r2->offset_in_region))
        && r1->dirty_log_mask == r2->dirty_log_mask
        && r1->romd_mode == r2->romd_mode
        && r1->readonly == r2->readonly
        && r1->nonvolatile == r2->nonvolatile;
}

/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j, k;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
            ++j;
        }
        ++i;
        for (k = i; k < j; k++) {
            memory_region_unref(view->ranges[k].mr);
        }
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}
static bool memory_region_big_endian(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness != DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static bool memory_region_wrong_endianness(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness == DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size)
{
    if (memory_region_wrong_endianness(mr)) {
        switch (size) {
        case 1:
            break;
        case 2:
            *data = bswap16(*data);
            break;
        case 4:
            *data = bswap32(*data);
            break;
        case 8:
            *data = bswap64(*data);
            break;
        default:
            abort();
        }
    }
}
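
/*
 * Worked example (illustrative, not in the original file): on a
 * little-endian target, a 4-byte read from a DEVICE_BIG_ENDIAN region
 * that returns 0x11223344 is byte-swapped to 0x44332211 before the value
 * reaches the CPU; writes are swapped symmetrically on the way in.
 */
static inline uint64_t adjust_endianness_example(MemoryRegion *mr)
{
    uint64_t data = 0x11223344;

    adjust_endianness(mr, &data, 4);   /* bswap32 iff endianness mismatches */
    return data;
}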
385 static inline void memory_region_shift_read_access(uint64_t *value,
391 *value |= (tmp & mask) << shift;
393 *value |= (tmp & mask) >> -shift;
397 static inline uint64_t memory_region_shift_write_access(uint64_t *value,
404 tmp = (*value >> shift) & mask;
406 tmp = (*value << -shift) & mask;
412 static hwaddr memory_region_to_absolute_addr(MemoryRegion *mr, hwaddr offset)
415 hwaddr abs_addr = offset;
417 abs_addr += mr->addr;
418 for (root = mr; root->container; ) {
419 root = root->container;
420 abs_addr += root->addr;
426 static int get_cpu_index(void)
429 return current_cpu->cpu_index;
434 static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
444 tmp = mr->ops->read(mr->opaque, addr, size);
446 trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
447 } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
452 } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
453 hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
454 trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
456 memory_region_shift_read_access(value, shift, mask, tmp);
460 static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
471 r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs);
473 trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
474 } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
479 } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
480 hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
481 trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
483 memory_region_shift_read_access(value, shift, mask, tmp);
487 static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
495 uint64_t tmp = memory_region_shift_write_access(value, shift, mask);
498 trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
499 } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
504 } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
505 hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
506 trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
508 mr->ops->write(mr->opaque, addr, tmp, size);
512 static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
520 uint64_t tmp = memory_region_shift_write_access(value, shift, mask);
523 trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
524 } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
529 } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
530 hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
531 trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
533 return mr->ops->write_with_attrs(mr->opaque, addr, tmp, size, attrs);
static MemTxResult access_with_adjusted_size(hwaddr addr,
                                             uint64_t *value,
                                             unsigned size,
                                             unsigned access_size_min,
                                             unsigned access_size_max,
                                             MemTxResult (*access_fn)
                                                         (MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          signed shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs),
                                             MemoryRegion *mr,
                                             MemTxAttrs attrs)
{
    uint64_t access_mask;
    unsigned access_size;
    unsigned i;
    MemTxResult r = MEMTX_OK;

    if (!access_size_min) {
        access_size_min = 1;
    }
    if (!access_size_max) {
        access_size_max = 4;
    }

    /* FIXME: support unaligned access? */
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = MAKE_64BIT_MASK(0, access_size * 8);
    if (memory_region_big_endian(mr)) {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size,
                           (size - access_size - i) * 8, access_mask, attrs);
        }
    } else {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size, i * 8,
                           access_mask, attrs);
        }
    }
    return r;
}
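
/*
 * Worked example (illustrative): a size == 8 read from a device whose
 * impl.max_access_size is 4 is split into two 4-byte calls. On a
 * big-endian device the shifts are (8 - 4 - 0) * 8 = 32 then
 * (8 - 4 - 4) * 8 = 0, so the first beat fills the high half of *value;
 * on a little-endian device the shifts are i * 8 = 0 then 32.
 */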
581 static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
585 while (mr->container) {
588 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
589 if (mr == as->root) {
/* Render a memory region into the global view.  Ranges in @view obscure
 * ranges in @mr.
 */
599 static void render_memory_region(FlatView *view,
606 MemoryRegion *subregion;
608 hwaddr offset_in_region;
618 int128_addto(&base, int128_make64(mr->addr));
619 readonly |= mr->readonly;
620 nonvolatile |= mr->nonvolatile;
622 tmp = addrrange_make(base, mr->size);
624 if (!addrrange_intersects(tmp, clip)) {
628 clip = addrrange_intersection(tmp, clip);
631 int128_subfrom(&base, int128_make64(mr->alias->addr));
632 int128_subfrom(&base, int128_make64(mr->alias_offset));
633 render_memory_region(view, mr->alias, base, clip,
634 readonly, nonvolatile);
638 /* Render subregions in priority order. */
639 QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
640 render_memory_region(view, subregion, base, clip,
641 readonly, nonvolatile);
644 if (!mr->terminates) {
648 offset_in_region = int128_get64(int128_sub(clip.start, base));
653 fr.dirty_log_mask = memory_region_get_dirty_log_mask(mr);
654 fr.romd_mode = mr->romd_mode;
655 fr.readonly = readonly;
656 fr.nonvolatile = nonvolatile;
657 fr.has_coalesced_range = 0;
659 /* Render the region itself into any gaps left by the current view. */
660 for (i = 0; i < view->nr && int128_nz(remain); ++i) {
661 if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
664 if (int128_lt(base, view->ranges[i].addr.start)) {
665 now = int128_min(remain,
666 int128_sub(view->ranges[i].addr.start, base));
667 fr.offset_in_region = offset_in_region;
668 fr.addr = addrrange_make(base, now);
669 flatview_insert(view, i, &fr);
671 int128_addto(&base, now);
672 offset_in_region += int128_get64(now);
673 int128_subfrom(&remain, now);
675 now = int128_sub(int128_min(int128_add(base, remain),
676 addrrange_end(view->ranges[i].addr)),
678 int128_addto(&base, now);
679 offset_in_region += int128_get64(now);
680 int128_subfrom(&remain, now);
682 if (int128_nz(remain)) {
683 fr.offset_in_region = offset_in_region;
684 fr.addr = addrrange_make(base, remain);
685 flatview_insert(view, i, &fr);
689 static MemoryRegion *memory_region_get_flatview_root(MemoryRegion *mr)
691 while (mr->enabled) {
693 if (!mr->alias_offset && int128_ge(mr->size, mr->alias->size)) {
694 /* The alias is included in its entirety. Use it as
695 * the "real" root, so that we can share more FlatViews.
700 } else if (!mr->terminates) {
701 unsigned int found = 0;
702 MemoryRegion *child, *next = NULL;
703 QTAILQ_FOREACH(child, &mr->subregions, subregions_link) {
704 if (child->enabled) {
709 if (!child->addr && int128_ge(mr->size, child->size)) {
710 /* A child is included in its entirety. If it's the only
711 * enabled one, use it in the hope of finding an alias down the
712 * way. This will also let us share FlatViews.
/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView *generate_memory_topology(MemoryRegion *mr)
{
    int i;
    FlatView *view;

    view = flatview_new(mr);

    if (mr) {
        render_memory_region(view, mr, int128_zero(),
                             addrrange_make(int128_zero(), int128_2_64()),
                             false, false);
    }
    flatview_simplify(view);

    view->dispatch = address_space_dispatch_new(view);
    for (i = 0; i < view->nr; i++) {
        MemoryRegionSection mrs =
            section_from_flat_range(&view->ranges[i], view);
        flatview_add_to_dispatch(view, &mrs);
    }
    address_space_dispatch_compact(view->dispatch);
    g_hash_table_replace(flat_views, mr, view);

    return view;
}
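
/*
 * Worked example (illustrative): a root container holding RAM at
 * [0x0, 0x8000) and MMIO at [0x8000, 0x9000) renders into two disjoint
 * FlatRanges, one per terminating region. Overlaps are resolved by
 * subregion priority during rendering, and flatview_simplify() then
 * merges adjacent ranges that can_merge() accepts.
 */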
static void address_space_add_del_ioeventfds(AddressSpace *as,
                                             MemoryRegionIoeventfd *fds_new,
                                             unsigned fds_new_nb,
                                             MemoryRegionIoeventfd *fds_old,
                                             unsigned fds_old_nb)
{
    unsigned iold, inew;
    MemoryRegionIoeventfd *fd;
    MemoryRegionSection section;

    /* Generate a symmetric difference of the old and new fd sets, adding
     * and deleting as necessary.
     */

    iold = 0; inew = 0;
    while (iold < fds_old_nb || inew < fds_new_nb) {
        if (iold < fds_old_nb
            && (inew == fds_new_nb
                || memory_region_ioeventfd_before(&fds_old[iold],
                                                  &fds_new[inew]))) {
            fd = &fds_old[iold];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_del, Forward, &section,
                                 fd->match_data, fd->data, fd->e);
            ++iold;
        } else if (inew < fds_new_nb
                   && (iold == fds_old_nb
                       || memory_region_ioeventfd_before(&fds_new[inew],
                                                         &fds_old[iold]))) {
            fd = &fds_new[inew];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_add, Reverse, &section,
                                 fd->match_data, fd->data, fd->e);
            ++inew;
        } else {
            ++iold;
            ++inew;
        }
    }
}
FlatView *address_space_get_flatview(AddressSpace *as)
{
    FlatView *view;

    rcu_read_lock();
    do {
        view = address_space_to_flatview(as);
        /* If somebody has replaced as->current_map concurrently,
         * flatview_ref returns false.
         */
    } while (!flatview_ref(view));
    rcu_read_unlock();
    return view;
}
824 static void address_space_update_ioeventfds(AddressSpace *as)
828 unsigned ioeventfd_nb = 0;
829 MemoryRegionIoeventfd *ioeventfds = NULL;
833 view = address_space_get_flatview(as);
834 FOR_EACH_FLAT_RANGE(fr, view) {
835 for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
836 tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
837 int128_sub(fr->addr.start,
838 int128_make64(fr->offset_in_region)));
839 if (addrrange_intersects(fr->addr, tmp)) {
841 ioeventfds = g_realloc(ioeventfds,
842 ioeventfd_nb * sizeof(*ioeventfds));
843 ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
844 ioeventfds[ioeventfd_nb-1].addr = tmp;
849 address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
850 as->ioeventfds, as->ioeventfd_nb);
852 g_free(as->ioeventfds);
853 as->ioeventfds = ioeventfds;
854 as->ioeventfd_nb = ioeventfd_nb;
855 flatview_unref(view);
858 static void flat_range_coalesced_io_del(FlatRange *fr, AddressSpace *as)
860 if (!fr->has_coalesced_range) {
864 if (--fr->has_coalesced_range > 0) {
868 MEMORY_LISTENER_UPDATE_REGION(fr, as, Reverse, coalesced_io_del,
869 int128_get64(fr->addr.start),
870 int128_get64(fr->addr.size));
873 static void flat_range_coalesced_io_add(FlatRange *fr, AddressSpace *as)
875 MemoryRegion *mr = fr->mr;
876 CoalescedMemoryRange *cmr;
879 if (QTAILQ_EMPTY(&mr->coalesced)) {
883 if (fr->has_coalesced_range++) {
887 QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
888 tmp = addrrange_shift(cmr->addr,
889 int128_sub(fr->addr.start,
890 int128_make64(fr->offset_in_region)));
891 if (!addrrange_intersects(tmp, fr->addr)) {
894 tmp = addrrange_intersection(tmp, fr->addr);
895 MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, coalesced_io_add,
896 int128_get64(tmp.start),
897 int128_get64(tmp.size));
901 static void address_space_update_topology_pass(AddressSpace *as,
902 const FlatView *old_view,
903 const FlatView *new_view,
907 FlatRange *frold, *frnew;
909 /* Generate a symmetric difference of the old and new memory maps.
910 * Kill ranges in the old map, and instantiate ranges in the new map.
913 while (iold < old_view->nr || inew < new_view->nr) {
914 if (iold < old_view->nr) {
915 frold = &old_view->ranges[iold];
919 if (inew < new_view->nr) {
920 frnew = &new_view->ranges[inew];
927 || int128_lt(frold->addr.start, frnew->addr.start)
928 || (int128_eq(frold->addr.start, frnew->addr.start)
929 && !flatrange_equal(frold, frnew)))) {
930 /* In old but not in new, or in both but attributes changed. */
933 flat_range_coalesced_io_del(frold, as);
934 MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
938 } else if (frold && frnew && flatrange_equal(frold, frnew)) {
939 /* In both and unchanged (except logging may have changed) */
942 MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
943 if (frnew->dirty_log_mask & ~frold->dirty_log_mask) {
944 MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start,
945 frold->dirty_log_mask,
946 frnew->dirty_log_mask);
948 if (frold->dirty_log_mask & ~frnew->dirty_log_mask) {
949 MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop,
950 frold->dirty_log_mask,
951 frnew->dirty_log_mask);
961 MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
962 flat_range_coalesced_io_add(frnew, as);
970 static void flatviews_init(void)
972 static FlatView *empty_view;
978 flat_views = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL,
979 (GDestroyNotify) flatview_unref);
981 empty_view = generate_memory_topology(NULL);
982 /* We keep it alive forever in the global variable. */
983 flatview_ref(empty_view);
985 g_hash_table_replace(flat_views, NULL, empty_view);
986 flatview_ref(empty_view);
990 static void flatviews_reset(void)
995 g_hash_table_unref(flat_views);
1000 /* Render unique FVs */
1001 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
1002 MemoryRegion *physmr = memory_region_get_flatview_root(as->root);
1004 if (g_hash_table_lookup(flat_views, physmr)) {
1008 generate_memory_topology(physmr);
1012 static void address_space_set_flatview(AddressSpace *as)
1014 FlatView *old_view = address_space_to_flatview(as);
1015 MemoryRegion *physmr = memory_region_get_flatview_root(as->root);
1016 FlatView *new_view = g_hash_table_lookup(flat_views, physmr);
1020 if (old_view == new_view) {
1025 flatview_ref(old_view);
1028 flatview_ref(new_view);
1030 if (!QTAILQ_EMPTY(&as->listeners)) {
1031 FlatView tmpview = { .nr = 0 }, *old_view2 = old_view;
1034 old_view2 = &tmpview;
1036 address_space_update_topology_pass(as, old_view2, new_view, false);
1037 address_space_update_topology_pass(as, old_view2, new_view, true);
1040 /* Writes are protected by the BQL. */
1041 atomic_rcu_set(&as->current_map, new_view);
1043 flatview_unref(old_view);
1046 /* Note that all the old MemoryRegions are still alive up to this
1047 * point. This relieves most MemoryListeners from the need to
1048 * ref/unref the MemoryRegions they get---unless they use them
1049 * outside the iothread mutex, in which case precise reference
1050 * counting is necessary.
1053 flatview_unref(old_view);
1057 static void address_space_update_topology(AddressSpace *as)
1059 MemoryRegion *physmr = memory_region_get_flatview_root(as->root);
1062 if (!g_hash_table_lookup(flat_views, physmr)) {
1063 generate_memory_topology(physmr);
1065 address_space_set_flatview(as);
void memory_region_transaction_begin(void)
{
    qemu_flush_coalesced_mmio_buffer();
    ++memory_region_transaction_depth;
}

void memory_region_transaction_commit(void)
{
    AddressSpace *as;

    assert(memory_region_transaction_depth);
    assert(qemu_mutex_iothread_locked());

    --memory_region_transaction_depth;
    if (!memory_region_transaction_depth) {
        if (memory_region_update_pending) {
            flatviews_reset();

            MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);

            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_set_flatview(as);
                address_space_update_ioeventfds(as);
            }
            memory_region_update_pending = false;
            ioeventfd_update_pending = false;
            MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
        } else if (ioeventfd_update_pending) {
            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_ioeventfds(as);
            }
            ioeventfd_update_pending = false;
        }
    }
}
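
/*
 * Usage sketch (illustrative; the device and variable names are
 * hypothetical): batching two topology changes in one transaction so that
 * listeners see a single flat-view rebuild instead of two.
 */
static inline void example_remap_bar(MemoryRegion *sysmem, MemoryRegion *bar,
                                     hwaddr new_addr)
{
    memory_region_transaction_begin();
    memory_region_del_subregion(sysmem, bar);
    memory_region_add_subregion(sysmem, new_addr, bar);
    memory_region_transaction_commit();
}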
1104 static void memory_region_destructor_none(MemoryRegion *mr)
1108 static void memory_region_destructor_ram(MemoryRegion *mr)
1110 qemu_ram_free(mr->ram_block);
static bool memory_region_need_escape(char c)
{
    return c == '/' || c == '[' || c == '\\' || c == ']';
}

static char *memory_region_escape_name(const char *name)
{
    const char *p;
    char *escaped, *q;
    uint8_t c;
    size_t bytes = 0;

    for (p = name; *p; p++) {
        bytes += memory_region_need_escape(*p) ? 4 : 1;
    }
    if (bytes == p - name) {
        return g_memdup(name, bytes + 1);
    }

    escaped = g_malloc(bytes + 1);
    for (p = name, q = escaped; *p; p++) {
        c = *p;
        if (unlikely(memory_region_need_escape(c))) {
            *q++ = '\\';
            *q++ = 'x';
            *q++ = "0123456789abcdef"[c >> 4];
            c = "0123456789abcdef"[c & 15];
        }
        *q++ = c;
    }
    *q = 0;
    return escaped;
}
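
/*
 * Worked example (illustrative): memory_region_escape_name("pci[0]/bar")
 * returns "pci\x5b0\x5d\x2fbar" -- each of '/', '[', '\\' and ']' expands
 * to a four-byte "\xNN" sequence so the name remains a single QOM path
 * component.
 */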
1147 static void memory_region_do_init(MemoryRegion *mr,
1152 mr->size = int128_make64(size);
1153 if (size == UINT64_MAX) {
1154 mr->size = int128_2_64();
1156 mr->name = g_strdup(name);
1158 mr->ram_block = NULL;
1161 char *escaped_name = memory_region_escape_name(name);
1162 char *name_array = g_strdup_printf("%s[*]", escaped_name);
1165 owner = container_get(qdev_get_machine(), "/unattached");
1168 object_property_add_child(owner, name_array, OBJECT(mr), &error_abort);
1169 object_unref(OBJECT(mr));
1171 g_free(escaped_name);
1175 void memory_region_init(MemoryRegion *mr,
1180 object_initialize(mr, sizeof(*mr), TYPE_MEMORY_REGION);
1181 memory_region_do_init(mr, owner, name, size);
1184 static void memory_region_get_addr(Object *obj, Visitor *v, const char *name,
1185 void *opaque, Error **errp)
1187 MemoryRegion *mr = MEMORY_REGION(obj);
1188 uint64_t value = mr->addr;
1190 visit_type_uint64(v, name, &value, errp);
1193 static void memory_region_get_container(Object *obj, Visitor *v,
1194 const char *name, void *opaque,
1197 MemoryRegion *mr = MEMORY_REGION(obj);
1198 gchar *path = (gchar *)"";
1200 if (mr->container) {
1201 path = object_get_canonical_path(OBJECT(mr->container));
1203 visit_type_str(v, name, &path, errp);
1204 if (mr->container) {
1209 static Object *memory_region_resolve_container(Object *obj, void *opaque,
1212 MemoryRegion *mr = MEMORY_REGION(obj);
1214 return OBJECT(mr->container);
1217 static void memory_region_get_priority(Object *obj, Visitor *v,
1218 const char *name, void *opaque,
1221 MemoryRegion *mr = MEMORY_REGION(obj);
1222 int32_t value = mr->priority;
1224 visit_type_int32(v, name, &value, errp);
1227 static void memory_region_get_size(Object *obj, Visitor *v, const char *name,
1228 void *opaque, Error **errp)
1230 MemoryRegion *mr = MEMORY_REGION(obj);
1231 uint64_t value = memory_region_size(mr);
1233 visit_type_uint64(v, name, &value, errp);
1236 static void memory_region_initfn(Object *obj)
1238 MemoryRegion *mr = MEMORY_REGION(obj);
1241 mr->ops = &unassigned_mem_ops;
1243 mr->romd_mode = true;
1244 mr->global_locking = true;
1245 mr->destructor = memory_region_destructor_none;
1246 QTAILQ_INIT(&mr->subregions);
1247 QTAILQ_INIT(&mr->coalesced);
1249 op = object_property_add(OBJECT(mr), "container",
1250 "link<" TYPE_MEMORY_REGION ">",
1251 memory_region_get_container,
1252 NULL, /* memory_region_set_container */
1253 NULL, NULL, &error_abort);
1254 op->resolve = memory_region_resolve_container;
1256 object_property_add(OBJECT(mr), "addr", "uint64",
1257 memory_region_get_addr,
1258 NULL, /* memory_region_set_addr */
1259 NULL, NULL, &error_abort);
1260 object_property_add(OBJECT(mr), "priority", "uint32",
1261 memory_region_get_priority,
1262 NULL, /* memory_region_set_priority */
1263 NULL, NULL, &error_abort);
1264 object_property_add(OBJECT(mr), "size", "uint64",
1265 memory_region_get_size,
1266 NULL, /* memory_region_set_size, */
1267 NULL, NULL, &error_abort);
1270 static void iommu_memory_region_initfn(Object *obj)
1272 MemoryRegion *mr = MEMORY_REGION(obj);
1274 mr->is_iommu = true;
static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
    if (current_cpu != NULL) {
        bool is_exec = current_cpu->mem_io_access_type == MMU_INST_FETCH;
        cpu_unassigned_access(current_cpu, addr, false, is_exec, 0, size);
    }
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, true, false, 0, size);
    }
}
1301 static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
1302 unsigned size, bool is_write,
1308 const MemoryRegionOps unassigned_mem_ops = {
1309 .valid.accepts = unassigned_mem_accepts,
1310 .endianness = DEVICE_NATIVE_ENDIAN,
1313 static uint64_t memory_region_ram_device_read(void *opaque,
1314 hwaddr addr, unsigned size)
1316 MemoryRegion *mr = opaque;
1317 uint64_t data = (uint64_t)~0;
1321 data = *(uint8_t *)(mr->ram_block->host + addr);
1324 data = *(uint16_t *)(mr->ram_block->host + addr);
1327 data = *(uint32_t *)(mr->ram_block->host + addr);
1330 data = *(uint64_t *)(mr->ram_block->host + addr);
1334 trace_memory_region_ram_device_read(get_cpu_index(), mr, addr, data, size);
1339 static void memory_region_ram_device_write(void *opaque, hwaddr addr,
1340 uint64_t data, unsigned size)
1342 MemoryRegion *mr = opaque;
1344 trace_memory_region_ram_device_write(get_cpu_index(), mr, addr, data, size);
1348 *(uint8_t *)(mr->ram_block->host + addr) = (uint8_t)data;
1351 *(uint16_t *)(mr->ram_block->host + addr) = (uint16_t)data;
1354 *(uint32_t *)(mr->ram_block->host + addr) = (uint32_t)data;
1357 *(uint64_t *)(mr->ram_block->host + addr) = data;
1362 static const MemoryRegionOps ram_device_mem_ops = {
1363 .read = memory_region_ram_device_read,
1364 .write = memory_region_ram_device_write,
1365 .endianness = DEVICE_HOST_ENDIAN,
1367 .min_access_size = 1,
1368 .max_access_size = 8,
1372 .min_access_size = 1,
1373 .max_access_size = 8,
1378 bool memory_region_access_valid(MemoryRegion *mr,
1384 int access_size_min, access_size_max;
1387 if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
1391 if (!mr->ops->valid.accepts) {
1395 access_size_min = mr->ops->valid.min_access_size;
1396 if (!mr->ops->valid.min_access_size) {
1397 access_size_min = 1;
1400 access_size_max = mr->ops->valid.max_access_size;
1401 if (!mr->ops->valid.max_access_size) {
1402 access_size_max = 4;
1405 access_size = MAX(MIN(size, access_size_max), access_size_min);
1406 for (i = 0; i < size; i += access_size) {
1407 if (!mr->ops->valid.accepts(mr->opaque, addr + i, access_size,
1416 static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr,
1424 if (mr->ops->read) {
1425 return access_with_adjusted_size(addr, pval, size,
1426 mr->ops->impl.min_access_size,
1427 mr->ops->impl.max_access_size,
1428 memory_region_read_accessor,
1431 return access_with_adjusted_size(addr, pval, size,
1432 mr->ops->impl.min_access_size,
1433 mr->ops->impl.max_access_size,
1434 memory_region_read_with_attrs_accessor,
1439 MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
1447 if (!memory_region_access_valid(mr, addr, size, false, attrs)) {
1448 *pval = unassigned_mem_read(mr, addr, size);
1449 return MEMTX_DECODE_ERROR;
1452 r = memory_region_dispatch_read1(mr, addr, pval, size, attrs);
1453 adjust_endianness(mr, pval, size);
1457 /* Return true if an eventfd was signalled */
1458 static bool memory_region_dispatch_write_eventfds(MemoryRegion *mr,
1464 MemoryRegionIoeventfd ioeventfd = {
1465 .addr = addrrange_make(int128_make64(addr), int128_make64(size)),
1470 for (i = 0; i < mr->ioeventfd_nb; i++) {
1471 ioeventfd.match_data = mr->ioeventfds[i].match_data;
1472 ioeventfd.e = mr->ioeventfds[i].e;
1474 if (memory_region_ioeventfd_equal(&ioeventfd, &mr->ioeventfds[i])) {
1475 event_notifier_set(ioeventfd.e);
1483 MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
1489 if (!memory_region_access_valid(mr, addr, size, true, attrs)) {
1490 unassigned_mem_write(mr, addr, data, size);
1491 return MEMTX_DECODE_ERROR;
1494 adjust_endianness(mr, &data, size);
1496 if ((!kvm_eventfds_enabled()) &&
1497 memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) {
1501 if (mr->ops->write) {
1502 return access_with_adjusted_size(addr, &data, size,
1503 mr->ops->impl.min_access_size,
1504 mr->ops->impl.max_access_size,
1505 memory_region_write_accessor, mr,
1509 access_with_adjusted_size(addr, &data, size,
1510 mr->ops->impl.min_access_size,
1511 mr->ops->impl.max_access_size,
1512 memory_region_write_with_attrs_accessor,
void memory_region_init_io(MemoryRegion *mr,
                           Object *owner,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->ops = ops ? ops : &unassigned_mem_ops;
    mr->opaque = opaque;
    mr->terminates = true;
}
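
/*
 * Usage sketch (illustrative; all "example_*" names are hypothetical): a
 * minimal MMIO region backed by read/write callbacks, in the style the
 * ops structures in this file use.
 */
static uint64_t example_mmio_read(void *opaque, hwaddr addr, unsigned size)
{
    return 0;   /* a real device would decode addr here */
}

static void example_mmio_write(void *opaque, hwaddr addr,
                               uint64_t val, unsigned size)
{
    /* a real device would latch val here */
}

static const MemoryRegionOps example_mmio_ops = {
    .read = example_mmio_read,
    .write = example_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl.min_access_size = 4,
    .impl.max_access_size = 4,
};

static inline void example_init_mmio(Object *owner, MemoryRegion *mr)
{
    memory_region_init_io(mr, owner, &example_mmio_ops, NULL,
                          "example-mmio", 0x1000);
}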
1530 void memory_region_init_ram_nomigrate(MemoryRegion *mr,
1536 memory_region_init_ram_shared_nomigrate(mr, owner, name, size, false, errp);
1539 void memory_region_init_ram_shared_nomigrate(MemoryRegion *mr,
1547 memory_region_init(mr, owner, name, size);
1549 mr->terminates = true;
1550 mr->destructor = memory_region_destructor_ram;
1551 mr->ram_block = qemu_ram_alloc(size, share, mr, &err);
1552 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
1554 mr->size = int128_zero();
1555 object_unparent(OBJECT(mr));
1556 error_propagate(errp, err);
1560 void memory_region_init_resizeable_ram(MemoryRegion *mr,
1565 void (*resized)(const char*,
1571 memory_region_init(mr, owner, name, size);
1573 mr->terminates = true;
1574 mr->destructor = memory_region_destructor_ram;
1575 mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized,
1577 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
1579 mr->size = int128_zero();
1580 object_unparent(OBJECT(mr));
1581 error_propagate(errp, err);
1586 void memory_region_init_ram_from_file(MemoryRegion *mr,
1587 struct Object *owner,
1596 memory_region_init(mr, owner, name, size);
1598 mr->terminates = true;
1599 mr->destructor = memory_region_destructor_ram;
1601 mr->ram_block = qemu_ram_alloc_from_file(size, mr, ram_flags, path, &err);
1602 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
1604 mr->size = int128_zero();
1605 object_unparent(OBJECT(mr));
1606 error_propagate(errp, err);
1610 void memory_region_init_ram_from_fd(MemoryRegion *mr,
1611 struct Object *owner,
1619 memory_region_init(mr, owner, name, size);
1621 mr->terminates = true;
1622 mr->destructor = memory_region_destructor_ram;
1623 mr->ram_block = qemu_ram_alloc_from_fd(size, mr,
1624 share ? RAM_SHARED : 0,
1626 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
1628 mr->size = int128_zero();
1629 object_unparent(OBJECT(mr));
1630 error_propagate(errp, err);
1635 void memory_region_init_ram_ptr(MemoryRegion *mr,
1641 memory_region_init(mr, owner, name, size);
1643 mr->terminates = true;
1644 mr->destructor = memory_region_destructor_ram;
1645 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
1647 /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL. */
1648 assert(ptr != NULL);
1649 mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
1652 void memory_region_init_ram_device_ptr(MemoryRegion *mr,
1658 memory_region_init(mr, owner, name, size);
1660 mr->terminates = true;
1661 mr->ram_device = true;
1662 mr->ops = &ram_device_mem_ops;
1664 mr->destructor = memory_region_destructor_ram;
1665 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
1666 /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL. */
1667 assert(ptr != NULL);
1668 mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
1671 void memory_region_init_alias(MemoryRegion *mr,
1678 memory_region_init(mr, owner, name, size);
1680 mr->alias_offset = offset;
1683 void memory_region_init_rom_nomigrate(MemoryRegion *mr,
1684 struct Object *owner,
1690 memory_region_init(mr, owner, name, size);
1692 mr->readonly = true;
1693 mr->terminates = true;
1694 mr->destructor = memory_region_destructor_ram;
1695 mr->ram_block = qemu_ram_alloc(size, false, mr, &err);
1696 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
1698 mr->size = int128_zero();
1699 object_unparent(OBJECT(mr));
1700 error_propagate(errp, err);
1704 void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
1706 const MemoryRegionOps *ops,
1714 memory_region_init(mr, owner, name, size);
1716 mr->opaque = opaque;
1717 mr->terminates = true;
1718 mr->rom_device = true;
1719 mr->destructor = memory_region_destructor_ram;
1720 mr->ram_block = qemu_ram_alloc(size, false, mr, &err);
1722 mr->size = int128_zero();
1723 object_unparent(OBJECT(mr));
1724 error_propagate(errp, err);
1728 void memory_region_init_iommu(void *_iommu_mr,
1729 size_t instance_size,
1730 const char *mrtypename,
1735 struct IOMMUMemoryRegion *iommu_mr;
1736 struct MemoryRegion *mr;
1738 object_initialize(_iommu_mr, instance_size, mrtypename);
1739 mr = MEMORY_REGION(_iommu_mr);
1740 memory_region_do_init(mr, owner, name, size);
1741 iommu_mr = IOMMU_MEMORY_REGION(mr);
1742 mr->terminates = true; /* then re-forwards */
1743 QLIST_INIT(&iommu_mr->iommu_notify);
1744 iommu_mr->iommu_notify_flags = IOMMU_NOTIFIER_NONE;
1747 static void memory_region_finalize(Object *obj)
1749 MemoryRegion *mr = MEMORY_REGION(obj);
1751 assert(!mr->container);
1753 /* We know the region is not visible in any address space (it
1754 * does not have a container and cannot be a root either because
1755 * it has no references, so we can blindly clear mr->enabled.
1756 * memory_region_set_enabled instead could trigger a transaction
1757 * and cause an infinite loop.
1759 mr->enabled = false;
1760 memory_region_transaction_begin();
1761 while (!QTAILQ_EMPTY(&mr->subregions)) {
1762 MemoryRegion *subregion = QTAILQ_FIRST(&mr->subregions);
1763 memory_region_del_subregion(mr, subregion);
1765 memory_region_transaction_commit();
1768 memory_region_clear_coalescing(mr);
1769 g_free((char *)mr->name);
1770 g_free(mr->ioeventfds);
1773 Object *memory_region_owner(MemoryRegion *mr)
1775 Object *obj = OBJECT(mr);
1779 void memory_region_ref(MemoryRegion *mr)
1781 /* MMIO callbacks most likely will access data that belongs
1782 * to the owner, hence the need to ref/unref the owner whenever
1783 * the memory region is in use.
1785 * The memory region is a child of its owner. As long as the
1786 * owner doesn't call unparent itself on the memory region,
1787 * ref-ing the owner will also keep the memory region alive.
1788 * Memory regions without an owner are supposed to never go away;
1789 * we do not ref/unref them because it slows down DMA sensibly.
1791 if (mr && mr->owner) {
1792 object_ref(mr->owner);
1796 void memory_region_unref(MemoryRegion *mr)
1798 if (mr && mr->owner) {
1799 object_unref(mr->owner);
1803 uint64_t memory_region_size(MemoryRegion *mr)
1805 if (int128_eq(mr->size, int128_2_64())) {
1808 return int128_get64(mr->size);
1811 const char *memory_region_name(const MemoryRegion *mr)
1814 ((MemoryRegion *)mr)->name =
1815 object_get_canonical_path_component(OBJECT(mr));
1820 bool memory_region_is_ram_device(MemoryRegion *mr)
1822 return mr->ram_device;
1825 uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr)
1827 uint8_t mask = mr->dirty_log_mask;
1828 if (global_dirty_log && mr->ram_block) {
1829 mask |= (1 << DIRTY_MEMORY_MIGRATION);
1834 bool memory_region_is_logging(MemoryRegion *mr, uint8_t client)
1836 return memory_region_get_dirty_log_mask(mr) & (1 << client);
1839 static void memory_region_update_iommu_notify_flags(IOMMUMemoryRegion *iommu_mr)
1841 IOMMUNotifierFlag flags = IOMMU_NOTIFIER_NONE;
1842 IOMMUNotifier *iommu_notifier;
1843 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
1845 IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
1846 flags |= iommu_notifier->notifier_flags;
1849 if (flags != iommu_mr->iommu_notify_flags && imrc->notify_flag_changed) {
1850 imrc->notify_flag_changed(iommu_mr,
1851 iommu_mr->iommu_notify_flags,
1855 iommu_mr->iommu_notify_flags = flags;
1858 void memory_region_register_iommu_notifier(MemoryRegion *mr,
1861 IOMMUMemoryRegion *iommu_mr;
1864 memory_region_register_iommu_notifier(mr->alias, n);
1868 /* We need to register for at least one bitfield */
1869 iommu_mr = IOMMU_MEMORY_REGION(mr);
1870 assert(n->notifier_flags != IOMMU_NOTIFIER_NONE);
1871 assert(n->start <= n->end);
1872 assert(n->iommu_idx >= 0 &&
1873 n->iommu_idx < memory_region_iommu_num_indexes(iommu_mr));
1875 QLIST_INSERT_HEAD(&iommu_mr->iommu_notify, n, node);
1876 memory_region_update_iommu_notify_flags(iommu_mr);
1879 uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr)
1881 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
1883 if (imrc->get_min_page_size) {
1884 return imrc->get_min_page_size(iommu_mr);
1886 return TARGET_PAGE_SIZE;
1889 void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n)
1891 MemoryRegion *mr = MEMORY_REGION(iommu_mr);
1892 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
1893 hwaddr addr, granularity;
1894 IOMMUTLBEntry iotlb;
1896 /* If the IOMMU has its own replay callback, override */
1898 imrc->replay(iommu_mr, n);
1902 granularity = memory_region_iommu_get_min_page_size(iommu_mr);
1904 for (addr = 0; addr < memory_region_size(mr); addr += granularity) {
1905 iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, n->iommu_idx);
1906 if (iotlb.perm != IOMMU_NONE) {
1907 n->notify(n, &iotlb);
1910 /* if (2^64 - MR size) < granularity, it's possible to get an
1911 * infinite loop here. This should catch such a wraparound */
1912 if ((addr + granularity) < addr) {
1918 void memory_region_iommu_replay_all(IOMMUMemoryRegion *iommu_mr)
1920 IOMMUNotifier *notifier;
1922 IOMMU_NOTIFIER_FOREACH(notifier, iommu_mr) {
1923 memory_region_iommu_replay(iommu_mr, notifier);
1927 void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
1930 IOMMUMemoryRegion *iommu_mr;
1933 memory_region_unregister_iommu_notifier(mr->alias, n);
1936 QLIST_REMOVE(n, node);
1937 iommu_mr = IOMMU_MEMORY_REGION(mr);
1938 memory_region_update_iommu_notify_flags(iommu_mr);
1941 void memory_region_notify_one(IOMMUNotifier *notifier,
1942 IOMMUTLBEntry *entry)
1944 IOMMUNotifierFlag request_flags;
1947 * Skip the notification if the notification does not overlap
1948 * with registered range.
1950 if (notifier->start > entry->iova + entry->addr_mask ||
1951 notifier->end < entry->iova) {
1955 if (entry->perm & IOMMU_RW) {
1956 request_flags = IOMMU_NOTIFIER_MAP;
1958 request_flags = IOMMU_NOTIFIER_UNMAP;
1961 if (notifier->notifier_flags & request_flags) {
1962 notifier->notify(notifier, entry);
1966 void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
1968 IOMMUTLBEntry entry)
1970 IOMMUNotifier *iommu_notifier;
1972 assert(memory_region_is_iommu(MEMORY_REGION(iommu_mr)));
1974 IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
1975 if (iommu_notifier->iommu_idx == iommu_idx) {
1976 memory_region_notify_one(iommu_notifier, &entry);
1981 int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr,
1982 enum IOMMUMemoryRegionAttr attr,
1985 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
1987 if (!imrc->get_attr) {
1991 return imrc->get_attr(iommu_mr, attr, data);
1994 int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr,
1997 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
1999 if (!imrc->attrs_to_index) {
2003 return imrc->attrs_to_index(iommu_mr, attrs);
2006 int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr)
2008 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
2010 if (!imrc->num_indexes) {
2014 return imrc->num_indexes(iommu_mr);
2017 void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
2019 uint8_t mask = 1 << client;
2020 uint8_t old_logging;
2022 assert(client == DIRTY_MEMORY_VGA);
2023 old_logging = mr->vga_logging_count;
2024 mr->vga_logging_count += log ? 1 : -1;
2025 if (!!old_logging == !!mr->vga_logging_count) {
2029 memory_region_transaction_begin();
2030 mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
2031 memory_region_update_pending |= mr->enabled;
2032 memory_region_transaction_commit();
2035 void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
2038 assert(mr->ram_block);
2039 cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
2041 memory_region_get_dirty_log_mask(mr));
2044 static void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
2046 MemoryListener *listener;
2051 /* If the same address space has multiple log_sync listeners, we
2052 * visit that address space's FlatView multiple times. But because
2053 * log_sync listeners are rare, it's still cheaper than walking each
2054 * address space once.
2056 QTAILQ_FOREACH(listener, &memory_listeners, link) {
2057 if (!listener->log_sync) {
2060 as = listener->address_space;
2061 view = address_space_get_flatview(as);
2062 FOR_EACH_FLAT_RANGE(fr, view) {
2063 if (fr->dirty_log_mask && (!mr || fr->mr == mr)) {
2064 MemoryRegionSection mrs = section_from_flat_range(fr, view);
2065 listener->log_sync(listener, &mrs);
2068 flatview_unref(view);
2072 void memory_region_clear_dirty_bitmap(MemoryRegion *mr, hwaddr start,
2075 MemoryRegionSection mrs;
2076 MemoryListener *listener;
2080 hwaddr sec_start, sec_end, sec_size;
2082 QTAILQ_FOREACH(listener, &memory_listeners, link) {
2083 if (!listener->log_clear) {
2086 as = listener->address_space;
2087 view = address_space_get_flatview(as);
2088 FOR_EACH_FLAT_RANGE(fr, view) {
2089 if (!fr->dirty_log_mask || fr->mr != mr) {
2091 * Clear dirty bitmap operation only applies to those
2092 * regions whose dirty logging is at least enabled
2097 mrs = section_from_flat_range(fr, view);
2099 sec_start = MAX(mrs.offset_within_region, start);
2100 sec_end = mrs.offset_within_region + int128_get64(mrs.size);
2101 sec_end = MIN(sec_end, start + len);
2103 if (sec_start >= sec_end) {
2105 * If this memory region section has no intersection
2106 * with the requested range, skip.
2111 /* Valid case; shrink the section if needed */
2112 mrs.offset_within_address_space +=
2113 sec_start - mrs.offset_within_region;
2114 mrs.offset_within_region = sec_start;
2115 sec_size = sec_end - sec_start;
2116 mrs.size = int128_make64(sec_size);
2117 listener->log_clear(listener, &mrs);
2119 flatview_unref(view);
2123 DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
2128 assert(mr->ram_block);
2129 memory_region_sync_dirty_bitmap(mr);
2130 return cpu_physical_memory_snapshot_and_clear_dirty(mr, addr, size, client);
2133 bool memory_region_snapshot_get_dirty(MemoryRegion *mr, DirtyBitmapSnapshot *snap,
2134 hwaddr addr, hwaddr size)
2136 assert(mr->ram_block);
2137 return cpu_physical_memory_snapshot_get_dirty(snap,
2138 memory_region_get_ram_addr(mr) + addr, size);
2141 void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
2143 if (mr->readonly != readonly) {
2144 memory_region_transaction_begin();
2145 mr->readonly = readonly;
2146 memory_region_update_pending |= mr->enabled;
2147 memory_region_transaction_commit();
2151 void memory_region_set_nonvolatile(MemoryRegion *mr, bool nonvolatile)
2153 if (mr->nonvolatile != nonvolatile) {
2154 memory_region_transaction_begin();
2155 mr->nonvolatile = nonvolatile;
2156 memory_region_update_pending |= mr->enabled;
2157 memory_region_transaction_commit();
2161 void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode)
2163 if (mr->romd_mode != romd_mode) {
2164 memory_region_transaction_begin();
2165 mr->romd_mode = romd_mode;
2166 memory_region_update_pending |= mr->enabled;
2167 memory_region_transaction_commit();
2171 void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
2172 hwaddr size, unsigned client)
2174 assert(mr->ram_block);
2175 cpu_physical_memory_test_and_clear_dirty(
2176 memory_region_get_ram_addr(mr) + addr, size, client);
2179 int memory_region_get_fd(MemoryRegion *mr)
2187 fd = mr->ram_block->fd;
2193 void *memory_region_get_ram_ptr(MemoryRegion *mr)
2196 uint64_t offset = 0;
2200 offset += mr->alias_offset;
2203 assert(mr->ram_block);
2204 ptr = qemu_map_ram_ptr(mr->ram_block, offset);
2210 MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset)
2214 block = qemu_ram_block_from_host(ptr, false, offset);
2222 ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
2224 return mr->ram_block ? mr->ram_block->offset : RAM_ADDR_INVALID;
2227 void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp)
2229 assert(mr->ram_block);
2231 qemu_ram_resize(mr->ram_block, newsize, errp);
2234 static void memory_region_update_coalesced_range_as(MemoryRegion *mr, AddressSpace *as)
2239 view = address_space_get_flatview(as);
2240 FOR_EACH_FLAT_RANGE(fr, view) {
2242 flat_range_coalesced_io_del(fr, as);
2243 flat_range_coalesced_io_add(fr, as);
2246 flatview_unref(view);
2249 static void memory_region_update_coalesced_range(MemoryRegion *mr)
2253 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
2254 memory_region_update_coalesced_range_as(mr, as);
2258 void memory_region_set_coalescing(MemoryRegion *mr)
2260 memory_region_clear_coalescing(mr);
2261 memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
2264 void memory_region_add_coalescing(MemoryRegion *mr,
2268 CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));
2270 cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
2271 QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
2272 memory_region_update_coalesced_range(mr);
2273 memory_region_set_flush_coalesced(mr);
2276 void memory_region_clear_coalescing(MemoryRegion *mr)
2278 CoalescedMemoryRange *cmr;
2279 bool updated = false;
2281 qemu_flush_coalesced_mmio_buffer();
2282 mr->flush_coalesced_mmio = false;
2284 while (!QTAILQ_EMPTY(&mr->coalesced)) {
2285 cmr = QTAILQ_FIRST(&mr->coalesced);
2286 QTAILQ_REMOVE(&mr->coalesced, cmr, link);
2292 memory_region_update_coalesced_range(mr);
2296 void memory_region_set_flush_coalesced(MemoryRegion *mr)
2298 mr->flush_coalesced_mmio = true;
2301 void memory_region_clear_flush_coalesced(MemoryRegion *mr)
2303 qemu_flush_coalesced_mmio_buffer();
2304 if (QTAILQ_EMPTY(&mr->coalesced)) {
2305 mr->flush_coalesced_mmio = false;
2309 void memory_region_clear_global_locking(MemoryRegion *mr)
2311 mr->global_locking = false;
2314 static bool userspace_eventfd_warning;
void memory_region_add_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
        .match_data = match_data,
        .data = data,
        .e = e,
    };
    unsigned i;

    if (kvm_enabled() && (!(kvm_eventfds_enabled() ||
                            userspace_eventfd_warning))) {
        userspace_eventfd_warning = true;
        error_report("Using eventfd without MMIO binding in KVM. "
                     "Suboptimal performance expected");
    }

    adjust_endianness(mr, &mrfd.data, size);
    memory_region_transaction_begin();
    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_before(&mrfd, &mr->ioeventfds[i])) {
            break;
        }
    }
    ++mr->ioeventfd_nb;
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                               sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
    mr->ioeventfds[i] = mrfd;
    ioeventfd_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}
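
/*
 * Usage sketch (illustrative; names are hypothetical): wiring a doorbell
 * register to an EventNotifier, so a specific guest write wakes a waiter
 * without a trip through the device's MMIO callbacks (KVM binds it
 * in-kernel when kvm_eventfds_enabled() is true).
 */
static inline void example_add_doorbell(MemoryRegion *mr, EventNotifier *e)
{
    /* assumes @e was already set up with event_notifier_init() */
    memory_region_add_eventfd(mr, 0x40, 4, true, 1, e);
}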
2358 void memory_region_del_eventfd(MemoryRegion *mr,
2365 MemoryRegionIoeventfd mrfd = {
2366 .addr.start = int128_make64(addr),
2367 .addr.size = int128_make64(size),
2368 .match_data = match_data,
2375 adjust_endianness(mr, &mrfd.data, size);
2377 memory_region_transaction_begin();
2378 for (i = 0; i < mr->ioeventfd_nb; ++i) {
2379 if (memory_region_ioeventfd_equal(&mrfd, &mr->ioeventfds[i])) {
2383 assert(i != mr->ioeventfd_nb);
2384 memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
2385 sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
2387 mr->ioeventfds = g_realloc(mr->ioeventfds,
2388 sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
2389 ioeventfd_update_pending |= mr->enabled;
2390 memory_region_transaction_commit();
2393 static void memory_region_update_container_subregions(MemoryRegion *subregion)
2395 MemoryRegion *mr = subregion->container;
2396 MemoryRegion *other;
2398 memory_region_transaction_begin();
2400 memory_region_ref(subregion);
2401 QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
2402 if (subregion->priority >= other->priority) {
2403 QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
2407 QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
2409 memory_region_update_pending |= mr->enabled && subregion->enabled;
2410 memory_region_transaction_commit();
2413 static void memory_region_add_subregion_common(MemoryRegion *mr,
2415 MemoryRegion *subregion)
2417 assert(!subregion->container);
2418 subregion->container = mr;
2419 subregion->addr = offset;
2420 memory_region_update_container_subregions(subregion);
void memory_region_add_subregion(MemoryRegion *mr,
                                 hwaddr offset,
                                 MemoryRegion *subregion)
{
    subregion->priority = 0;
    memory_region_add_subregion_common(mr, offset, subregion);
}

void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         hwaddr offset,
                                         MemoryRegion *subregion,
                                         int priority)
{
    subregion->priority = priority;
    memory_region_add_subregion_common(mr, offset, subregion);
}
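
/*
 * Worked example (illustrative): RAM at [0x0, 0x10000) with default
 * priority 0 and an MMIO window at [0x8000, 0x9000) added with priority 1.
 * The window obscures the RAM in the overlap; the RAM shows through
 * everywhere else.
 */
static inline void example_overlay(MemoryRegion *sysmem, MemoryRegion *ram,
                                   MemoryRegion *window)
{
    memory_region_add_subregion(sysmem, 0x0, ram);
    memory_region_add_subregion_overlap(sysmem, 0x8000, window, 1);
}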
2440 void memory_region_del_subregion(MemoryRegion *mr,
2441 MemoryRegion *subregion)
2443 memory_region_transaction_begin();
2444 assert(subregion->container == mr);
2445 subregion->container = NULL;
2446 QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
2447 memory_region_unref(subregion);
2448 memory_region_update_pending |= mr->enabled && subregion->enabled;
2449 memory_region_transaction_commit();
2452 void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
2454 if (enabled == mr->enabled) {
2457 memory_region_transaction_begin();
2458 mr->enabled = enabled;
2459 memory_region_update_pending = true;
2460 memory_region_transaction_commit();
2463 void memory_region_set_size(MemoryRegion *mr, uint64_t size)
2465 Int128 s = int128_make64(size);
2467 if (size == UINT64_MAX) {
2470 if (int128_eq(s, mr->size)) {
2473 memory_region_transaction_begin();
2475 memory_region_update_pending = true;
2476 memory_region_transaction_commit();
2479 static void memory_region_readd_subregion(MemoryRegion *mr)
2481 MemoryRegion *container = mr->container;
2484 memory_region_transaction_begin();
2485 memory_region_ref(mr);
2486 memory_region_del_subregion(container, mr);
2487 mr->container = container;
2488 memory_region_update_container_subregions(mr);
2489 memory_region_unref(mr);
2490 memory_region_transaction_commit();
2494 void memory_region_set_address(MemoryRegion *mr, hwaddr addr)
2496 if (addr != mr->addr) {
2498 memory_region_readd_subregion(mr);
void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset)
{
    assert(mr->alias);

    if (offset == mr->alias_offset) {
        return;
    }

    memory_region_transaction_begin();
    mr->alias_offset = offset;
    memory_region_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}
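
/*
 * Usage sketch (illustrative; names are hypothetical): a banked ROM
 * implemented as an alias whose offset is repointed on a bank-select
 * write, assuming 16 KiB banks.
 */
static inline void example_select_bank(MemoryRegion *bank_alias,
                                       unsigned bank)
{
    memory_region_set_alias_offset(bank_alias, (hwaddr)bank * 0x4000);
}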
2516 uint64_t memory_region_get_alignment(const MemoryRegion *mr)
2521 static int cmp_flatrange_addr(const void *addr_, const void *fr_)
2523 const AddrRange *addr = addr_;
2524 const FlatRange *fr = fr_;
2526 if (int128_le(addrrange_end(*addr), fr->addr.start)) {
2528 } else if (int128_ge(addr->start, addrrange_end(fr->addr))) {
2534 static FlatRange *flatview_lookup(FlatView *view, AddrRange addr)
2536 return bsearch(&addr, view->ranges, view->nr,
2537 sizeof(FlatRange), cmp_flatrange_addr);
2540 bool memory_region_is_mapped(MemoryRegion *mr)
2542 return mr->container ? true : false;
/* Same as memory_region_find, but it does not add a reference to the
 * returned region.  It must be called from an RCU critical section.
 */
static MemoryRegionSection memory_region_find_rcu(MemoryRegion *mr,
                                                  hwaddr addr, uint64_t size)
{
2551 MemoryRegionSection ret = { .mr = NULL };
2559 for (root = mr; root->container; ) {
2560 root = root->container;
2564 as = memory_region_to_address_space(root);
2568 range = addrrange_make(int128_make64(addr), int128_make64(size));
2570 view = address_space_to_flatview(as);
2571 fr = flatview_lookup(view, range);
2576 while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) {
2582 range = addrrange_intersection(range, fr->addr);
2583 ret.offset_within_region = fr->offset_in_region;
2584 ret.offset_within_region += int128_get64(int128_sub(range.start,
2586 ret.size = range.size;
2587 ret.offset_within_address_space = int128_get64(range.start);
2588 ret.readonly = fr->readonly;
2589 ret.nonvolatile = fr->nonvolatile;
MemoryRegionSection memory_region_find(MemoryRegion *mr,
                                       hwaddr addr, uint64_t size)
{
    MemoryRegionSection ret;

    rcu_read_lock();
    ret = memory_region_find_rcu(mr, addr, size);
    if (ret.mr) {
        memory_region_ref(ret.mr);
    }
    rcu_read_unlock();
    return ret;
}
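
/*
 * Usage sketch (illustrative): memory_region_find() hands the caller a
 * reference on the returned region, which must be dropped when done.
 */
static inline bool example_probe_mapping(MemoryRegion *root, hwaddr addr)
{
    MemoryRegionSection sec = memory_region_find(root, addr, 4);
    bool mapped = sec.mr != NULL;

    if (sec.mr) {
        memory_region_unref(sec.mr);
    }
    return mapped;
}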
2606 bool memory_region_present(MemoryRegion *container, hwaddr addr)
2611 mr = memory_region_find_rcu(container, addr, 1).mr;
2613 return mr && mr != container;
2616 void memory_global_dirty_log_sync(void)
2618 memory_region_sync_dirty_bitmap(NULL);
static VMChangeStateEntry *vmstate_change;

void memory_global_dirty_log_start(void)
{
    /* Cancel a pending deferred stop, if any. */
    if (vmstate_change) {
        qemu_del_vm_change_state_handler(vmstate_change);
        vmstate_change = NULL;
    }

    global_dirty_log = true;

    MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward);

    /* Refresh DIRTY_MEMORY_MIGRATION bit.  */
    memory_region_transaction_begin();
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}
static void memory_global_dirty_log_do_stop(void)
{
    global_dirty_log = false;

    /* Refresh DIRTY_MEMORY_MIGRATION bit.  */
    memory_region_transaction_begin();
    memory_region_update_pending = true;
    memory_region_transaction_commit();

    MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse);
}
static void memory_vm_change_state_handler(void *opaque, int running,
                                           RunState state)
{
    if (running) {
        memory_global_dirty_log_do_stop();

        if (vmstate_change) {
            qemu_del_vm_change_state_handler(vmstate_change);
            vmstate_change = NULL;
        }
    }
}
void memory_global_dirty_log_stop(void)
{
    if (!runstate_is_running()) {
        /* Defer the stop until the VM runs again; see above. */
        if (vmstate_change) {
            return;
        }
        vmstate_change = qemu_add_vm_change_state_handler(
                                memory_vm_change_state_handler, NULL);
        return;
    }

    memory_global_dirty_log_do_stop();
}
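/*
 * Replay the current state of an address space to a newly registered
 * listener: a region_add (and, if dirty logging is active, a log_start)
 * for every FlatRange of the rendered view, bracketed by begin/commit.
 */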
static void listener_add_address_space(MemoryListener *listener,
                                       AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;

    if (listener->begin) {
        listener->begin(listener);
    }
    if (global_dirty_log) {
        if (listener->log_global_start) {
            listener->log_global_start(listener);
        }
    }

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        MemoryRegionSection section = section_from_flat_range(fr, view);

        if (listener->region_add) {
            listener->region_add(listener, &section);
        }
        if (fr->dirty_log_mask && listener->log_start) {
            listener->log_start(listener, &section, 0, fr->dirty_log_mask);
        }
    }
    if (listener->commit) {
        listener->commit(listener);
    }
    flatview_unref(view);
}
static void listener_del_address_space(MemoryListener *listener,
                                       AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;

    if (listener->begin) {
        listener->begin(listener);
    }
    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        MemoryRegionSection section = section_from_flat_range(fr, view);

        if (fr->dirty_log_mask && listener->log_stop) {
            listener->log_stop(listener, &section, fr->dirty_log_mask, 0);
        }
        if (listener->region_del) {
            listener->region_del(listener, &section);
        }
    }
    if (listener->commit) {
        listener->commit(listener);
    }
    flatview_unref(view);
}
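/*
 * Listeners are kept sorted by ascending priority, both on the global
 * list and on the per-address-space list, so Forward iteration reaches
 * lower-priority listeners first and Reverse iteration reaches them last.
 */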
void memory_listener_register(MemoryListener *listener, AddressSpace *as)
{
    MemoryListener *other = NULL;

    listener->address_space = as;
    if (QTAILQ_EMPTY(&memory_listeners)
        || listener->priority >= QTAILQ_LAST(&memory_listeners)->priority) {
        QTAILQ_INSERT_TAIL(&memory_listeners, listener, link);
    } else {
        QTAILQ_FOREACH(other, &memory_listeners, link) {
            if (listener->priority < other->priority) {
                break;
            }
        }
        QTAILQ_INSERT_BEFORE(other, listener, link);
    }

    if (QTAILQ_EMPTY(&as->listeners)
        || listener->priority >= QTAILQ_LAST(&as->listeners)->priority) {
        QTAILQ_INSERT_TAIL(&as->listeners, listener, link_as);
    } else {
        QTAILQ_FOREACH(other, &as->listeners, link_as) {
            if (listener->priority < other->priority) {
                break;
            }
        }
        QTAILQ_INSERT_BEFORE(other, listener, link_as);
    }

    listener_add_address_space(listener, as);
}
void memory_listener_unregister(MemoryListener *listener)
{
    if (!listener->address_space) {
        return;
    }

    listener_del_address_space(listener, listener->address_space);
    QTAILQ_REMOVE(&memory_listeners, listener, link);
    QTAILQ_REMOVE(&listener->address_space->listeners, listener, link_as);
    listener->address_space = NULL;
}
void address_space_remove_listeners(AddressSpace *as)
{
    while (!QTAILQ_EMPTY(&as->listeners)) {
        memory_listener_unregister(QTAILQ_FIRST(&as->listeners));
    }
}
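/*
 * Illustrative lifecycle of an AddressSpace (sketch only, not part of the
 * original file; "root_mr" is a hypothetical, already initialized
 * MemoryRegion owned by the caller):
 *
 *     AddressSpace as;
 *
 *     address_space_init(&as, root_mr, "mydev-dma");
 *     // ... dispatch accesses through &as ...
 *     address_space_destroy(&as);  // actual teardown is RCU-deferred
 */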
void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
{
    memory_region_ref(root);
    as->root = root;
    as->current_map = NULL;
    as->ioeventfd_nb = 0;
    as->ioeventfds = NULL;
    QTAILQ_INIT(&as->listeners);
    QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link);
    as->name = g_strdup(name ? name : "anonymous");
    address_space_update_topology(as);
    address_space_update_ioeventfds(as);
}
static void do_address_space_destroy(AddressSpace *as)
{
    assert(QTAILQ_EMPTY(&as->listeners));

    flatview_unref(as->current_map);
    g_free(as->name);
    g_free(as->ioeventfds);
    memory_region_unref(as->root);
}
void address_space_destroy(AddressSpace *as)
{
    MemoryRegion *root = as->root;

    /* Flush out anything from MemoryListeners listening in on this */
    memory_region_transaction_begin();
    as->root = NULL;
    memory_region_transaction_commit();
    QTAILQ_REMOVE(&address_spaces, as, address_spaces_link);

    /* At this point, as->dispatch and as->current_map are dummy
     * entries that the guest should never use.  Wait for the old
     * values to expire before freeing the data.
     */
    as->root = root;
    call_rcu(as, do_address_space_destroy, rcu);
}
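/*
 * The rest of this file backs the monitor's memory-tree dump:
 * memory_region_type() classifies a region for display, and the mtree_*
 * helpers print either the MemoryRegion hierarchy or the rendered
 * FlatViews.
 */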
static const char *memory_region_type(MemoryRegion *mr)
{
    if (memory_region_is_ram_device(mr)) {
        return "ramd";
    } else if (memory_region_is_romd(mr)) {
        return "romd";
    } else if (memory_region_is_rom(mr)) {
        return "rom";
    } else if (memory_region_is_ram(mr)) {
        return "ram";
    } else {
        return "i/o";
    }
}
typedef struct MemoryRegionList MemoryRegionList;

struct MemoryRegionList {
    const MemoryRegion *mr;
    QTAILQ_ENTRY(MemoryRegionList) mrqueue;
};

typedef QTAILQ_HEAD(, MemoryRegionList) MemoryRegionListHead;

#define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \
                           int128_sub((size), int128_one())) : 0)
#define MTREE_INDENT "  "
static void mtree_expand_owner(const char *label, Object *obj)
{
    DeviceState *dev = (DeviceState *) object_dynamic_cast(obj, TYPE_DEVICE);

    qemu_printf(" %s:{%s", label, dev ? "dev" : "obj");
    if (dev && dev->id) {
        qemu_printf(" id=%s", dev->id);
    } else {
        gchar *canonical_path = object_get_canonical_path(obj);
        if (canonical_path) {
            qemu_printf(" path=%s", canonical_path);
            g_free(canonical_path);
        } else {
            qemu_printf(" type=%s", object_get_typename(obj));
        }
    }
    qemu_printf("}");
}
static void mtree_print_mr_owner(const MemoryRegion *mr)
{
    Object *owner = mr->owner;
    /* The QOM parent, which may differ from the owner. */
    Object *parent = OBJECT(mr)->parent;

    if (!owner && !parent) {
        qemu_printf(" orphan");
        return;
    }
    if (owner) {
        mtree_expand_owner("owner", owner);
    }
    if (parent && parent != owner) {
        mtree_expand_owner("parent", parent);
    }
}
static void mtree_print_mr(const MemoryRegion *mr, unsigned int level,
                           hwaddr base,
                           MemoryRegionListHead *alias_print_queue,
                           bool owner)
{
    MemoryRegionList *new_ml, *ml, *next_ml;
    MemoryRegionListHead submr_print_queue;
    const MemoryRegion *submr;
    unsigned int i;
    hwaddr cur_start, cur_end;

    if (!mr) {
        return;
    }

    for (i = 0; i < level; i++) {
        qemu_printf(MTREE_INDENT);
    }

    cur_start = base + mr->addr;
    cur_end = cur_start + MR_SIZE(mr->size);

    /*
     * Try to detect overflow of memory region. This should never
     * happen normally. When it happens, we dump something to warn the
     * user who is observing this.
     */
    if (cur_start < base || cur_end < cur_start) {
        qemu_printf("[DETECTED OVERFLOW!] ");
    }

    if (mr->alias) {
        MemoryRegionList *ml;
        bool found = false;

        /* check if the alias is already in the queue */
        QTAILQ_FOREACH(ml, alias_print_queue, mrqueue) {
            if (ml->mr == mr->alias) {
                found = true;
            }
        }

        if (!found) {
            ml = g_new(MemoryRegionList, 1);
            ml->mr = mr->alias;
            QTAILQ_INSERT_TAIL(alias_print_queue, ml, mrqueue);
        }
        qemu_printf(TARGET_FMT_plx "-" TARGET_FMT_plx
                    " (prio %d, %s%s): alias %s @%s " TARGET_FMT_plx
                    "-" TARGET_FMT_plx "%s",
                    cur_start, cur_end,
                    mr->priority,
                    mr->nonvolatile ? "nv-" : "",
                    memory_region_type((MemoryRegion *)mr),
                    memory_region_name(mr),
                    memory_region_name(mr->alias),
                    mr->alias_offset,
                    mr->alias_offset + MR_SIZE(mr->size),
                    mr->enabled ? "" : " [disabled]");
        if (owner) {
            mtree_print_mr_owner(mr);
        }
    } else {
        qemu_printf(TARGET_FMT_plx "-" TARGET_FMT_plx
                    " (prio %d, %s%s): %s%s",
                    cur_start, cur_end,
                    mr->priority,
                    mr->nonvolatile ? "nv-" : "",
                    memory_region_type((MemoryRegion *)mr),
                    memory_region_name(mr),
                    mr->enabled ? "" : " [disabled]");
        if (owner) {
            mtree_print_mr_owner(mr);
        }
    }
    qemu_printf("\n");

    QTAILQ_INIT(&submr_print_queue);

    /* Sort subregions by ascending address, then by descending priority. */
    QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) {
        new_ml = g_new(MemoryRegionList, 1);
        new_ml->mr = submr;
        QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
            if (new_ml->mr->addr < ml->mr->addr ||
                (new_ml->mr->addr == ml->mr->addr &&
                 new_ml->mr->priority > ml->mr->priority)) {
                QTAILQ_INSERT_BEFORE(ml, new_ml, mrqueue);
                new_ml = NULL;
                break;
            }
        }
        if (new_ml) {
            QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, mrqueue);
        }
    }

    QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
        mtree_print_mr(ml->mr, level + 1, cur_start,
                       alias_print_queue, owner);
    }

    QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, mrqueue, next_ml) {
        g_free(ml);
    }
}
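/*
 * Per-invocation context for the flatview dump: a running counter used to
 * number the views, the requested options, and (optionally) the
 * accelerator, so that ranges can be annotated when the accelerator
 * actually maps them.
 */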
struct FlatViewInfo {
    int counter;
    bool dispatch_tree;
    bool owner;
    AccelClass *ac;
    const char *ac_name;
};
static void mtree_print_flatview(gpointer key, gpointer value,
                                 gpointer user_data)
{
    FlatView *view = key;
    GArray *fv_address_spaces = value;
    struct FlatViewInfo *fvi = user_data;
    FlatRange *range = &view->ranges[0];
    MemoryRegion *mr;
    int n = view->nr;
    int i;
    AddressSpace *as;

    qemu_printf("FlatView #%d\n", fvi->counter);
    ++fvi->counter;

    for (i = 0; i < fv_address_spaces->len; ++i) {
        as = g_array_index(fv_address_spaces, AddressSpace*, i);
        qemu_printf(" AS \"%s\", root: %s",
                    as->name, memory_region_name(as->root));
        if (as->root->alias) {
            qemu_printf(", alias %s", memory_region_name(as->root->alias));
        }
        qemu_printf("\n");
    }

    qemu_printf(" Root memory region: %s\n",
                view->root ? memory_region_name(view->root) : "(none)");

    if (n <= 0) {
        qemu_printf(MTREE_INDENT "No rendered FlatView\n\n");
        return;
    }

    while (n--) {
        mr = range->mr;
        if (range->offset_in_region) {
            qemu_printf(MTREE_INDENT TARGET_FMT_plx "-" TARGET_FMT_plx
                        " (prio %d, %s%s): %s @" TARGET_FMT_plx,
                        int128_get64(range->addr.start),
                        int128_get64(range->addr.start)
                        + MR_SIZE(range->addr.size),
                        mr->priority,
                        range->nonvolatile ? "nv-" : "",
                        range->readonly ? "rom" : memory_region_type(mr),
                        memory_region_name(mr),
                        range->offset_in_region);
        } else {
            qemu_printf(MTREE_INDENT TARGET_FMT_plx "-" TARGET_FMT_plx
                        " (prio %d, %s%s): %s",
                        int128_get64(range->addr.start),
                        int128_get64(range->addr.start)
                        + MR_SIZE(range->addr.size),
                        mr->priority,
                        range->nonvolatile ? "nv-" : "",
                        range->readonly ? "rom" : memory_region_type(mr),
                        memory_region_name(mr));
        }
        if (fvi->owner) {
            mtree_print_mr_owner(mr);
        }

        if (fvi->ac) {
            for (i = 0; i < fv_address_spaces->len; ++i) {
                as = g_array_index(fv_address_spaces, AddressSpace*, i);
                if (fvi->ac->has_memory(current_machine, as,
                                        int128_get64(range->addr.start),
                                        MR_SIZE(range->addr.size) + 1)) {
                    qemu_printf(" %s", fvi->ac_name);
                }
            }
        }
        qemu_printf("\n");
        range++;
    }

#if !defined(CONFIG_USER_ONLY)
    if (fvi->dispatch_tree && view->root) {
        mtree_print_dispatch(view->dispatch, view->root);
    }
#endif

    qemu_printf("\n");
}
static gboolean mtree_info_flatview_free(gpointer key, gpointer value,
                                         gpointer user_data)
{
    FlatView *view = key;
    GArray *fv_address_spaces = value;

    g_array_unref(fv_address_spaces);
    flatview_unref(view);

    return true;
}
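/*
 * Entry point for the monitor's memory-tree dump.  With @flatview set it
 * prints the rendered FlatViews, grouping address spaces that share a
 * view; otherwise it prints the MemoryRegion hierarchy of each address
 * space, followed by any aliased regions collected along the way.
 */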
void mtree_info(bool flatview, bool dispatch_tree, bool owner)
{
    MemoryRegionListHead ml_head;
    MemoryRegionList *ml, *ml2;
    AddressSpace *as;

    if (flatview) {
        FlatView *view;
        struct FlatViewInfo fvi = {
            .counter = 0,
            .dispatch_tree = dispatch_tree,
            .owner = owner,
        };
        GArray *fv_address_spaces;
        GHashTable *views = g_hash_table_new(g_direct_hash, g_direct_equal);
        AccelClass *ac = ACCEL_GET_CLASS(current_machine->accelerator);

        if (ac->has_memory) {
            fvi.ac = ac;
            fvi.ac_name = current_machine->accel ? current_machine->accel :
                object_class_get_name(OBJECT_CLASS(ac));
        }

        /* Gather all FVs in one table */
        QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
            view = address_space_get_flatview(as);

            fv_address_spaces = g_hash_table_lookup(views, view);
            if (!fv_address_spaces) {
                fv_address_spaces = g_array_new(false, false, sizeof(as));
                g_hash_table_insert(views, view, fv_address_spaces);
            }

            g_array_append_val(fv_address_spaces, as);
        }

        /* Print */
        g_hash_table_foreach(views, mtree_print_flatview, &fvi);

        /* Free */
        g_hash_table_foreach_remove(views, mtree_info_flatview_free, 0);
        g_hash_table_unref(views);

        return;
    }

    QTAILQ_INIT(&ml_head);

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        qemu_printf("address-space: %s\n", as->name);
        mtree_print_mr(as->root, 1, 0, &ml_head, owner);
        qemu_printf("\n");
    }

    /* print aliased regions */
    QTAILQ_FOREACH(ml, &ml_head, mrqueue) {
        qemu_printf("memory-region: %s\n", memory_region_name(ml->mr));
        mtree_print_mr(ml->mr, 1, 0, &ml_head, owner);
        qemu_printf("\n");
    }

    QTAILQ_FOREACH_SAFE(ml, &ml_head, mrqueue, ml2) {
        g_free(ml);
    }
}
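/*
 * The wrappers below call the corresponding *_nomigrate initializer and
 * then register the RAM with vmstate, so its contents are migratable
 * under a name derived from the owning device.
 */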
void memory_region_init_ram(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp)
{
    DeviceState *owner_dev;
    Error *err = NULL;

    memory_region_init_ram_nomigrate(mr, owner, name, size, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    /* This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration. TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);
}
void memory_region_init_rom(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp)
{
    DeviceState *owner_dev;
    Error *err = NULL;

    memory_region_init_rom_nomigrate(mr, owner, name, size, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    /* This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration. TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);
}
void memory_region_init_rom_device(MemoryRegion *mr,
                                   struct Object *owner,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   const char *name,
                                   uint64_t size,
                                   Error **errp)
{
    DeviceState *owner_dev;
    Error *err = NULL;

    memory_region_init_rom_device_nomigrate(mr, owner, ops, opaque,
                                            name, size, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    /* This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration. TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);
}
static const TypeInfo memory_region_info = {
    .parent = TYPE_OBJECT,
    .name = TYPE_MEMORY_REGION,
    .class_size = sizeof(MemoryRegionClass),
    .instance_size = sizeof(MemoryRegion),
    .instance_init = memory_region_initfn,
    .instance_finalize = memory_region_finalize,
};

static const TypeInfo iommu_memory_region_info = {
    .parent = TYPE_MEMORY_REGION,
    .name = TYPE_IOMMU_MEMORY_REGION,
    .class_size = sizeof(IOMMUMemoryRegionClass),
    .instance_size = sizeof(IOMMUMemoryRegion),
    .instance_init = iommu_memory_region_initfn,
    .abstract = true,
};

static void memory_register_types(void)
{
    type_register_static(&memory_region_info);
    type_register_static(&iommu_memory_region_info);
}

type_init(memory_register_types)