/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "exec/memory.h"
#include "qapi/visitor.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/qemu-print.h"
#include "qom/object.h"
#include "trace.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "sysemu/kvm.h"
#include "sysemu/runstate.h"
#include "sysemu/tcg.h"
#include "qemu/accel.h"
#include "hw/boards.h"
#include "migration/vmstate.h"

//#define DEBUG_UNASSIGNED
static unsigned memory_region_transaction_depth;
static bool memory_region_update_pending;
static bool ioeventfd_update_pending;
unsigned int global_dirty_tracking;

static QTAILQ_HEAD(, MemoryListener) memory_listeners
    = QTAILQ_HEAD_INITIALIZER(memory_listeners);

static QTAILQ_HEAD(, AddressSpace) address_spaces
    = QTAILQ_HEAD_INITIALIZER(address_spaces);

static GHashTable *flat_views;

typedef struct AddrRange AddrRange;

/*
 * Note that signed integers are needed for negative offsetting in aliases
 * (large MemoryRegion::alias_offset).
 */
struct AddrRange {
    Int128 start;
    Int128 size;
};
static AddrRange addrrange_make(Int128 start, Int128 size)
{
    return (AddrRange) { start, size };
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
}

static Int128 addrrange_end(AddrRange r)
{
    return int128_add(r.start, r.size);
}

static AddrRange addrrange_shift(AddrRange range, Int128 delta)
{
    int128_addto(&range.start, delta);
    return range;
}

static bool addrrange_contains(AddrRange range, Int128 addr)
{
    return int128_ge(addr, range.start)
        && int128_lt(addr, addrrange_end(range));
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return addrrange_contains(r1, r2.start)
        || addrrange_contains(r2, r1.start);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    Int128 start = int128_max(r1.start, r2.start);
    Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
    return addrrange_make(start, int128_sub(end, start));
}
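
/*
 * Worked example (editorial sketch, not part of the original source):
 * clipping one range against another with the helpers above; the
 * concrete values are hypothetical.
 *
 *   AddrRange a = addrrange_make(int128_make64(0x1000), int128_make64(0x2000));
 *   AddrRange b = addrrange_make(int128_make64(0x2000), int128_make64(0x2000));
 *   if (addrrange_intersects(a, b)) {
 *       AddrRange c = addrrange_intersection(a, b);
 *       // c covers [0x2000, 0x3000): start 0x2000, size 0x1000
 *   }
 */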
enum ListenerDirection { Forward, Reverse };

#define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...)    \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners, link) { \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

#define MEMORY_LISTENER_CALL(_as, _callback, _direction, _section, _args...) \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &(_as)->listeners, link_as) {     \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &(_as)->listeners, link_as) { \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

/* No need to ref/unref .mr, the FlatRange keeps it alive.  */
#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...)  \
    do {                                                                \
        MemoryRegionSection mrs = section_from_flat_range(fr,           \
                address_space_to_flatview(as));                         \
        MEMORY_LISTENER_CALL(as, callback, dir, &mrs, ##_args);         \
    } while (0)
struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

struct MemoryRegionIoeventfd {
    AddrRange addr;
    bool match_data;
    uint64_t data;
    EventNotifier *e;
};

static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd *a,
                                           MemoryRegionIoeventfd *b)
{
    if (int128_lt(a->addr.start, b->addr.start)) {
        return true;
    } else if (int128_gt(a->addr.start, b->addr.start)) {
        return false;
    } else if (int128_lt(a->addr.size, b->addr.size)) {
        return true;
    } else if (int128_gt(a->addr.size, b->addr.size)) {
        return false;
    } else if (a->match_data < b->match_data) {
        return true;
    } else if (a->match_data > b->match_data) {
        return false;
    } else if (a->match_data) {
        if (a->data < b->data) {
            return true;
        } else if (a->data > b->data) {
            return false;
        }
    }
    if (a->e < b->e) {
        return true;
    } else if (a->e > b->e) {
        return false;
    }
    return false;
}

static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd *a,
                                          MemoryRegionIoeventfd *b)
{
    if (int128_eq(a->addr.start, b->addr.start) &&
        (!int128_nz(a->addr.size) || !int128_nz(b->addr.size) ||
         (int128_eq(a->addr.size, b->addr.size) &&
          (a->match_data == b->match_data) &&
          ((a->match_data && (a->data == b->data)) || !a->match_data) &&
          (a->e == b->e))))
        return true;

    return false;
}
/* Range of memory in the global map.  Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    hwaddr offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
    bool romd_mode;
    bool readonly;
    bool nonvolatile;
};

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

static inline MemoryRegionSection
section_from_flat_range(FlatRange *fr, FlatView *fv)
{
    return (MemoryRegionSection) {
        .mr = fr->mr,
        .fv = fv,
        .offset_within_region = fr->offset_in_region,
        .size = fr->addr.size,
        .offset_within_address_space = int128_get64(fr->addr.start),
        .readonly = fr->readonly,
        .nonvolatile = fr->nonvolatile,
    };
}
static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region
        && a->romd_mode == b->romd_mode
        && a->readonly == b->readonly
        && a->nonvolatile == b->nonvolatile;
}

static FlatView *flatview_new(MemoryRegion *mr_root)
{
    FlatView *view;

    view = g_new0(FlatView, 1);
    view->ref = 1;
    view->root = mr_root;
    memory_region_ref(mr_root);
    trace_flatview_new(view, mr_root);

    return view;
}
/* Insert a range into a given position.  Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = g_realloc(view->ranges,
                                 view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    memory_region_ref(range->mr);
    ++view->nr;
}
static void flatview_destroy(FlatView *view)
{
    int i;

    trace_flatview_destroy(view, view->root);
    if (view->dispatch) {
        address_space_dispatch_free(view->dispatch);
    }
    for (i = 0; i < view->nr; i++) {
        memory_region_unref(view->ranges[i].mr);
    }
    g_free(view->ranges);
    memory_region_unref(view->root);
    g_free(view);
}

static bool flatview_ref(FlatView *view)
{
    return qatomic_fetch_inc_nonzero(&view->ref) > 0;
}

void flatview_unref(FlatView *view)
{
    if (qatomic_fetch_dec(&view->ref) == 1) {
        trace_flatview_destroy_rcu(view, view->root);
        call_rcu(view, flatview_destroy, rcu);
    }
}
static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return int128_eq(addrrange_end(r1->addr), r2->addr.start)
        && r1->mr == r2->mr
        && int128_eq(int128_add(int128_make64(r1->offset_in_region),
                                r1->addr.size),
                     int128_make64(r2->offset_in_region))
        && r1->dirty_log_mask == r2->dirty_log_mask
        && r1->romd_mode == r2->romd_mode
        && r1->readonly == r2->readonly
        && r1->nonvolatile == r2->nonvolatile;
}

/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j, k;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
            ++j;
        }
        ++i;
        for (k = i; k < j; k++) {
            memory_region_unref(view->ranges[k].mr);
        }
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}
static bool memory_region_big_endian(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness != DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static void adjust_endianness(MemoryRegion *mr, uint64_t *data, MemOp op)
{
    if ((op & MO_BSWAP) != devend_memop(mr->ops->endianness)) {
        switch (op & MO_SIZE) {
        case MO_8:
            break;
        case MO_16:
            *data = bswap16(*data);
            break;
        case MO_32:
            *data = bswap32(*data);
            break;
        case MO_64:
            *data = bswap64(*data);
            break;
        default:
            g_assert_not_reached();
        }
    }
}

static inline void memory_region_shift_read_access(uint64_t *value,
                                                   signed shift,
                                                   uint64_t mask,
                                                   uint64_t tmp)
{
    if (shift >= 0) {
        *value |= (tmp & mask) << shift;
    } else {
        *value |= (tmp & mask) >> -shift;
    }
}

static inline uint64_t memory_region_shift_write_access(uint64_t *value,
                                                        signed shift,
                                                        uint64_t mask)
{
    uint64_t tmp;

    if (shift >= 0) {
        tmp = (*value >> shift) & mask;
    } else {
        tmp = (*value << -shift) & mask;
    }

    return tmp;
}
static hwaddr memory_region_to_absolute_addr(MemoryRegion *mr, hwaddr offset)
{
    MemoryRegion *root;
    hwaddr abs_addr = offset;

    abs_addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        abs_addr += root->addr;
    }

    return abs_addr;
}

static int get_cpu_index(void)
{
    if (current_cpu) {
        return current_cpu->cpu_index;
    }
    return -1;
}
static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
                                               hwaddr addr,
                                               uint64_t *value,
                                               unsigned size,
                                               signed shift,
                                               uint64_t mask,
                                               MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->read(mr->opaque, addr, size);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_READ)) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size,
                                     memory_region_name(mr));
    }
    memory_region_shift_read_access(value, shift, mask, tmp);
    return MEMTX_OK;
}

static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          signed shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs)
{
    uint64_t tmp = 0;
    MemTxResult r;

    r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_READ)) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size,
                                     memory_region_name(mr));
    }
    memory_region_shift_read_access(value, shift, mask, tmp);
    return r;
}

static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *value,
                                                unsigned size,
                                                signed shift,
                                                uint64_t mask,
                                                MemTxAttrs attrs)
{
    uint64_t tmp = memory_region_shift_write_access(value, shift, mask);

    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_WRITE)) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size,
                                      memory_region_name(mr));
    }
    mr->ops->write(mr->opaque, addr, tmp, size);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
                                                           hwaddr addr,
                                                           uint64_t *value,
                                                           unsigned size,
                                                           signed shift,
                                                           uint64_t mask,
                                                           MemTxAttrs attrs)
{
    uint64_t tmp = memory_region_shift_write_access(value, shift, mask);

    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_WRITE)) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size,
                                      memory_region_name(mr));
    }
    return mr->ops->write_with_attrs(mr->opaque, addr, tmp, size, attrs);
}
static MemTxResult access_with_adjusted_size(hwaddr addr,
                                             uint64_t *value,
                                             unsigned size,
                                             unsigned access_size_min,
                                             unsigned access_size_max,
                                             MemTxResult (*access_fn)
                                                         (MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          signed shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs),
                                             MemoryRegion *mr,
                                             MemTxAttrs attrs)
{
    uint64_t access_mask;
    unsigned access_size;
    unsigned i;
    MemTxResult r = MEMTX_OK;

    if (!access_size_min) {
        access_size_min = 1;
    }
    if (!access_size_max) {
        access_size_max = 4;
    }

    /* FIXME: support unaligned access? */
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = MAKE_64BIT_MASK(0, access_size * 8);
    if (memory_region_big_endian(mr)) {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size,
                           (size - access_size - i) * 8, access_mask, attrs);
        }
    } else {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size, i * 8,
                           access_mask, attrs);
        }
    }
    return r;
}
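
/*
 * Worked example (editorial note): a 4-byte access to a region whose
 * ops->impl.max_access_size is 2 is split into two 2-byte calls with
 * access_mask == MAKE_64BIT_MASK(0, 16) == 0xffff.  On a big-endian
 * device the first call uses shift (4 - 2 - 0) * 8 == 16, i.e. bits
 * [31:16] of the value, and the second uses shift 0; on a little-endian
 * device the shifts are 0 and then 16.
 */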
static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
{
    AddressSpace *as;

    while (mr->container) {
        mr = mr->container;
    }
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        if (mr == as->root) {
            return as;
        }
    }
    return NULL;
}
/* Render a memory region into the global view.  Ranges already present in
 * @view obscure the parts of @mr they overlap; @mr only fills the gaps.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 Int128 base,
                                 AddrRange clip,
                                 bool readonly,
                                 bool nonvolatile)
{
    MemoryRegion *subregion;
    unsigned i;
    hwaddr offset_in_region;
    Int128 remain;
    Int128 now;
    FlatRange fr;
    AddrRange tmp;

    if (!mr->enabled) {
        return;
    }

    int128_addto(&base, int128_make64(mr->addr));
    readonly |= mr->readonly;
    nonvolatile |= mr->nonvolatile;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        int128_subfrom(&base, int128_make64(mr->alias->addr));
        int128_subfrom(&base, int128_make64(mr->alias_offset));
        render_memory_region(view, mr->alias, base, clip,
                             readonly, nonvolatile);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip,
                             readonly, nonvolatile);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = int128_get64(int128_sub(clip.start, base));
    base = clip.start;
    remain = clip.size;

    fr.mr = mr;
    fr.dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    fr.romd_mode = mr->romd_mode;
    fr.readonly = readonly;
    fr.nonvolatile = nonvolatile;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && int128_nz(remain); ++i) {
        if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
            continue;
        }
        if (int128_lt(base, view->ranges[i].addr.start)) {
            now = int128_min(remain,
                             int128_sub(view->ranges[i].addr.start, base));
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            flatview_insert(view, i, &fr);
            ++i;
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
        now = int128_sub(int128_min(int128_add(base, remain),
                                    addrrange_end(view->ranges[i].addr)),
                         base);
        int128_addto(&base, now);
        offset_in_region += int128_get64(now);
        int128_subfrom(&remain, now);
    }
    if (int128_nz(remain)) {
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        flatview_insert(view, i, &fr);
    }
}
void flatview_for_each_range(FlatView *fv, flatview_cb cb, void *opaque)
{
    FlatRange *fr;

    assert(fv);
    assert(cb);

    FOR_EACH_FLAT_RANGE(fr, fv) {
        if (cb(fr->addr.start, fr->addr.size, fr->mr,
               fr->offset_in_region, opaque)) {
            break;
        }
    }
}
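
/*
 * Illustrative sketch (editorial, not part of the original source): a
 * flatview_cb that counts RAM-backed ranges; returning true stops the
 * walk early.  "count_ram_cb" and "nr_ram" are hypothetical names.
 *
 *   static bool count_ram_cb(Int128 start, Int128 len, const MemoryRegion *mr,
 *                            hwaddr offset_in_region, void *opaque)
 *   {
 *       if (memory_region_is_ram((MemoryRegion *)mr)) {
 *           (*(unsigned *)opaque)++;
 *       }
 *       return false;   // keep iterating
 *   }
 *
 *   unsigned nr_ram = 0;
 *   flatview_for_each_range(fv, count_ram_cb, &nr_ram);
 */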
static MemoryRegion *memory_region_get_flatview_root(MemoryRegion *mr)
{
    while (mr->enabled) {
        if (mr->alias) {
            if (!mr->alias_offset && int128_ge(mr->size, mr->alias->size)) {
                /* The alias is included in its entirety.  Use it as
                 * the "real" root, so that we can share more FlatViews.
                 */
                mr = mr->alias;
                continue;
            }
        } else if (!mr->terminates) {
            unsigned int found = 0;
            MemoryRegion *child, *next = NULL;
            QTAILQ_FOREACH(child, &mr->subregions, subregions_link) {
                if (child->enabled) {
                    if (++found > 1) {
                        next = NULL;
                        break;
                    }
                    if (!child->addr && int128_ge(mr->size, child->size)) {
                        /* A child is included in its entirety.  If it's the only
                         * enabled one, use it in the hope of finding an alias down the
                         * way.  This will also let us share FlatViews.
                         */
                        next = child;
                    }
                }
            }
            if (found == 0) {
                return NULL;
            }
            if (next) {
                mr = next;
                continue;
            }
        }

        return mr;
    }

    return NULL;
}

/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView *generate_memory_topology(MemoryRegion *mr)
{
    int i;
    FlatView *view;

    view = flatview_new(mr);

    if (mr) {
        render_memory_region(view, mr, int128_zero(),
                             addrrange_make(int128_zero(), int128_2_64()),
                             false, false);
    }
    flatview_simplify(view);

    view->dispatch = address_space_dispatch_new(view);
    for (i = 0; i < view->nr; i++) {
        MemoryRegionSection mrs =
            section_from_flat_range(&view->ranges[i], view);
        flatview_add_to_dispatch(view, &mrs);
    }
    address_space_dispatch_compact(view->dispatch);
    g_hash_table_replace(flat_views, mr, view);

    return view;
}
static void address_space_add_del_ioeventfds(AddressSpace *as,
                                             MemoryRegionIoeventfd *fds_new,
                                             unsigned fds_new_nb,
                                             MemoryRegionIoeventfd *fds_old,
                                             unsigned fds_old_nb)
{
    unsigned iold, inew;
    MemoryRegionIoeventfd *fd;
    MemoryRegionSection section;

    /* Generate a symmetric difference of the old and new fd sets, adding
     * and deleting as necessary.
     */

    iold = inew = 0;
    while (iold < fds_old_nb || inew < fds_new_nb) {
        if (iold < fds_old_nb
            && (inew == fds_new_nb
                || memory_region_ioeventfd_before(&fds_old[iold],
                                                  &fds_new[inew]))) {
            fd = &fds_old[iold];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_del, Forward, &section,
                                 fd->match_data, fd->data, fd->e);
            ++iold;
        } else if (inew < fds_new_nb
                   && (iold == fds_old_nb
                       || memory_region_ioeventfd_before(&fds_new[inew],
                                                         &fds_old[iold]))) {
            fd = &fds_new[inew];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_add, Reverse, &section,
                                 fd->match_data, fd->data, fd->e);
            ++inew;
        } else {
            ++iold;
            ++inew;
        }
    }
}
FlatView *address_space_get_flatview(AddressSpace *as)
{
    FlatView *view;

    RCU_READ_LOCK_GUARD();
    do {
        view = address_space_to_flatview(as);
        /* If somebody has replaced as->current_map concurrently,
         * flatview_ref returns false.
         */
    } while (!flatview_ref(view));
    return view;
}
static void address_space_update_ioeventfds(AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    unsigned ioeventfd_nb = 0;
    unsigned ioeventfd_max;
    MemoryRegionIoeventfd *ioeventfds;
    AddrRange tmp;
    unsigned i;

    /*
     * It is likely that the number of ioeventfds hasn't changed much, so use
     * the previous size as the starting value, with some headroom to avoid
     * gratuitous reallocations.
     */
    ioeventfd_max = QEMU_ALIGN_UP(as->ioeventfd_nb, 4);
    ioeventfds = g_new(MemoryRegionIoeventfd, ioeventfd_max);

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
            tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
                                  int128_sub(fr->addr.start,
                                             int128_make64(fr->offset_in_region)));
            if (addrrange_intersects(fr->addr, tmp)) {
                ++ioeventfd_nb;
                if (ioeventfd_nb > ioeventfd_max) {
                    ioeventfd_max = MAX(ioeventfd_max * 2, 4);
                    ioeventfds = g_realloc(ioeventfds,
                                           ioeventfd_max * sizeof(*ioeventfds));
                }
                ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
                ioeventfds[ioeventfd_nb-1].addr = tmp;
            }
        }
    }

    address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
                                     as->ioeventfds, as->ioeventfd_nb);

    g_free(as->ioeventfds);
    as->ioeventfds = ioeventfds;
    as->ioeventfd_nb = ioeventfd_nb;
    flatview_unref(view);
}
/*
 * Notify the memory listeners about the coalesced IO change events of
 * range `cmr'.  Only the part that intersects the specified FlatRange
 * is sent.
 */
static void flat_range_coalesced_io_notify(FlatRange *fr, AddressSpace *as,
                                           CoalescedMemoryRange *cmr, bool add)
{
    AddrRange tmp;

    tmp = addrrange_shift(cmr->addr,
                          int128_sub(fr->addr.start,
                                     int128_make64(fr->offset_in_region)));
    if (!addrrange_intersects(tmp, fr->addr)) {
        return;
    }
    tmp = addrrange_intersection(tmp, fr->addr);

    if (add) {
        MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, coalesced_io_add,
                                      int128_get64(tmp.start),
                                      int128_get64(tmp.size));
    } else {
        MEMORY_LISTENER_UPDATE_REGION(fr, as, Reverse, coalesced_io_del,
                                      int128_get64(tmp.start),
                                      int128_get64(tmp.size));
    }
}
static void flat_range_coalesced_io_del(FlatRange *fr, AddressSpace *as)
{
    CoalescedMemoryRange *cmr;

    QTAILQ_FOREACH(cmr, &fr->mr->coalesced, link) {
        flat_range_coalesced_io_notify(fr, as, cmr, false);
    }
}

static void flat_range_coalesced_io_add(FlatRange *fr, AddressSpace *as)
{
    MemoryRegion *mr = fr->mr;
    CoalescedMemoryRange *cmr;

    if (QTAILQ_EMPTY(&mr->coalesced)) {
        return;
    }

    QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
        flat_range_coalesced_io_notify(fr, as, cmr, true);
    }
}
static void address_space_update_topology_pass(AddressSpace *as,
                                               const FlatView *old_view,
                                               const FlatView *new_view,
                                               bool adding)
{
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view->nr || inew < new_view->nr) {
        if (iold < old_view->nr) {
            frold = &old_view->ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view->nr) {
            frnew = &new_view->ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || int128_lt(frold->addr.start, frnew->addr.start)
                || (int128_eq(frold->addr.start, frnew->addr.start)
                    && !flatrange_equal(frold, frnew)))) {
            /* In old but not in new, or in both but attributes changed. */

            if (!adding) {
                flat_range_coalesced_io_del(frold, as);
                MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
            }

            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both and unchanged (except logging may have changed) */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
                if (frnew->dirty_log_mask & ~frold->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
                if (frold->dirty_log_mask & ~frnew->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
                flat_range_coalesced_io_add(frnew, as);
            }

            ++inew;
        }
    }
}
static void flatviews_init(void)
{
    static FlatView *empty_view;

    if (flat_views) {
        return;
    }

    flat_views = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL,
                                       (GDestroyNotify) flatview_unref);
    if (!empty_view) {
        empty_view = generate_memory_topology(NULL);
        /* We keep it alive forever in the global variable. */
        flatview_ref(empty_view);
    }
    g_hash_table_replace(flat_views, NULL, empty_view);
    flatview_ref(empty_view);
}

static void flatviews_reset(void)
{
    AddressSpace *as;

    if (flat_views) {
        g_hash_table_unref(flat_views);
        flat_views = NULL;
    }
    flatviews_init();

    /* Render unique FVs */
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        MemoryRegion *physmr = memory_region_get_flatview_root(as->root);

        if (g_hash_table_lookup(flat_views, physmr)) {
            continue;
        }

        generate_memory_topology(physmr);
    }
}
static void address_space_set_flatview(AddressSpace *as)
{
    FlatView *old_view = address_space_to_flatview(as);
    MemoryRegion *physmr = memory_region_get_flatview_root(as->root);
    FlatView *new_view = g_hash_table_lookup(flat_views, physmr);

    assert(new_view);

    if (old_view == new_view) {
        return;
    }

    if (old_view) {
        flatview_ref(old_view);
    }

    flatview_ref(new_view);

    if (!QTAILQ_EMPTY(&as->listeners)) {
        FlatView tmpview = { .nr = 0 }, *old_view2 = old_view;

        if (!old_view2) {
            old_view2 = &tmpview;
        }
        address_space_update_topology_pass(as, old_view2, new_view, false);
        address_space_update_topology_pass(as, old_view2, new_view, true);
    }

    /* Writes are protected by the BQL. */
    qatomic_rcu_set(&as->current_map, new_view);
    if (old_view) {
        flatview_unref(old_view);
    }

    /* Note that all the old MemoryRegions are still alive up to this
     * point.  This relieves most MemoryListeners from the need to
     * ref/unref the MemoryRegions they get---unless they use them
     * outside the iothread mutex, in which case precise reference
     * counting is necessary.
     */
    if (old_view) {
        flatview_unref(old_view);
    }
}
static void address_space_update_topology(AddressSpace *as)
{
    MemoryRegion *physmr = memory_region_get_flatview_root(as->root);

    flatviews_init();
    if (!g_hash_table_lookup(flat_views, physmr)) {
        generate_memory_topology(physmr);
    }
    address_space_set_flatview(as);
}
void memory_region_transaction_begin(void)
{
    qemu_flush_coalesced_mmio_buffer();
    ++memory_region_transaction_depth;
}

void memory_region_transaction_commit(void)
{
    AddressSpace *as;

    assert(memory_region_transaction_depth);
    assert(qemu_mutex_iothread_locked());

    --memory_region_transaction_depth;
    if (!memory_region_transaction_depth) {
        if (memory_region_update_pending) {
            flatviews_reset();

            MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);

            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_set_flatview(as);
                address_space_update_ioeventfds(as);
            }
            memory_region_update_pending = false;
            ioeventfd_update_pending = false;
            MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
        } else if (ioeventfd_update_pending) {
            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_ioeventfds(as);
            }
            ioeventfd_update_pending = false;
        }
    }
}
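
/*
 * Typical usage sketch (editorial, names and addresses hypothetical):
 * batch several topology changes so listeners and flatview rebuilds run
 * once, at the final commit.
 *
 *   memory_region_transaction_begin();
 *   memory_region_del_subregion(sysmem, dev_mr);
 *   memory_region_add_subregion(sysmem, 0xfe000000, dev_mr);
 *   memory_region_set_enabled(dev_mr, true);
 *   memory_region_transaction_commit();   // one topology update here
 */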
static void memory_region_destructor_none(MemoryRegion *mr)
{
}

static void memory_region_destructor_ram(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_block);
}

static bool memory_region_need_escape(char c)
{
    return c == '/' || c == '[' || c == '\\' || c == ']';
}

static char *memory_region_escape_name(const char *name)
{
    const char *p;
    char *escaped, *q;
    uint8_t c;
    size_t bytes = 0;

    for (p = name; *p; p++) {
        bytes += memory_region_need_escape(*p) ? 4 : 1;
    }
    if (bytes == p - name) {
        return g_memdup(name, bytes + 1);
    }

    escaped = g_malloc(bytes + 1);
    for (p = name, q = escaped; *p; p++) {
        c = *p;
        if (unlikely(memory_region_need_escape(c))) {
            *q++ = '\\';
            *q++ = 'x';
            *q++ = "0123456789abcdef"[c >> 4];
            c = "0123456789abcdef"[c & 15];
        }
        *q++ = c;
    }
    *q = 0;

    return escaped;
}
static void memory_region_do_init(MemoryRegion *mr,
                                  Object *owner,
                                  const char *name,
                                  uint64_t size)
{
    mr->size = int128_make64(size);
    if (size == UINT64_MAX) {
        mr->size = int128_2_64();
    }
    mr->name = g_strdup(name);
    mr->owner = owner;
    mr->ram_block = NULL;

    if (name) {
        char *escaped_name = memory_region_escape_name(name);
        char *name_array = g_strdup_printf("%s[*]", escaped_name);

        if (!owner) {
            owner = container_get(qdev_get_machine(), "/unattached");
        }

        object_property_add_child(owner, name_array, OBJECT(mr));
        object_unref(OBJECT(mr));
        g_free(name_array);
        g_free(escaped_name);
    }
}

void memory_region_init(MemoryRegion *mr,
                        Object *owner,
                        const char *name,
                        uint64_t size)
{
    object_initialize(mr, sizeof(*mr), TYPE_MEMORY_REGION);
    memory_region_do_init(mr, owner, name, size);
}
static void memory_region_get_container(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    char *path = (char *)"";

    if (mr->container) {
        path = object_get_canonical_path(OBJECT(mr->container));
    }
    visit_type_str(v, name, &path, errp);
    if (mr->container) {
        g_free(path);
    }
}

static Object *memory_region_resolve_container(Object *obj, void *opaque,
                                               const char *part)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    return OBJECT(mr->container);
}

static void memory_region_get_priority(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    int32_t value = mr->priority;

    visit_type_int32(v, name, &value, errp);
}

static void memory_region_get_size(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = memory_region_size(mr);

    visit_type_uint64(v, name, &value, errp);
}
static void memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    ObjectProperty *op;

    mr->ops = &unassigned_mem_ops;
    mr->enabled = true;
    mr->romd_mode = true;
    mr->destructor = memory_region_destructor_none;
    QTAILQ_INIT(&mr->subregions);
    QTAILQ_INIT(&mr->coalesced);

    op = object_property_add(OBJECT(mr), "container",
                             "link<" TYPE_MEMORY_REGION ">",
                             memory_region_get_container,
                             NULL, /* memory_region_set_container */
                             NULL, NULL);
    op->resolve = memory_region_resolve_container;

    object_property_add_uint64_ptr(OBJECT(mr), "addr",
                                   &mr->addr, OBJ_PROP_FLAG_READ);
    object_property_add(OBJECT(mr), "priority", "uint32",
                        memory_region_get_priority,
                        NULL, /* memory_region_set_priority */
                        NULL, NULL);
    object_property_add(OBJECT(mr), "size", "uint64",
                        memory_region_get_size,
                        NULL, /* memory_region_set_size, */
                        NULL, NULL);
}

static void iommu_memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    mr->is_iommu = true;
}
static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
}

static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
                                   unsigned size, bool is_write,
                                   MemTxAttrs attrs)
{
    return false;
}

const MemoryRegionOps unassigned_mem_ops = {
    .valid.accepts = unassigned_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static uint64_t memory_region_ram_device_read(void *opaque,
                                              hwaddr addr, unsigned size)
{
    MemoryRegion *mr = opaque;
    uint64_t data = (uint64_t)~0;

    switch (size) {
    case 1:
        data = *(uint8_t *)(mr->ram_block->host + addr);
        break;
    case 2:
        data = *(uint16_t *)(mr->ram_block->host + addr);
        break;
    case 4:
        data = *(uint32_t *)(mr->ram_block->host + addr);
        break;
    case 8:
        data = *(uint64_t *)(mr->ram_block->host + addr);
        break;
    }

    trace_memory_region_ram_device_read(get_cpu_index(), mr, addr, data, size);

    return data;
}

static void memory_region_ram_device_write(void *opaque, hwaddr addr,
                                           uint64_t data, unsigned size)
{
    MemoryRegion *mr = opaque;

    trace_memory_region_ram_device_write(get_cpu_index(), mr, addr, data, size);

    switch (size) {
    case 1:
        *(uint8_t *)(mr->ram_block->host + addr) = (uint8_t)data;
        break;
    case 2:
        *(uint16_t *)(mr->ram_block->host + addr) = (uint16_t)data;
        break;
    case 4:
        *(uint32_t *)(mr->ram_block->host + addr) = (uint32_t)data;
        break;
    case 8:
        *(uint64_t *)(mr->ram_block->host + addr) = data;
        break;
    }
}

static const MemoryRegionOps ram_device_mem_ops = {
    .read = memory_region_ram_device_read,
    .write = memory_region_ram_device_write,
    .endianness = DEVICE_HOST_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = true,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = true,
    },
};
bool memory_region_access_valid(MemoryRegion *mr,
                                hwaddr addr,
                                unsigned size,
                                bool is_write,
                                MemTxAttrs attrs)
{
    if (mr->ops->valid.accepts
        && !mr->ops->valid.accepts(mr->opaque, addr, size, is_write, attrs)) {
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid %s at addr 0x%" HWADDR_PRIX
                      ", size %u, region '%s', reason: rejected\n",
                      is_write ? "write" : "read",
                      addr, size, memory_region_name(mr));
        return false;
    }

    if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid %s at addr 0x%" HWADDR_PRIX
                      ", size %u, region '%s', reason: unaligned\n",
                      is_write ? "write" : "read",
                      addr, size, memory_region_name(mr));
        return false;
    }

    /* Treat a zero max_access_size as "all sizes valid", for compatibility */
    if (!mr->ops->valid.max_access_size) {
        return true;
    }

    if (size > mr->ops->valid.max_access_size
        || size < mr->ops->valid.min_access_size) {
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid %s at addr 0x%" HWADDR_PRIX
                      ", size %u, region '%s', reason: invalid size "
                      "(min:%u max:%u)\n",
                      is_write ? "write" : "read",
                      addr, size, memory_region_name(mr),
                      mr->ops->valid.min_access_size,
                      mr->ops->valid.max_access_size);
        return false;
    }

    return true;
}
static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *pval,
                                                unsigned size,
                                                MemTxAttrs attrs)
{
    *pval = 0;

    if (mr->ops->read) {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_accessor,
                                         mr, attrs);
    } else {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_with_attrs_accessor,
                                         mr, attrs);
    }
}

MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
                                        hwaddr addr,
                                        uint64_t *pval,
                                        MemOp op,
                                        MemTxAttrs attrs)
{
    unsigned size = memop_size(op);
    MemTxResult r;

    if (mr->alias) {
        return memory_region_dispatch_read(mr->alias,
                                           mr->alias_offset + addr,
                                           pval, op, attrs);
    }
    if (!memory_region_access_valid(mr, addr, size, false, attrs)) {
        *pval = unassigned_mem_read(mr, addr, size);
        return MEMTX_DECODE_ERROR;
    }

    r = memory_region_dispatch_read1(mr, addr, pval, size, attrs);
    adjust_endianness(mr, pval, op);
    return r;
}

/* Return true if an eventfd was signalled */
static bool memory_region_dispatch_write_eventfds(MemoryRegion *mr,
                                                  hwaddr addr,
                                                  uint64_t data,
                                                  unsigned size,
                                                  MemTxAttrs attrs)
{
    MemoryRegionIoeventfd ioeventfd = {
        .addr = addrrange_make(int128_make64(addr), int128_make64(size)),
        .data = data,
    };
    unsigned i;

    for (i = 0; i < mr->ioeventfd_nb; i++) {
        ioeventfd.match_data = mr->ioeventfds[i].match_data;
        ioeventfd.e = mr->ioeventfds[i].e;

        if (memory_region_ioeventfd_equal(&ioeventfd, &mr->ioeventfds[i])) {
            event_notifier_set(ioeventfd.e);
            return true;
        }
    }

    return false;
}
MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
                                         hwaddr addr,
                                         uint64_t data,
                                         MemOp op,
                                         MemTxAttrs attrs)
{
    unsigned size = memop_size(op);

    if (mr->alias) {
        return memory_region_dispatch_write(mr->alias,
                                            mr->alias_offset + addr,
                                            data, op, attrs);
    }
    if (!memory_region_access_valid(mr, addr, size, true, attrs)) {
        unassigned_mem_write(mr, addr, data, size);
        return MEMTX_DECODE_ERROR;
    }

    adjust_endianness(mr, &data, op);

    if ((!kvm_eventfds_enabled()) &&
        memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) {
        return MEMTX_OK;
    }

    if (mr->ops->write) {
        return access_with_adjusted_size(addr, &data, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_write_accessor, mr,
                                         attrs);
    } else {
        return
            access_with_adjusted_size(addr, &data, size,
                                      mr->ops->impl.min_access_size,
                                      mr->ops->impl.max_access_size,
                                      memory_region_write_with_attrs_accessor,
                                      mr, attrs);
    }
}
void memory_region_init_io(MemoryRegion *mr,
                           Object *owner,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->ops = ops ? ops : &unassigned_mem_ops;
    mr->opaque = opaque;
    mr->terminates = true;
}
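
/*
 * Illustrative caller sketch (editorial, not part of this file): a device
 * defines MemoryRegionOps and registers a 4 KiB MMIO window.  All
 * "mydev_*" names and the MyDevState type are hypothetical.
 *
 *   static uint64_t mydev_read(void *opaque, hwaddr addr, unsigned size)
 *   {
 *       MyDevState *s = opaque;
 *       return s->regs[addr >> 2];
 *   }
 *
 *   static void mydev_write(void *opaque, hwaddr addr, uint64_t val,
 *                           unsigned size)
 *   {
 *       MyDevState *s = opaque;
 *       s->regs[addr >> 2] = val;
 *   }
 *
 *   static const MemoryRegionOps mydev_ops = {
 *       .read = mydev_read,
 *       .write = mydev_write,
 *       .endianness = DEVICE_NATIVE_ENDIAN,
 *       .impl.min_access_size = 4,
 *       .impl.max_access_size = 4,
 *   };
 *
 *   memory_region_init_io(&s->iomem, OBJECT(s), &mydev_ops, s,
 *                         "mydev-mmio", 0x1000);
 */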
void memory_region_init_ram_nomigrate(MemoryRegion *mr,
                                      Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp)
{
    memory_region_init_ram_flags_nomigrate(mr, owner, name, size, 0, errp);
}

void memory_region_init_ram_flags_nomigrate(MemoryRegion *mr,
                                            Object *owner,
                                            const char *name,
                                            uint64_t size,
                                            uint32_t ram_flags,
                                            Error **errp)
{
    Error *err = NULL;
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, ram_flags, mr, &err);
    if (err) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
    }
}

void memory_region_init_resizeable_ram(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       uint64_t max_size,
                                       void (*resized)(const char*,
                                                       uint64_t length,
                                                       void *host),
                                       Error **errp)
{
    Error *err = NULL;
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized,
                                              mr, &err);
    if (err) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
    }
}

void memory_region_init_ram_from_file(MemoryRegion *mr,
                                      Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      uint64_t align,
                                      uint32_t ram_flags,
                                      const char *path,
                                      bool readonly,
                                      Error **errp)
{
    Error *err = NULL;
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->readonly = readonly;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->align = align;
    mr->ram_block = qemu_ram_alloc_from_file(size, mr, ram_flags, path,
                                             readonly, &err);
    if (err) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
    }
}

void memory_region_init_ram_from_fd(MemoryRegion *mr,
                                    Object *owner,
                                    const char *name,
                                    uint64_t size,
                                    uint32_t ram_flags,
                                    int fd,
                                    ram_addr_t offset,
                                    Error **errp)
{
    Error *err = NULL;
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_from_fd(size, mr, ram_flags, fd, offset,
                                           &err);
    if (err) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
    }
}
void memory_region_init_ram_ptr(MemoryRegion *mr,
                                Object *owner,
                                const char *name,
                                uint64_t size,
                                void *ptr)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;

    /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL. */
    assert(ptr != NULL);
    mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
}

void memory_region_init_ram_device_ptr(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       void *ptr)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->ram_device = true;
    mr->ops = &ram_device_mem_ops;
    mr->opaque = mr;
    mr->destructor = memory_region_destructor_ram;

    /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL. */
    assert(ptr != NULL);
    mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
}
void memory_region_init_alias(MemoryRegion *mr,
                              Object *owner,
                              const char *name,
                              MemoryRegion *orig,
                              hwaddr offset,
                              uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->alias = orig;
    mr->alias_offset = offset;
}
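
/*
 * Usage sketch (editorial, names and addresses hypothetical): expose the
 * second 4 KiB page of "ram" at a second guest-physical address.
 *
 *   memory_region_init_alias(&alias, owner, "ram-window", &ram,
 *                            0x1000, 0x1000);
 *   memory_region_add_subregion(sysmem, 0x80000000, &alias);
 */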
void memory_region_init_rom_nomigrate(MemoryRegion *mr,
                                      Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp)
{
    memory_region_init_ram_flags_nomigrate(mr, owner, name, size, 0, errp);
    mr->readonly = true;
}

void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
                                             Object *owner,
                                             const MemoryRegionOps *ops,
                                             void *opaque,
                                             const char *name,
                                             uint64_t size,
                                             Error **errp)
{
    Error *err = NULL;
    assert(ops);
    memory_region_init(mr, owner, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->rom_device = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, 0, mr, &err);
    if (err) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
    }
}

void memory_region_init_iommu(void *_iommu_mr,
                              size_t instance_size,
                              const char *mrtypename,
                              Object *owner,
                              const char *name,
                              uint64_t size)
{
    struct IOMMUMemoryRegion *iommu_mr;
    struct MemoryRegion *mr;

    object_initialize(_iommu_mr, instance_size, mrtypename);
    mr = MEMORY_REGION(_iommu_mr);
    memory_region_do_init(mr, owner, name, size);
    iommu_mr = IOMMU_MEMORY_REGION(mr);
    mr->terminates = true;  /* then re-forwards */
    QLIST_INIT(&iommu_mr->iommu_notify);
    iommu_mr->iommu_notify_flags = IOMMU_NOTIFIER_NONE;
}
static void memory_region_finalize(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    assert(!mr->container);

    /* We know the region is not visible in any address space (it
     * does not have a container and cannot be a root either, because
     * it has no references), so we can blindly clear mr->enabled.
     * memory_region_set_enabled instead could trigger a transaction
     * and cause an infinite loop.
     */
    mr->enabled = false;
    memory_region_transaction_begin();
    while (!QTAILQ_EMPTY(&mr->subregions)) {
        MemoryRegion *subregion = QTAILQ_FIRST(&mr->subregions);
        memory_region_del_subregion(mr, subregion);
    }
    memory_region_transaction_commit();

    mr->destructor(mr);
    memory_region_clear_coalescing(mr);
    g_free((char *)mr->name);
    g_free(mr->ioeventfds);
}
Object *memory_region_owner(MemoryRegion *mr)
{
    Object *obj = OBJECT(mr);
    return obj->parent;
}

void memory_region_ref(MemoryRegion *mr)
{
    /* MMIO callbacks most likely will access data that belongs
     * to the owner, hence the need to ref/unref the owner whenever
     * the memory region is in use.
     *
     * The memory region is a child of its owner.  As long as the
     * owner doesn't call unparent itself on the memory region,
     * ref-ing the owner will also keep the memory region alive.
     * Memory regions without an owner are supposed to never go away;
     * we do not ref/unref them because it slows down DMA noticeably.
     */
    if (mr && mr->owner) {
        object_ref(mr->owner);
    }
}

void memory_region_unref(MemoryRegion *mr)
{
    if (mr && mr->owner) {
        object_unref(mr->owner);
    }
}
uint64_t memory_region_size(MemoryRegion *mr)
{
    if (int128_eq(mr->size, int128_2_64())) {
        return UINT64_MAX;
    }
    return int128_get64(mr->size);
}

const char *memory_region_name(const MemoryRegion *mr)
{
    if (!mr->name) {
        ((MemoryRegion *)mr)->name =
            g_strdup(object_get_canonical_path_component(OBJECT(mr)));
    }
    return mr->name;
}

bool memory_region_is_ram_device(MemoryRegion *mr)
{
    return mr->ram_device;
}

bool memory_region_is_protected(MemoryRegion *mr)
{
    return mr->ram && (mr->ram_block->flags & RAM_PROTECTED);
}

uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr)
{
    uint8_t mask = mr->dirty_log_mask;
    RAMBlock *rb = mr->ram_block;

    if (global_dirty_tracking && ((rb && qemu_ram_is_migratable(rb)) ||
                                  memory_region_is_iommu(mr))) {
        mask |= (1 << DIRTY_MEMORY_MIGRATION);
    }

    if (tcg_enabled() && rb) {
        /* TCG only cares about dirty memory logging for RAM, not IOMMU. */
        mask |= (1 << DIRTY_MEMORY_CODE);
    }

    return mask;
}

bool memory_region_is_logging(MemoryRegion *mr, uint8_t client)
{
    return memory_region_get_dirty_log_mask(mr) & (1 << client);
}
static int memory_region_update_iommu_notify_flags(IOMMUMemoryRegion *iommu_mr,
                                                   Error **errp)
{
    IOMMUNotifierFlag flags = IOMMU_NOTIFIER_NONE;
    IOMMUNotifier *iommu_notifier;
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
    int ret = 0;

    IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
        flags |= iommu_notifier->notifier_flags;
    }

    if (flags != iommu_mr->iommu_notify_flags && imrc->notify_flag_changed) {
        ret = imrc->notify_flag_changed(iommu_mr,
                                        iommu_mr->iommu_notify_flags,
                                        flags, errp);
    }

    if (!ret) {
        iommu_mr->iommu_notify_flags = flags;
    }
    return ret;
}

int memory_region_iommu_set_page_size_mask(IOMMUMemoryRegion *iommu_mr,
                                           uint64_t page_size_mask,
                                           Error **errp)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
    int ret = 0;

    if (imrc->iommu_set_page_size_mask) {
        ret = imrc->iommu_set_page_size_mask(iommu_mr, page_size_mask, errp);
    }
    return ret;
}
int memory_region_register_iommu_notifier(MemoryRegion *mr,
                                          IOMMUNotifier *n, Error **errp)
{
    IOMMUMemoryRegion *iommu_mr;
    int ret;

    if (mr->alias) {
        return memory_region_register_iommu_notifier(mr->alias, n, errp);
    }

    /* We need to register for at least one bitfield */
    iommu_mr = IOMMU_MEMORY_REGION(mr);
    assert(n->notifier_flags != IOMMU_NOTIFIER_NONE);
    assert(n->start <= n->end);
    assert(n->iommu_idx >= 0 &&
           n->iommu_idx < memory_region_iommu_num_indexes(iommu_mr));

    QLIST_INSERT_HEAD(&iommu_mr->iommu_notify, n, node);
    ret = memory_region_update_iommu_notify_flags(iommu_mr, errp);
    if (ret) {
        QLIST_REMOVE(n, node);
    }
    return ret;
}

uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    if (imrc->get_min_page_size) {
        return imrc->get_min_page_size(iommu_mr);
    }
    return TARGET_PAGE_SIZE;
}
void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n)
{
    MemoryRegion *mr = MEMORY_REGION(iommu_mr);
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
    hwaddr addr, granularity;
    IOMMUTLBEntry iotlb;

    /* If the IOMMU has its own replay callback, override */
    if (imrc->replay) {
        imrc->replay(iommu_mr, n);
        return;
    }

    granularity = memory_region_iommu_get_min_page_size(iommu_mr);

    for (addr = 0; addr < memory_region_size(mr); addr += granularity) {
        iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, n->iommu_idx);
        if (iotlb.perm != IOMMU_NONE) {
            n->notify(n, &iotlb);
        }

        /* if (2^64 - MR size) < granularity, it's possible to get an
         * infinite loop here.  This should catch such a wraparound */
        if ((addr + granularity) < addr) {
            break;
        }
    }
}

void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
                                             IOMMUNotifier *n)
{
    IOMMUMemoryRegion *iommu_mr;

    if (mr->alias) {
        memory_region_unregister_iommu_notifier(mr->alias, n);
        return;
    }
    QLIST_REMOVE(n, node);
    iommu_mr = IOMMU_MEMORY_REGION(mr);
    memory_region_update_iommu_notify_flags(iommu_mr, NULL);
}
void memory_region_notify_iommu_one(IOMMUNotifier *notifier,
                                    IOMMUTLBEvent *event)
{
    IOMMUTLBEntry *entry = &event->entry;
    hwaddr entry_end = entry->iova + entry->addr_mask;
    IOMMUTLBEntry tmp = *entry;

    if (event->type == IOMMU_NOTIFIER_UNMAP) {
        assert(entry->perm == IOMMU_NONE);
    }

    /*
     * Skip the notification if it does not overlap with the registered
     * range.
     */
    if (notifier->start > entry_end || notifier->end < entry->iova) {
        return;
    }

    if (notifier->notifier_flags & IOMMU_NOTIFIER_DEVIOTLB_UNMAP) {
        /* Crop (iova, addr_mask) to range */
        tmp.iova = MAX(tmp.iova, notifier->start);
        tmp.addr_mask = MIN(entry_end, notifier->end) - tmp.iova;
    } else {
        assert(entry->iova >= notifier->start && entry_end <= notifier->end);
    }

    if (event->type & notifier->notifier_flags) {
        notifier->notify(notifier, &tmp);
    }
}

void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
                                int iommu_idx,
                                IOMMUTLBEvent event)
{
    IOMMUNotifier *iommu_notifier;

    assert(memory_region_is_iommu(MEMORY_REGION(iommu_mr)));

    IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
        if (iommu_notifier->iommu_idx == iommu_idx) {
            memory_region_notify_iommu_one(iommu_notifier, &event);
        }
    }
}
int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr,
                                 enum IOMMUMemoryRegionAttr attr,
                                 void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    if (!imrc->get_attr) {
        return -EINVAL;
    }

    return imrc->get_attr(iommu_mr, attr, data);
}

int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr,
                                       MemTxAttrs attrs)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    if (!imrc->attrs_to_index) {
        return 0;
    }

    return imrc->attrs_to_index(iommu_mr, attrs);
}

int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    if (!imrc->num_indexes) {
        return 1;
    }

    return imrc->num_indexes(iommu_mr);
}
RamDiscardManager *memory_region_get_ram_discard_manager(MemoryRegion *mr)
{
    if (!memory_region_is_mapped(mr) || !memory_region_is_ram(mr)) {
        return NULL;
    }
    return mr->rdm;
}

void memory_region_set_ram_discard_manager(MemoryRegion *mr,
                                           RamDiscardManager *rdm)
{
    g_assert(memory_region_is_ram(mr) && !memory_region_is_mapped(mr));
    g_assert(!rdm || !mr->rdm);
    mr->rdm = rdm;
}

uint64_t ram_discard_manager_get_min_granularity(const RamDiscardManager *rdm,
                                                 const MemoryRegion *mr)
{
    RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm);

    g_assert(rdmc->get_min_granularity);
    return rdmc->get_min_granularity(rdm, mr);
}

bool ram_discard_manager_is_populated(const RamDiscardManager *rdm,
                                      const MemoryRegionSection *section)
{
    RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm);

    g_assert(rdmc->is_populated);
    return rdmc->is_populated(rdm, section);
}

int ram_discard_manager_replay_populated(const RamDiscardManager *rdm,
                                         MemoryRegionSection *section,
                                         ReplayRamPopulate replay_fn,
                                         void *opaque)
{
    RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm);

    g_assert(rdmc->replay_populated);
    return rdmc->replay_populated(rdm, section, replay_fn, opaque);
}

void ram_discard_manager_replay_discarded(const RamDiscardManager *rdm,
                                          MemoryRegionSection *section,
                                          ReplayRamDiscard replay_fn,
                                          void *opaque)
{
    RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm);

    g_assert(rdmc->replay_discarded);
    rdmc->replay_discarded(rdm, section, replay_fn, opaque);
}

void ram_discard_manager_register_listener(RamDiscardManager *rdm,
                                           RamDiscardListener *rdl,
                                           MemoryRegionSection *section)
{
    RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm);

    g_assert(rdmc->register_listener);
    rdmc->register_listener(rdm, rdl, section);
}

void ram_discard_manager_unregister_listener(RamDiscardManager *rdm,
                                             RamDiscardListener *rdl)
{
    RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm);

    g_assert(rdmc->unregister_listener);
    rdmc->unregister_listener(rdm, rdl);
}
void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
{
    uint8_t mask = 1 << client;
    uint8_t old_logging;

    assert(client == DIRTY_MEMORY_VGA);
    old_logging = mr->vga_logging_count;
    mr->vga_logging_count += log ? 1 : -1;
    if (!!old_logging == !!mr->vga_logging_count) {
        return;
    }

    memory_region_transaction_begin();
    mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
    memory_region_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size)
{
    assert(mr->ram_block);
    cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
                                        size,
                                        memory_region_get_dirty_log_mask(mr));
}
/*
 * If memory region `mr' is NULL, do global sync.  Otherwise, sync
 * dirty bitmap for the specified memory region.
 */
static void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
{
    MemoryListener *listener;
    AddressSpace *as;
    FlatView *view;
    FlatRange *fr;

    /* If the same address space has multiple log_sync listeners, we
     * visit that address space's FlatView multiple times.  But because
     * log_sync listeners are rare, it's still cheaper than walking each
     * address space once.
     */
    QTAILQ_FOREACH(listener, &memory_listeners, link) {
        if (listener->log_sync) {
            as = listener->address_space;
            view = address_space_get_flatview(as);
            FOR_EACH_FLAT_RANGE(fr, view) {
                if (fr->dirty_log_mask && (!mr || fr->mr == mr)) {
                    MemoryRegionSection mrs = section_from_flat_range(fr, view);
                    listener->log_sync(listener, &mrs);
                }
            }
            flatview_unref(view);
            trace_memory_region_sync_dirty(mr ? mr->name : "(all)", listener->name, 0);
        } else if (listener->log_sync_global) {
            /*
             * No matter whether MR is specified, what we can do here
             * is to do a global sync, because we are not capable of
             * syncing at a finer granularity.
             */
            listener->log_sync_global(listener);
            trace_memory_region_sync_dirty(mr ? mr->name : "(all)", listener->name, 1);
        }
    }
}
void memory_region_clear_dirty_bitmap(MemoryRegion *mr, hwaddr start,
                                      hwaddr len)
{
    MemoryRegionSection mrs;
    MemoryListener *listener;
    AddressSpace *as;
    FlatView *view;
    FlatRange *fr;
    hwaddr sec_start, sec_end, sec_size;

    QTAILQ_FOREACH(listener, &memory_listeners, link) {
        if (!listener->log_clear) {
            continue;
        }
        as = listener->address_space;
        view = address_space_get_flatview(as);
        FOR_EACH_FLAT_RANGE(fr, view) {
            if (!fr->dirty_log_mask || fr->mr != mr) {
                /*
                 * Clear dirty bitmap operation only applies to those
                 * regions whose dirty logging is at least enabled
                 */
                continue;
            }

            mrs = section_from_flat_range(fr, view);

            sec_start = MAX(mrs.offset_within_region, start);
            sec_end = mrs.offset_within_region + int128_get64(mrs.size);
            sec_end = MIN(sec_end, start + len);

            if (sec_start >= sec_end) {
                /*
                 * If this memory region section has no intersection
                 * with the requested range, skip.
                 */
                continue;
            }

            /* Valid case; shrink the section if needed */
            mrs.offset_within_address_space +=
                sec_start - mrs.offset_within_region;
            mrs.offset_within_region = sec_start;
            sec_size = sec_end - sec_start;
            mrs.size = int128_make64(sec_size);
            listener->log_clear(listener, &mrs);
        }
        flatview_unref(view);
    }
}
DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
                                                            hwaddr addr,
                                                            hwaddr size,
                                                            unsigned client)
{
    DirtyBitmapSnapshot *snapshot;
    assert(mr->ram_block);
    memory_region_sync_dirty_bitmap(mr);
    snapshot = cpu_physical_memory_snapshot_and_clear_dirty(mr, addr, size, client);
    memory_global_after_dirty_log_sync();
    return snapshot;
}

bool memory_region_snapshot_get_dirty(MemoryRegion *mr, DirtyBitmapSnapshot *snap,
                                      hwaddr addr, hwaddr size)
{
    assert(mr->ram_block);
    return cpu_physical_memory_snapshot_get_dirty(snap,
                memory_region_get_ram_addr(mr) + addr, size);
}
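
/*
 * Typical display-update sketch (editorial; "fb_mr" and "fb_size" are
 * hypothetical, and dirty logging must already have been enabled with
 * memory_region_set_log(fb_mr, true, DIRTY_MEMORY_VGA)):
 *
 *   DirtyBitmapSnapshot *snap =
 *       memory_region_snapshot_and_clear_dirty(fb_mr, 0, fb_size,
 *                                              DIRTY_MEMORY_VGA);
 *   for (hwaddr page = 0; page < fb_size; page += TARGET_PAGE_SIZE) {
 *       if (memory_region_snapshot_get_dirty(fb_mr, snap, page,
 *                                            TARGET_PAGE_SIZE)) {
 *           // redraw this page
 *       }
 *   }
 *   g_free(snap);
 */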
void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
{
    if (mr->readonly != readonly) {
        memory_region_transaction_begin();
        mr->readonly = readonly;
        memory_region_update_pending |= mr->enabled;
        memory_region_transaction_commit();
    }
}

void memory_region_set_nonvolatile(MemoryRegion *mr, bool nonvolatile)
{
    if (mr->nonvolatile != nonvolatile) {
        memory_region_transaction_begin();
        mr->nonvolatile = nonvolatile;
        memory_region_update_pending |= mr->enabled;
        memory_region_transaction_commit();
    }
}

void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode)
{
    if (mr->romd_mode != romd_mode) {
        memory_region_transaction_begin();
        mr->romd_mode = romd_mode;
        memory_region_update_pending |= mr->enabled;
        memory_region_transaction_commit();
    }
}
void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
                               hwaddr size, unsigned client)
{
    assert(mr->ram_block);
    cpu_physical_memory_test_and_clear_dirty(
        memory_region_get_ram_addr(mr) + addr, size, client);
}

int memory_region_get_fd(MemoryRegion *mr)
{
    int fd;

    RCU_READ_LOCK_GUARD();
    while (mr->alias) {
        mr = mr->alias;
    }
    fd = mr->ram_block->fd;

    return fd;
}

void *memory_region_get_ram_ptr(MemoryRegion *mr)
{
    void *ptr;
    uint64_t offset = 0;

    RCU_READ_LOCK_GUARD();
    while (mr->alias) {
        offset += mr->alias_offset;
        mr = mr->alias;
    }
    assert(mr->ram_block);
    ptr = qemu_map_ram_ptr(mr->ram_block, offset);

    return ptr;
}

MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset)
{
    RAMBlock *block;

    block = qemu_ram_block_from_host(ptr, false, offset);
    if (!block) {
        return NULL;
    }

    return block->mr;
}

ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
{
    return mr->ram_block ? mr->ram_block->offset : RAM_ADDR_INVALID;
}
void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp)
{
    assert(mr->ram_block);

    qemu_ram_resize(mr->ram_block, newsize, errp);
}

void memory_region_msync(MemoryRegion *mr, hwaddr addr, hwaddr size)
{
    if (mr->ram_block) {
        qemu_ram_msync(mr->ram_block, addr, size);
    }
}

void memory_region_writeback(MemoryRegion *mr, hwaddr addr, hwaddr size)
{
    /*
     * Might need to be extended to cover other types of memory regions.
     */
    if (mr->dirty_log_mask) {
        memory_region_msync(mr, addr, size);
    }
}
/*
 * Call the proper memory listeners about the change on the newly
 * added/removed CoalescedMemoryRange.
 */
static void memory_region_update_coalesced_range(MemoryRegion *mr,
                                                 CoalescedMemoryRange *cmr,
                                                 bool add)
{
    AddressSpace *as;
    FlatView *view;
    FlatRange *fr;

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        view = address_space_get_flatview(as);
        FOR_EACH_FLAT_RANGE(fr, view) {
            if (fr->mr == mr) {
                flat_range_coalesced_io_notify(fr, as, cmr, add);
            }
        }
        flatview_unref(view);
    }
}
void memory_region_set_coalescing(MemoryRegion *mr)
{
    memory_region_clear_coalescing(mr);
    memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
}

void memory_region_add_coalescing(MemoryRegion *mr,
                                  hwaddr offset,
                                  uint64_t size)
{
    CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));

    cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
    QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
    memory_region_update_coalesced_range(mr, cmr, true);
    memory_region_set_flush_coalesced(mr);
}

void memory_region_clear_coalescing(MemoryRegion *mr)
{
    CoalescedMemoryRange *cmr;

    if (QTAILQ_EMPTY(&mr->coalesced)) {
        return;
    }

    qemu_flush_coalesced_mmio_buffer();
    mr->flush_coalesced_mmio = false;

    while (!QTAILQ_EMPTY(&mr->coalesced)) {
        cmr = QTAILQ_FIRST(&mr->coalesced);
        QTAILQ_REMOVE(&mr->coalesced, cmr, link);
        memory_region_update_coalesced_range(mr, cmr, false);
        g_free(cmr);
    }
}

void memory_region_set_flush_coalesced(MemoryRegion *mr)
{
    mr->flush_coalesced_mmio = true;
}

void memory_region_clear_flush_coalesced(MemoryRegion *mr)
{
    qemu_flush_coalesced_mmio_buffer();
    if (QTAILQ_EMPTY(&mr->coalesced)) {
        mr->flush_coalesced_mmio = false;
    }
}
static bool userspace_eventfd_warning;

void memory_region_add_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
        .match_data = match_data,
        .data = data,
        .e = e,
    };
    unsigned i;

    if (kvm_enabled() && (!(kvm_eventfds_enabled() ||
                            userspace_eventfd_warning))) {
        userspace_eventfd_warning = true;
        error_report("Using eventfd without MMIO binding in KVM. "
                     "Suboptimal performance expected");
    }

    if (size) {
        adjust_endianness(mr, &mrfd.data, size_memop(size) | MO_TE);
    }
    memory_region_transaction_begin();
    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_before(&mrfd, &mr->ioeventfds[i])) {
            break;
        }
    }
    ++mr->ioeventfd_nb;
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                               sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
    mr->ioeventfds[i] = mrfd;
    ioeventfd_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}
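
/*
 * Usage sketch (editorial, offset and names hypothetical): fire an
 * EventNotifier when the guest writes the value 1 to a 4-byte doorbell
 * register; with KVM support this becomes an in-kernel ioeventfd.
 *
 *   EventNotifier notifier;
 *   event_notifier_init(&notifier, 0);
 *   memory_region_add_eventfd(&s->iomem, 0x40, 4, true, 1, &notifier);
 */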
void memory_region_del_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
        .match_data = match_data,
        .data = data,
        .e = e,
    };
    unsigned i;

    if (size) {
        adjust_endianness(mr, &mrfd.data, size_memop(size) | MO_TE);
    }
    memory_region_transaction_begin();
    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_equal(&mrfd, &mr->ioeventfds[i])) {
            break;
        }
    }
    assert(i != mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
    --mr->ioeventfd_nb;
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                               sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
    ioeventfd_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}
static void memory_region_update_container_subregions(MemoryRegion *subregion)
{
    MemoryRegion *mr = subregion->container;
    MemoryRegion *other;

    memory_region_transaction_begin();

    memory_region_ref(subregion);
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->priority >= other->priority) {
            QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
            goto done;
        }
    }
    QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
done:
    memory_region_update_pending |= mr->enabled && subregion->enabled;
    memory_region_transaction_commit();
}

static void memory_region_add_subregion_common(MemoryRegion *mr,
                                               hwaddr offset,
                                               MemoryRegion *subregion)
{
    assert(!subregion->container);
    subregion->container = mr;
    subregion->addr = offset;
    memory_region_update_container_subregions(subregion);
}
void memory_region_add_subregion(MemoryRegion *mr,
                                 hwaddr offset,
                                 MemoryRegion *subregion)
{
    subregion->priority = 0;
    memory_region_add_subregion_common(mr, offset, subregion);
}

void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         hwaddr offset,
                                         MemoryRegion *subregion,
                                         int priority)
{
    subregion->priority = priority;
    memory_region_add_subregion_common(mr, offset, subregion);
}
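
/*
 * Overlap sketch (editorial, names hypothetical): RAM covers the whole
 * address space at the default priority 0; an MMIO window added with
 * priority 1 wins where the two overlap.
 *
 *   memory_region_add_subregion(sysmem, 0x0, &ram);
 *   memory_region_add_subregion_overlap(sysmem, 0xe0000000, &mmio, 1);
 */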
void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion)
{
    memory_region_transaction_begin();
    assert(subregion->container == mr);
    subregion->container = NULL;
    QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
    memory_region_unref(subregion);
    memory_region_update_pending |= mr->enabled && subregion->enabled;
    memory_region_transaction_commit();
}

void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
{
    if (enabled == mr->enabled) {
        return;
    }
    memory_region_transaction_begin();
    mr->enabled = enabled;
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}

void memory_region_set_size(MemoryRegion *mr, uint64_t size)
{
    Int128 s = int128_make64(size);

    if (size == UINT64_MAX) {
        s = int128_2_64();
    }
    if (int128_eq(s, mr->size)) {
        return;
    }
    memory_region_transaction_begin();
    mr->size = s;
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}

static void memory_region_readd_subregion(MemoryRegion *mr)
{
    MemoryRegion *container = mr->container;

    if (container) {
        memory_region_transaction_begin();
        memory_region_ref(mr);
        memory_region_del_subregion(container, mr);
        mr->container = container;
        memory_region_update_container_subregions(mr);
        memory_region_unref(mr);
        memory_region_transaction_commit();
    }
}

void memory_region_set_address(MemoryRegion *mr, hwaddr addr)
{
    if (addr != mr->addr) {
        mr->addr = addr;
        memory_region_readd_subregion(mr);
    }
}

void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset)
{
    assert(mr->alias);

    if (offset == mr->alias_offset) {
        return;
    }

    memory_region_transaction_begin();
    mr->alias_offset = offset;
    memory_region_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

uint64_t memory_region_get_alignment(const MemoryRegion *mr)
{
    return mr->align;
}

static int cmp_flatrange_addr(const void *addr_, const void *fr_)
{
    const AddrRange *addr = addr_;
    const FlatRange *fr = fr_;

    if (int128_le(addrrange_end(*addr), fr->addr.start)) {
        return -1;
    } else if (int128_ge(addr->start, addrrange_end(fr->addr))) {
        return 1;
    }
    return 0;
}

static FlatRange *flatview_lookup(FlatView *view, AddrRange addr)
{
    return bsearch(&addr, view->ranges, view->nr,
                   sizeof(FlatRange), cmp_flatrange_addr);
}

bool memory_region_is_mapped(MemoryRegion *mr)
{
    return mr->container ? true : false;
}

/* Same as memory_region_find, but it does not add a reference to the
 * returned region.  It must be called from an RCU critical section.
 */
static MemoryRegionSection memory_region_find_rcu(MemoryRegion *mr,
                                                  hwaddr addr, uint64_t size)
{
    MemoryRegionSection ret = { .mr = NULL };
    MemoryRegion *root;
    AddressSpace *as;
    AddrRange range;
    FlatView *view;
    FlatRange *fr;

    addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        addr += root->addr;
    }

    as = memory_region_to_address_space(root);
    if (!as) {
        return ret;
    }
    range = addrrange_make(int128_make64(addr), int128_make64(size));

    view = address_space_to_flatview(as);
    fr = flatview_lookup(view, range);
    if (!fr) {
        return ret;
    }

    while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) {
        --fr;
    }

    ret.mr = fr->mr;
    ret.fv = view;
    range = addrrange_intersection(range, fr->addr);
    ret.offset_within_region = fr->offset_in_region;
    ret.offset_within_region += int128_get64(int128_sub(range.start,
                                                        fr->addr.start));
    ret.size = range.size;
    ret.offset_within_address_space = int128_get64(range.start);
    ret.readonly = fr->readonly;
    ret.nonvolatile = fr->nonvolatile;
    return ret;
}

MemoryRegionSection memory_region_find(MemoryRegion *mr,
                                       hwaddr addr, uint64_t size)
{
    MemoryRegionSection ret;

    RCU_READ_LOCK_GUARD();
    ret = memory_region_find_rcu(mr, addr, size);
    if (ret.mr) {
        memory_region_ref(ret.mr);
    }
    return ret;
}

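/*
 * Usage sketch (illustrative only, not part of this file): the caller
 * owns the reference taken on ret.mr above and must drop it explicitly.
 *
 *   MemoryRegionSection section = memory_region_find(get_system_memory(),
 *                                                    addr, 4);
 *   if (section.mr) {
 *       // ... inspect section.offset_within_region, section.size ...
 *       memory_region_unref(section.mr);
 *   }
 */
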
MemoryRegionSection *memory_region_section_new_copy(MemoryRegionSection *s)
{
    MemoryRegionSection *tmp = g_new(MemoryRegionSection, 1);

    *tmp = *s;
    if (tmp->mr) {
        memory_region_ref(tmp->mr);
    }
    if (tmp->fv) {
        bool ret = flatview_ref(tmp->fv);

        g_assert(ret);
    }
    return tmp;
}

void memory_region_section_free_copy(MemoryRegionSection *s)
{
    if (s->fv) {
        flatview_unref(s->fv);
    }
    if (s->mr) {
        memory_region_unref(s->mr);
    }
    g_free(s);
}

bool memory_region_present(MemoryRegion *container, hwaddr addr)
{
    MemoryRegion *mr;

    RCU_READ_LOCK_GUARD();
    mr = memory_region_find_rcu(container, addr, 1).mr;
    return mr && mr != container;
}

void memory_global_dirty_log_sync(void)
{
    memory_region_sync_dirty_bitmap(NULL);
}

void memory_global_after_dirty_log_sync(void)
{
    MEMORY_LISTENER_CALL_GLOBAL(log_global_after_sync, Forward);
}

static VMChangeStateEntry *vmstate_change;

void memory_global_dirty_log_start(unsigned int flags)
{
    if (vmstate_change) {
        qemu_del_vm_change_state_handler(vmstate_change);
        vmstate_change = NULL;
    }

    assert(flags && !(flags & (~GLOBAL_DIRTY_MASK)));
    assert(!(global_dirty_tracking & flags));
    global_dirty_tracking |= flags;

    trace_global_dirty_changed(global_dirty_tracking);

    MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward);

    /* Refresh DIRTY_MEMORY_MIGRATION bit.  */
    memory_region_transaction_begin();
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}

static void memory_global_dirty_log_do_stop(unsigned int flags)
{
    assert(flags && !(flags & (~GLOBAL_DIRTY_MASK)));
    assert((global_dirty_tracking & flags) == flags);
    global_dirty_tracking &= ~flags;

    trace_global_dirty_changed(global_dirty_tracking);

    /* Refresh DIRTY_MEMORY_MIGRATION bit.  */
    memory_region_transaction_begin();
    memory_region_update_pending = true;
    memory_region_transaction_commit();

    MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse);
}

static void memory_vm_change_state_handler(void *opaque, bool running,
                                           RunState state)
{
    unsigned int flags = (unsigned int)(uintptr_t)opaque;

    if (running) {
        memory_global_dirty_log_do_stop(flags);

        if (vmstate_change) {
            qemu_del_vm_change_state_handler(vmstate_change);
            vmstate_change = NULL;
        }
    }
}

void memory_global_dirty_log_stop(unsigned int flags)
{
    if (!runstate_is_running()) {
        /* Postpone the actual stop until the VM starts running again */
        if (vmstate_change) {
            return;
        }
        vmstate_change = qemu_add_vm_change_state_handler(
                                memory_vm_change_state_handler,
                                (void *)(uintptr_t)flags);
        return;
    }

    memory_global_dirty_log_do_stop(flags);
}

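/*
 * Usage sketch (illustrative only, not part of this file): callers such
 * as migration turn tracking on with a flag from GLOBAL_DIRTY_MASK and
 * later stop it with the same flag.
 *
 *   memory_global_dirty_log_start(GLOBAL_DIRTY_MIGRATION);
 *   // ... repeatedly: memory_global_dirty_log_sync(), send dirty pages ...
 *   memory_global_dirty_log_stop(GLOBAL_DIRTY_MIGRATION);
 */
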
static void listener_add_address_space(MemoryListener *listener,
                                       AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;

    if (listener->begin) {
        listener->begin(listener);
    }
    if (global_dirty_tracking) {
        if (listener->log_global_start) {
            listener->log_global_start(listener);
        }
    }

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        MemoryRegionSection section = section_from_flat_range(fr, view);

        if (listener->region_add) {
            listener->region_add(listener, &section);
        }
        if (fr->dirty_log_mask && listener->log_start) {
            listener->log_start(listener, &section, 0, fr->dirty_log_mask);
        }
    }
    if (listener->commit) {
        listener->commit(listener);
    }
    flatview_unref(view);
}

static void listener_del_address_space(MemoryListener *listener,
                                       AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;

    if (listener->begin) {
        listener->begin(listener);
    }
    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        MemoryRegionSection section = section_from_flat_range(fr, view);

        if (fr->dirty_log_mask && listener->log_stop) {
            listener->log_stop(listener, &section, fr->dirty_log_mask, 0);
        }
        if (listener->region_del) {
            listener->region_del(listener, &section);
        }
    }
    if (listener->commit) {
        listener->commit(listener);
    }
    flatview_unref(view);
}

void memory_listener_register(MemoryListener *listener, AddressSpace *as)
{
    MemoryListener *other = NULL;

    /* Only one of them can be defined for a listener */
    assert(!(listener->log_sync && listener->log_sync_global));

    listener->address_space = as;
    if (QTAILQ_EMPTY(&memory_listeners)
        || listener->priority >= QTAILQ_LAST(&memory_listeners)->priority) {
        QTAILQ_INSERT_TAIL(&memory_listeners, listener, link);
    } else {
        QTAILQ_FOREACH(other, &memory_listeners, link) {
            if (listener->priority < other->priority) {
                break;
            }
        }
        QTAILQ_INSERT_BEFORE(other, listener, link);
    }

    if (QTAILQ_EMPTY(&as->listeners)
        || listener->priority >= QTAILQ_LAST(&as->listeners)->priority) {
        QTAILQ_INSERT_TAIL(&as->listeners, listener, link_as);
    } else {
        QTAILQ_FOREACH(other, &as->listeners, link_as) {
            if (listener->priority < other->priority) {
                break;
            }
        }
        QTAILQ_INSERT_BEFORE(other, listener, link_as);
    }

    listener_add_address_space(listener, as);
}

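/*
 * Usage sketch (illustrative only, not part of this file): a minimal
 * listener that is told about every FlatRange of one address space.
 * The names "mylistener" and "mylistener_region_add" are hypothetical.
 *
 *   static void mylistener_region_add(MemoryListener *listener,
 *                                     MemoryRegionSection *section)
 *   {
 *       // Invoked once per existing range at registration time, then
 *       // again for new ranges on every topology change.
 *   }
 *
 *   static MemoryListener mylistener = {
 *       .region_add = mylistener_region_add,
 *       .priority = 10,
 *   };
 *
 *   memory_listener_register(&mylistener, &address_space_memory);
 */
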
void memory_listener_unregister(MemoryListener *listener)
{
    if (!listener->address_space) {
        return;
    }

    listener_del_address_space(listener, listener->address_space);
    QTAILQ_REMOVE(&memory_listeners, listener, link);
    QTAILQ_REMOVE(&listener->address_space->listeners, listener, link_as);
    listener->address_space = NULL;
}

void address_space_remove_listeners(AddressSpace *as)
{
    while (!QTAILQ_EMPTY(&as->listeners)) {
        memory_listener_unregister(QTAILQ_FIRST(&as->listeners));
    }
}

void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
{
    memory_region_ref(root);
    as->root = root;
    as->current_map = NULL;
    as->ioeventfd_nb = 0;
    as->ioeventfds = NULL;
    QTAILQ_INIT(&as->listeners);
    QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link);
    as->name = g_strdup(name ? name : "anonymous");
    address_space_update_topology(as);
    address_space_update_ioeventfds(as);
}

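/*
 * Usage sketch (illustrative only, not part of this file): devices that
 * do DMA through their own view of memory wrap a root region in a
 * private AddressSpace.  The names below are hypothetical.
 *
 *   AddressSpace dma_as;
 *
 *   address_space_init(&dma_as, &mydev_root_mr, "mydev-dma");
 *   // ... issue address_space_read()/address_space_write() on dma_as ...
 *   address_space_destroy(&dma_as);
 */
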
static void do_address_space_destroy(AddressSpace *as)
{
    assert(QTAILQ_EMPTY(&as->listeners));

    flatview_unref(as->current_map);
    g_free(as->name);
    g_free(as->ioeventfds);
    memory_region_unref(as->root);
}

void address_space_destroy(AddressSpace *as)
{
    MemoryRegion *root = as->root;

    /* Flush out anything from MemoryListeners listening in on this */
    memory_region_transaction_begin();
    as->root = NULL;
    memory_region_transaction_commit();
    QTAILQ_REMOVE(&address_spaces, as, address_spaces_link);

    /* At this point, as->dispatch and as->current_map are dummy
     * entries that the guest should never use.  Wait for the old
     * values to expire before freeing the data.
     */
    as->root = root;
    call_rcu(as, do_address_space_destroy, rcu);
}

static const char *memory_region_type(MemoryRegion *mr)
{
    if (mr->alias) {
        return memory_region_type(mr->alias);
    }
    if (memory_region_is_ram_device(mr)) {
        return "ramd";
    } else if (memory_region_is_romd(mr)) {
        return "romd";
    } else if (memory_region_is_rom(mr)) {
        return "rom";
    } else if (memory_region_is_ram(mr)) {
        return "ram";
    } else {
        return "i/o";
    }
}

typedef struct MemoryRegionList MemoryRegionList;

struct MemoryRegionList {
    const MemoryRegion *mr;
    QTAILQ_ENTRY(MemoryRegionList) mrqueue;
};

typedef QTAILQ_HEAD(, MemoryRegionList) MemoryRegionListHead;

#define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \
                           int128_sub((size), int128_one())) : 0)
#define MTREE_INDENT "  "

static void mtree_expand_owner(const char *label, Object *obj)
{
    DeviceState *dev = (DeviceState *) object_dynamic_cast(obj, TYPE_DEVICE);

    qemu_printf(" %s:{%s", label, dev ? "dev" : "obj");
    if (dev && dev->id) {
        qemu_printf(" id=%s", dev->id);
    } else {
        char *canonical_path = object_get_canonical_path(obj);
        if (canonical_path) {
            qemu_printf(" path=%s", canonical_path);
            g_free(canonical_path);
        } else {
            qemu_printf(" type=%s", object_get_typename(obj));
        }
    }
    qemu_printf("}");
}

static void mtree_print_mr_owner(const MemoryRegion *mr)
{
    Object *owner = mr->owner;
    Object *parent = memory_region_owner((MemoryRegion *)mr);

    if (!owner && !parent) {
        qemu_printf(" orphan");
        return;
    }
    if (owner) {
        mtree_expand_owner("owner", owner);
    }
    if (parent && parent != owner) {
        mtree_expand_owner("parent", parent);
    }
}

static void mtree_print_mr(const MemoryRegion *mr, unsigned int level,
                           hwaddr base,
                           MemoryRegionListHead *alias_print_queue,
                           bool owner, bool display_disabled)
{
    MemoryRegionList *new_ml, *ml, *next_ml;
    MemoryRegionListHead submr_print_queue;
    const MemoryRegion *submr;
    unsigned int i;
    hwaddr cur_start, cur_end;

    if (!mr) {
        return;
    }

    cur_start = base + mr->addr;
    cur_end = cur_start + MR_SIZE(mr->size);

    /*
     * Try to detect overflow of memory region. This should never
     * happen normally. When it happens, we dump something to warn the
     * user who is observing this.
     */
    if (cur_start < base || cur_end < cur_start) {
        qemu_printf("[DETECTED OVERFLOW!] ");
    }

    if (mr->alias) {
        MemoryRegionList *ml;
        bool found = false;

        /* check if the alias is already in the queue */
        QTAILQ_FOREACH(ml, alias_print_queue, mrqueue) {
            if (ml->mr == mr->alias) {
                found = true;
            }
        }

        if (!found) {
            ml = g_new(MemoryRegionList, 1);
            ml->mr = mr->alias;
            QTAILQ_INSERT_TAIL(alias_print_queue, ml, mrqueue);
        }
        if (mr->enabled || display_disabled) {
            for (i = 0; i < level; i++) {
                qemu_printf(MTREE_INDENT);
            }
            qemu_printf(TARGET_FMT_plx "-" TARGET_FMT_plx
                        " (prio %d, %s%s): alias %s @%s " TARGET_FMT_plx
                        "-" TARGET_FMT_plx "%s",
                        cur_start, cur_end,
                        mr->priority,
                        mr->nonvolatile ? "nv-" : "",
                        memory_region_type((MemoryRegion *)mr),
                        memory_region_name(mr),
                        memory_region_name(mr->alias),
                        mr->alias_offset,
                        mr->alias_offset + MR_SIZE(mr->size),
                        mr->enabled ? "" : " [disabled]");
            if (owner) {
                mtree_print_mr_owner(mr);
            }
            qemu_printf("\n");
        }
    } else {
        if (mr->enabled || display_disabled) {
            for (i = 0; i < level; i++) {
                qemu_printf(MTREE_INDENT);
            }
            qemu_printf(TARGET_FMT_plx "-" TARGET_FMT_plx
                        " (prio %d, %s%s): %s%s",
                        cur_start, cur_end,
                        mr->priority,
                        mr->nonvolatile ? "nv-" : "",
                        memory_region_type((MemoryRegion *)mr),
                        memory_region_name(mr),
                        mr->enabled ? "" : " [disabled]");
            if (owner) {
                mtree_print_mr_owner(mr);
            }
            qemu_printf("\n");
        }
    }

    QTAILQ_INIT(&submr_print_queue);

    QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) {
        new_ml = g_new(MemoryRegionList, 1);
        new_ml->mr = submr;
        QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
            if (new_ml->mr->addr < ml->mr->addr ||
                (new_ml->mr->addr == ml->mr->addr &&
                 new_ml->mr->priority > ml->mr->priority)) {
                QTAILQ_INSERT_BEFORE(ml, new_ml, mrqueue);
                new_ml = NULL;
                break;
            }
        }
        if (new_ml) {
            QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, mrqueue);
        }
    }

    QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
        mtree_print_mr(ml->mr, level + 1, cur_start,
                       alias_print_queue, owner, display_disabled);
    }

    QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, mrqueue, next_ml) {
        g_free(ml);
    }
}

struct FlatViewInfo {
    int counter;
    bool dispatch_tree;
    bool owner;
    AccelClass *ac;
};

static void mtree_print_flatview(gpointer key, gpointer value,
                                 gpointer user_data)
{
    FlatView *view = key;
    GArray *fv_address_spaces = value;
    struct FlatViewInfo *fvi = user_data;
    FlatRange *range = &view->ranges[0];
    MemoryRegion *mr;
    int n = view->nr;
    int i;
    AddressSpace *as;

    qemu_printf("FlatView #%d\n", fvi->counter);
    ++fvi->counter;

    for (i = 0; i < fv_address_spaces->len; ++i) {
        as = g_array_index(fv_address_spaces, AddressSpace*, i);
        qemu_printf(" AS \"%s\", root: %s",
                    as->name, memory_region_name(as->root));
        if (as->root->alias) {
            qemu_printf(", alias %s", memory_region_name(as->root->alias));
        }
        qemu_printf("\n");
    }

    qemu_printf(" Root memory region: %s\n",
                view->root ? memory_region_name(view->root) : "(none)");

    if (n <= 0) {
        qemu_printf(MTREE_INDENT "No rendered FlatView\n\n");
        return;
    }

    while (n--) {
        mr = range->mr;
        if (range->offset_in_region) {
            qemu_printf(MTREE_INDENT TARGET_FMT_plx "-" TARGET_FMT_plx
                        " (prio %d, %s%s): %s @" TARGET_FMT_plx,
                        int128_get64(range->addr.start),
                        int128_get64(range->addr.start)
                        + MR_SIZE(range->addr.size),
                        mr->priority,
                        range->nonvolatile ? "nv-" : "",
                        range->readonly ? "rom" : memory_region_type(mr),
                        memory_region_name(mr),
                        range->offset_in_region);
        } else {
            qemu_printf(MTREE_INDENT TARGET_FMT_plx "-" TARGET_FMT_plx
                        " (prio %d, %s%s): %s",
                        int128_get64(range->addr.start),
                        int128_get64(range->addr.start)
                        + MR_SIZE(range->addr.size),
                        mr->priority,
                        range->nonvolatile ? "nv-" : "",
                        range->readonly ? "rom" : memory_region_type(mr),
                        memory_region_name(mr));
        }
        if (fvi->owner) {
            mtree_print_mr_owner(mr);
        }

        if (fvi->ac) {
            for (i = 0; i < fv_address_spaces->len; ++i) {
                as = g_array_index(fv_address_spaces, AddressSpace*, i);
                if (fvi->ac->has_memory(current_machine, as,
                                        int128_get64(range->addr.start),
                                        MR_SIZE(range->addr.size) + 1)) {
                    qemu_printf(" %s", fvi->ac->name);
                }
            }
        }
        qemu_printf("\n");
        range++;
    }

#if !defined(CONFIG_USER_ONLY)
    if (fvi->dispatch_tree && view->root) {
        mtree_print_dispatch(view->dispatch, view->root);
    }
#endif

    qemu_printf("\n");
}

static gboolean mtree_info_flatview_free(gpointer key, gpointer value,
                                         gpointer user_data)
{
    FlatView *view = key;
    GArray *fv_address_spaces = value;

    g_array_unref(fv_address_spaces);
    flatview_unref(view);

    return true;
}

static void mtree_info_flatview(bool dispatch_tree, bool owner)
{
    struct FlatViewInfo fvi = {
        .counter = 0,
        .dispatch_tree = dispatch_tree,
        .owner = owner,
    };
    AddressSpace *as;
    FlatView *view;
    GArray *fv_address_spaces;
    GHashTable *views = g_hash_table_new(g_direct_hash, g_direct_equal);
    AccelClass *ac = ACCEL_GET_CLASS(current_accel());

    if (ac->has_memory) {
        fvi.ac = ac;
    }

    /* Gather all FVs in one table */
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        view = address_space_get_flatview(as);

        fv_address_spaces = g_hash_table_lookup(views, view);
        if (!fv_address_spaces) {
            fv_address_spaces = g_array_new(false, false, sizeof(as));
            g_hash_table_insert(views, view, fv_address_spaces);
        }

        g_array_append_val(fv_address_spaces, as);
    }

    /* Print */
    g_hash_table_foreach(views, mtree_print_flatview, &fvi);

    /* Free */
    g_hash_table_foreach_remove(views, mtree_info_flatview_free, 0);
    g_hash_table_unref(views);
}

static void mtree_info_as(bool dispatch_tree, bool owner, bool disabled)
{
    MemoryRegionListHead ml_head;
    MemoryRegionList *ml, *ml2;
    AddressSpace *as;

    QTAILQ_INIT(&ml_head);

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        qemu_printf("address-space: %s\n", as->name);
        mtree_print_mr(as->root, 1, 0, &ml_head, owner, disabled);
        qemu_printf("\n");
    }

    /* print aliased regions */
    QTAILQ_FOREACH(ml, &ml_head, mrqueue) {
        qemu_printf("memory-region: %s\n", memory_region_name(ml->mr));
        mtree_print_mr(ml->mr, 1, 0, &ml_head, owner, disabled);
        qemu_printf("\n");
    }

    QTAILQ_FOREACH_SAFE(ml, &ml_head, mrqueue, ml2) {
        g_free(ml);
    }
}

void mtree_info(bool flatview, bool dispatch_tree, bool owner, bool disabled)
{
    if (flatview) {
        mtree_info_flatview(dispatch_tree, owner);
    } else {
        mtree_info_as(dispatch_tree, owner, disabled);
    }
}

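/*
 * Note (assumption, for orientation only): mtree_info() backs the HMP
 * "info mtree" monitor command, whose -f, -d, -o and -D switches map to
 * the flatview, dispatch_tree, owner and disabled arguments above.
 */
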
void memory_region_init_ram(MemoryRegion *mr,
                            Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp)
{
    DeviceState *owner_dev;
    Error *err = NULL;

    memory_region_init_ram_nomigrate(mr, owner, name, size, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    /* This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration. TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);
}

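/*
 * Usage sketch (illustrative only, not part of this file): a typical
 * call from a device's realize method, so the owner provides a unique
 * vmstate name.  "s->ram" and the addresses are hypothetical.
 *
 *   memory_region_init_ram(&s->ram, OBJECT(dev), "mydev.ram",
 *                          64 * KiB, &error_fatal);
 *   memory_region_add_subregion(get_system_memory(), 0x10000000, &s->ram);
 */
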
void memory_region_init_rom(MemoryRegion *mr,
                            Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp)
{
    DeviceState *owner_dev;
    Error *err = NULL;

    memory_region_init_rom_nomigrate(mr, owner, name, size, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    /* This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration. TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);
}

void memory_region_init_rom_device(MemoryRegion *mr,
                                   Object *owner,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   const char *name,
                                   uint64_t size,
                                   Error **errp)
{
    DeviceState *owner_dev;
    Error *err = NULL;

    memory_region_init_rom_device_nomigrate(mr, owner, ops, opaque,
                                            name, size, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    /* This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration. TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);
}

/*
 * Support softmmu builds with CONFIG_FUZZ using a weak symbol and a stub for
 * the fuzz_dma_read_cb callback
 */
#ifdef CONFIG_FUZZ
void __attribute__((weak)) fuzz_dma_read_cb(size_t addr,
                      size_t len,
                      MemoryRegion *mr)
{
}
#endif

static const TypeInfo memory_region_info = {
    .parent = TYPE_OBJECT,
    .name = TYPE_MEMORY_REGION,
    .class_size = sizeof(MemoryRegionClass),
    .instance_size = sizeof(MemoryRegion),
    .instance_init = memory_region_initfn,
    .instance_finalize = memory_region_finalize,
};

static const TypeInfo iommu_memory_region_info = {
    .parent = TYPE_MEMORY_REGION,
    .name = TYPE_IOMMU_MEMORY_REGION,
    .class_size = sizeof(IOMMUMemoryRegionClass),
    .instance_size = sizeof(IOMMUMemoryRegion),
    .instance_init = iommu_memory_region_initfn,
    .abstract = true,
};

static const TypeInfo ram_discard_manager_info = {
    .parent = TYPE_INTERFACE,
    .name = TYPE_RAM_DISCARD_MANAGER,
    .class_size = sizeof(RamDiscardManagerClass),
};

static void memory_register_types(void)
{
    type_register_static(&memory_region_info);
    type_register_static(&iommu_memory_region_info);
    type_register_static(&ram_discard_manager_info);
}

type_init(memory_register_types)