/*
 * Physical memory management API
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
17 #ifndef CONFIG_USER_ONLY
21 #include "qemu-common.h"
22 #include "cpu-common.h"
24 #include "qemu-queue.h"
29 typedef struct MemoryRegionOps MemoryRegionOps;
30 typedef struct MemoryRegion MemoryRegion;
31 typedef struct MemoryRegionPortio MemoryRegionPortio;
32 typedef struct MemoryRegionMmio MemoryRegionMmio;
/* Must match *_DIRTY_FLAGS in cpu-all.h.  To be replaced with dynamic
 * registrations.
 */
37 #define DIRTY_MEMORY_VGA 0
38 #define DIRTY_MEMORY_CODE 1
39 #define DIRTY_MEMORY_MIGRATION 3
41 struct MemoryRegionMmio {
42 CPUReadMemoryFunc *read[3];
43 CPUWriteMemoryFunc *write[3];
46 /* Internal use; thunks between old-style IORange and MemoryRegions. */
47 typedef struct MemoryRegionIORange MemoryRegionIORange;
48 struct MemoryRegionIORange {
51 target_phys_addr_t offset;
55 * Memory region callbacks
57 struct MemoryRegionOps {
58 /* Read from the memory region. @addr is relative to @mr; @size is
60 uint64_t (*read)(void *opaque,
61 target_phys_addr_t addr,
63 /* Write to the memory region. @addr is relative to @mr; @size is
65 void (*write)(void *opaque,
66 target_phys_addr_t addr,
70 enum device_endian endianness;
71 /* Guest-visible constraints: */
73 /* If nonzero, specify bounds on access sizes beyond which a machine
76 unsigned min_access_size;
77 unsigned max_access_size;
78 /* If true, unaligned accesses are supported. Otherwise unaligned
79 * accesses throw machine checks.
83 * If present, and returns #false, the transaction is not accepted
84 * by the device (and results in machine dependent behaviour such
85 * as a machine check exception).
87 bool (*accepts)(void *opaque, target_phys_addr_t addr,
88 unsigned size, bool is_write);
90 /* Internal implementation constraints: */
92 /* If nonzero, specifies the minimum size implemented. Smaller sizes
93 * will be rounded upwards and a partial result will be returned.
95 unsigned min_access_size;
96 /* If nonzero, specifies the maximum size implemented. Larger sizes
97 * will be done as a series of accesses with smaller sizes.
99 unsigned max_access_size;
100 /* If true, unaligned accesses are supported. Otherwise all accesses
101 * are converted to (possibly multiple) naturally aligned accesses.
106 /* If .read and .write are not present, old_portio may be used for
107 * backwards compatibility with old portio registration
109 const MemoryRegionPortio *old_portio;
110 /* If .read and .write are not present, old_mmio may be used for
111 * backwards compatibility with old mmio registration
113 const MemoryRegionMmio old_mmio;
116 typedef struct CoalescedMemoryRange CoalescedMemoryRange;
117 typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd;
119 struct MemoryRegion {
120 /* All fields are private - violators will be prosecuted */
121 const MemoryRegionOps *ops;
123 MemoryRegion *parent;
125 target_phys_addr_t addr;
126 void (*destructor)(MemoryRegion *mr);
132 bool readonly; /* For RAM regions */
135 bool warning_printed; /* For reservations */
136 bool flush_coalesced_mmio;
138 target_phys_addr_t alias_offset;
141 QTAILQ_HEAD(subregions, MemoryRegion) subregions;
142 QTAILQ_ENTRY(MemoryRegion) subregions_link;
143 QTAILQ_HEAD(coalesced_ranges, CoalescedMemoryRange) coalesced;
145 uint8_t dirty_log_mask;
146 unsigned ioeventfd_nb;
147 MemoryRegionIoeventfd *ioeventfds;
150 struct MemoryRegionPortio {
154 IOPortReadFunc *read;
155 IOPortWriteFunc *write;
158 #define PORTIO_END_OF_LIST() { }
160 typedef struct MemoryRegionSection MemoryRegionSection;
163 * MemoryRegionSection: describes a fragment of a #MemoryRegion
165 * @mr: the region, or %NULL if empty
166 * @address_space: the address space the region is mapped in
167 * @offset_within_region: the beginning of the section, relative to @mr's start
168 * @size: the size of the section; will not exceed @mr's boundaries
169 * @offset_within_address_space: the address of the first byte of the section
170 * relative to the region's address space
171 * @readonly: writes to this section are ignored
173 struct MemoryRegionSection {
175 MemoryRegion *address_space;
176 target_phys_addr_t offset_within_region;
178 target_phys_addr_t offset_within_address_space;
182 typedef struct MemoryListener MemoryListener;
185 * MemoryListener: callbacks structure for updates to the physical memory map
187 * Allows a component to adjust to changes in the guest-visible memory map.
188 * Use with memory_listener_register() and memory_listener_unregister().
190 struct MemoryListener {
191 void (*begin)(MemoryListener *listener);
192 void (*commit)(MemoryListener *listener);
193 void (*region_add)(MemoryListener *listener, MemoryRegionSection *section);
194 void (*region_del)(MemoryListener *listener, MemoryRegionSection *section);
195 void (*region_nop)(MemoryListener *listener, MemoryRegionSection *section);
196 void (*log_start)(MemoryListener *listener, MemoryRegionSection *section);
197 void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section);
198 void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section);
199 void (*log_global_start)(MemoryListener *listener);
200 void (*log_global_stop)(MemoryListener *listener);
201 void (*eventfd_add)(MemoryListener *listener, MemoryRegionSection *section,
202 bool match_data, uint64_t data, EventNotifier *e);
203 void (*eventfd_del)(MemoryListener *listener, MemoryRegionSection *section,
204 bool match_data, uint64_t data, EventNotifier *e);
205 /* Lower = earlier (during add), later (during del) */
207 MemoryRegion *address_space_filter;
208 QTAILQ_ENTRY(MemoryListener) link;
212 * memory_region_init: Initialize a memory region
214 * The region typically acts as a container for other memory regions. Use
215 * memory_region_add_subregion() to add subregions.
217 * @mr: the #MemoryRegion to be initialized
218 * @name: used for debugging; not visible to the user or ABI
219 * @size: size of the region; any subregions beyond this size will be clipped
221 void memory_region_init(MemoryRegion *mr,
225 * memory_region_init_io: Initialize an I/O memory region.
227 * Accesses into the region will cause the callbacks in @ops to be called.
228 * if @size is nonzero, subregions will be clipped to @size.
230 * @mr: the #MemoryRegion to be initialized.
231 * @ops: a structure containing read and write callbacks to be used when
232 * I/O is performed on the region.
 * @opaque: passed to the read and write callbacks of the @ops structure.
234 * @name: used for debugging; not visible to the user or ABI
235 * @size: size of the region.
237 void memory_region_init_io(MemoryRegion *mr,
238 const MemoryRegionOps *ops,
244 * memory_region_init_ram: Initialize RAM memory region. Accesses into the
245 * region will modify memory directly.
247 * @mr: the #MemoryRegion to be initialized.
248 * @name: the name of the region.
249 * @size: size of the region.
251 void memory_region_init_ram(MemoryRegion *mr,
 * memory_region_init_ram_ptr: Initialize RAM memory region from a
 *     user-provided
257 * pointer. Accesses into the region will modify
260 * @mr: the #MemoryRegion to be initialized.
261 * @name: the name of the region.
262 * @size: size of the region.
263 * @ptr: memory to be mapped; must contain at least @size bytes.
265 void memory_region_init_ram_ptr(MemoryRegion *mr,
271 * memory_region_init_alias: Initialize a memory region that aliases all or a
272 * part of another memory region.
274 * @mr: the #MemoryRegion to be initialized.
275 * @name: used for debugging; not visible to the user or ABI
276 * @orig: the region to be referenced; @mr will be equivalent to
277 * @orig between @offset and @offset + @size - 1.
278 * @offset: start of the section in @orig to be referenced.
279 * @size: size of the region.
281 void memory_region_init_alias(MemoryRegion *mr,
284 target_phys_addr_t offset,
288 * memory_region_init_rom_device: Initialize a ROM memory region. Writes are
289 * handled via callbacks.
291 * @mr: the #MemoryRegion to be initialized.
292 * @ops: callbacks for write access handling.
293 * @name: the name of the region.
294 * @size: size of the region.
296 void memory_region_init_rom_device(MemoryRegion *mr,
297 const MemoryRegionOps *ops,
303 * memory_region_init_reservation: Initialize a memory region that reserves
306 * A reservation region primariy serves debugging purposes. It claims I/O
307 * space that is not supposed to be handled by QEMU itself. Any access via
308 * the memory API will cause an abort().
310 * @mr: the #MemoryRegion to be initialized
311 * @name: used for debugging; not visible to the user or ABI
312 * @size: size of the region.
314 void memory_region_init_reservation(MemoryRegion *mr,
318 * memory_region_destroy: Destroy a memory region and reclaim all resources.
320 * @mr: the region to be destroyed. May not currently be a subregion
321 * (see memory_region_add_subregion()) or referenced in an alias
322 * (see memory_region_init_alias()).
324 void memory_region_destroy(MemoryRegion *mr);
327 * memory_region_size: get a memory region's size.
329 * @mr: the memory region being queried.
331 uint64_t memory_region_size(MemoryRegion *mr);
334 * memory_region_is_ram: check whether a memory region is random access
 * Returns %true if a memory region is random access.
338 * @mr: the memory region being queried
340 bool memory_region_is_ram(MemoryRegion *mr);
343 * memory_region_is_romd: check whether a memory region is ROMD
 * Returns %true if a memory region is ROMD and currently set to allow
348 * @mr: the memory region being queried
350 static inline bool memory_region_is_romd(MemoryRegion *mr)
352 return mr->rom_device && mr->readable;
356 * memory_region_name: get a memory region's name
358 * Returns the string that was used to initialize the memory region.
360 * @mr: the memory region being queried
362 const char *memory_region_name(MemoryRegion *mr);
365 * memory_region_is_logging: return whether a memory region is logging writes
367 * Returns %true if the memory region is logging writes
369 * @mr: the memory region being queried
371 bool memory_region_is_logging(MemoryRegion *mr);
374 * memory_region_is_rom: check whether a memory region is ROM
 * Returns %true if a memory region is read-only memory.
378 * @mr: the memory region being queried
380 bool memory_region_is_rom(MemoryRegion *mr);
383 * memory_region_get_ram_ptr: Get a pointer into a RAM memory region.
385 * Returns a host pointer to a RAM memory region (created with
386 * memory_region_init_ram() or memory_region_init_ram_ptr()). Use with
389 * @mr: the memory region being queried.
391 void *memory_region_get_ram_ptr(MemoryRegion *mr);
394 * memory_region_set_log: Turn dirty logging on or off for a region.
396 * Turns dirty logging on or off for a specified client (display, migration).
397 * Only meaningful for RAM regions.
399 * @mr: the memory region being updated.
400 * @log: whether dirty logging is to be enabled or disabled.
401 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
404 void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client);
407 * memory_region_get_dirty: Check whether a range of bytes is dirty
408 * for a specified client.
410 * Checks whether a range of bytes has been written to since the last
411 * call to memory_region_reset_dirty() with the same @client. Dirty logging
414 * @mr: the memory region being queried.
415 * @addr: the address (relative to the start of the region) being queried.
416 * @size: the size of the range being queried.
417 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
420 bool memory_region_get_dirty(MemoryRegion *mr, target_phys_addr_t addr,
421 target_phys_addr_t size, unsigned client);
424 * memory_region_set_dirty: Mark a range of bytes as dirty in a memory region.
426 * Marks a range of bytes as dirty, after it has been dirtied outside
429 * @mr: the memory region being dirtied.
430 * @addr: the address (relative to the start of the region) being dirtied.
431 * @size: size of the range being dirtied.
433 void memory_region_set_dirty(MemoryRegion *mr, target_phys_addr_t addr,
434 target_phys_addr_t size);
437 * memory_region_sync_dirty_bitmap: Synchronize a region's dirty bitmap with
438 * any external TLBs (e.g. kvm)
440 * Flushes dirty information from accelerators such as kvm and vhost-net
441 * and makes it available to users of the memory API.
443 * @mr: the region being flushed.
445 void memory_region_sync_dirty_bitmap(MemoryRegion *mr);
448 * memory_region_reset_dirty: Mark a range of pages as clean, for a specified
451 * Marks a range of pages as no longer dirty.
453 * @mr: the region being updated.
454 * @addr: the start of the subrange being cleaned.
455 * @size: the size of the subrange being cleaned.
456 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
459 void memory_region_reset_dirty(MemoryRegion *mr, target_phys_addr_t addr,
460 target_phys_addr_t size, unsigned client);
463 * memory_region_set_readonly: Turn a memory region read-only (or read-write)
465 * Allows a memory region to be marked as read-only (turning it into a ROM).
466 * only useful on RAM regions.
468 * @mr: the region being updated.
 * @readonly: whether the region is to be ROM or RAM.
471 void memory_region_set_readonly(MemoryRegion *mr, bool readonly);
474 * memory_region_rom_device_set_readable: enable/disable ROM readability
 * Allows a ROM device (initialized with memory_region_init_rom_device())
 * to be marked as readable (default) or not readable.  When it is readable,
478 * the device is mapped to guest memory. When not readable, reads are
479 * forwarded to the #MemoryRegion.read function.
481 * @mr: the memory region to be updated
 * @readable: whether reads are satisfied directly (%true) or via callbacks
485 void memory_region_rom_device_set_readable(MemoryRegion *mr, bool readable);
488 * memory_region_set_coalescing: Enable memory coalescing for the region.
 * Enables writes to a region to be queued for later processing. MMIO ->write
491 * callbacks may be delayed until a non-coalesced MMIO is issued.
492 * Only useful for IO regions. Roughly similar to write-combining hardware.
494 * @mr: the memory region to be write coalesced
496 void memory_region_set_coalescing(MemoryRegion *mr);
499 * memory_region_add_coalescing: Enable memory coalescing for a sub-range of
502 * Like memory_region_set_coalescing(), but works on a sub-range of a region.
503 * Multiple calls can be issued coalesced disjoint ranges.
505 * @mr: the memory region to be updated.
506 * @offset: the start of the range within the region to be coalesced.
507 * @size: the size of the subrange to be coalesced.
509 void memory_region_add_coalescing(MemoryRegion *mr,
510 target_phys_addr_t offset,
514 * memory_region_clear_coalescing: Disable MMIO coalescing for the region.
516 * Disables any coalescing caused by memory_region_set_coalescing() or
 * memory_region_add_coalescing(). Roughly equivalent to uncacheable memory
520 * @mr: the memory region to be updated.
522 void memory_region_clear_coalescing(MemoryRegion *mr);
525 * memory_region_set_flush_coalesced: Enforce memory coalescing flush before
528 * Ensure that pending coalesced MMIO request are flushed before the memory
529 * region is accessed. This property is automatically enabled for all regions
530 * passed to memory_region_set_coalescing() and memory_region_add_coalescing().
532 * @mr: the memory region to be updated.
534 void memory_region_set_flush_coalesced(MemoryRegion *mr);
537 * memory_region_clear_flush_coalesced: Disable memory coalescing flush before
540 * Clear the automatic coalesced MMIO flushing enabled via
541 * memory_region_set_flush_coalesced. Note that this service has no effect on
542 * memory regions that have MMIO coalescing enabled for themselves. For them,
543 * automatic flushing will stop once coalescing is disabled.
545 * @mr: the memory region to be updated.
547 void memory_region_clear_flush_coalesced(MemoryRegion *mr);
550 * memory_region_add_eventfd: Request an eventfd to be triggered when a word
551 * is written to a location.
553 * Marks a word in an IO region (initialized with memory_region_init_io())
554 * as a trigger for an eventfd event. The I/O callback will not be called.
555 * The caller must be prepared to handle failure (that is, take the required
556 * action if the callback _is_ called).
558 * @mr: the memory region being updated.
559 * @addr: the address within @mr that is to be monitored
560 * @size: the size of the access to trigger the eventfd
561 * @match_data: whether to match against @data, instead of just @addr
562 * @data: the data to match against the guest write
563 * @fd: the eventfd to be triggered when @addr, @size, and @data all match.
565 void memory_region_add_eventfd(MemoryRegion *mr,
566 target_phys_addr_t addr,
573 * memory_region_del_eventfd: Cancel an eventfd.
575 * Cancels an eventfd trigger requested by a previous
576 * memory_region_add_eventfd() call.
578 * @mr: the memory region being updated.
579 * @addr: the address within @mr that is to be monitored
580 * @size: the size of the access to trigger the eventfd
581 * @match_data: whether to match against @data, instead of just @addr
582 * @data: the data to match against the guest write
583 * @fd: the eventfd to be triggered when @addr, @size, and @data all match.
585 void memory_region_del_eventfd(MemoryRegion *mr,
586 target_phys_addr_t addr,
593 * memory_region_add_subregion: Add a subregion to a container.
595 * Adds a subregion at @offset. The subregion may not overlap with other
596 * subregions (except for those explicitly marked as overlapping). A region
597 * may only be added once as a subregion (unless removed with
598 * memory_region_del_subregion()); use memory_region_init_alias() if you
599 * want a region to be a subregion in multiple locations.
601 * @mr: the region to contain the new subregion; must be a container
602 * initialized with memory_region_init().
603 * @offset: the offset relative to @mr where @subregion is added.
604 * @subregion: the subregion to be added.
606 void memory_region_add_subregion(MemoryRegion *mr,
607 target_phys_addr_t offset,
608 MemoryRegion *subregion);
 * memory_region_add_subregion_overlap: Add a subregion to a container,
 *     with overlap.
612 * Adds a subregion at @offset. The subregion may overlap with other
613 * subregions. Conflicts are resolved by having a higher @priority hide a
614 * lower @priority. Subregions without priority are taken as @priority 0.
615 * A region may only be added once as a subregion (unless removed with
616 * memory_region_del_subregion()); use memory_region_init_alias() if you
617 * want a region to be a subregion in multiple locations.
619 * @mr: the region to contain the new subregion; must be a container
620 * initialized with memory_region_init().
621 * @offset: the offset relative to @mr where @subregion is added.
622 * @subregion: the subregion to be added.
623 * @priority: used for resolving overlaps; highest priority wins.
625 void memory_region_add_subregion_overlap(MemoryRegion *mr,
626 target_phys_addr_t offset,
627 MemoryRegion *subregion,
631 * memory_region_get_ram_addr: Get the ram address associated with a memory
634 * DO NOT USE THIS FUNCTION. This is a temporary workaround while the Xen
635 * code is being reworked.
637 ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr);
640 * memory_region_del_subregion: Remove a subregion.
642 * Removes a subregion from its container.
644 * @mr: the container to be updated.
645 * @subregion: the region being removed; must be a current subregion of @mr.
647 void memory_region_del_subregion(MemoryRegion *mr,
648 MemoryRegion *subregion);
651 * memory_region_set_enabled: dynamically enable or disable a region
653 * Enables or disables a memory region. A disabled memory region
654 * ignores all accesses to itself and its subregions. It does not
655 * obscure sibling subregions with lower priority - it simply behaves as
656 * if it was removed from the hierarchy.
658 * Regions default to being enabled.
660 * @mr: the region to be updated
661 * @enabled: whether to enable or disable the region
663 void memory_region_set_enabled(MemoryRegion *mr, bool enabled);
666 * memory_region_set_address: dynamically update the address of a region
668 * Dynamically updates the address of a region, relative to its parent.
 * May be used on regions that are currently part of a memory hierarchy.
671 * @mr: the region to be updated
672 * @addr: new address, relative to parent region
674 void memory_region_set_address(MemoryRegion *mr, target_phys_addr_t addr);
677 * memory_region_set_alias_offset: dynamically update a memory alias's offset
679 * Dynamically updates the offset into the target region that an alias points
680 * to, as if the fourth argument to memory_region_init_alias() has changed.
682 * @mr: the #MemoryRegion to be updated; should be an alias.
683 * @offset: the new offset into the target memory region
685 void memory_region_set_alias_offset(MemoryRegion *mr,
686 target_phys_addr_t offset);
689 * memory_region_find: locate a MemoryRegion in an address space
691 * Locates the first #MemoryRegion within an address space given by
692 * @address_space that overlaps the range given by @addr and @size.
694 * Returns a #MemoryRegionSection that describes a contiguous overlap.
695 * It will have the following characteristics:
696 * .@offset_within_address_space >= @addr
697 * .@offset_within_address_space + .@size <= @addr + @size
698 * .@size = 0 iff no overlap was found
699 * .@mr is non-%NULL iff an overlap was found
701 * @address_space: a top-level (i.e. parentless) region that contains
702 * the region to be found
703 * @addr: start of the area within @address_space to be searched
704 * @size: size of the area to be searched
706 MemoryRegionSection memory_region_find(MemoryRegion *address_space,
707 target_phys_addr_t addr, uint64_t size);
710 * memory_region_section_addr: get offset within MemoryRegionSection
712 * Returns offset within MemoryRegionSection
714 * @section: the memory region section being queried
715 * @addr: address in address space
717 static inline target_phys_addr_t
718 memory_region_section_addr(MemoryRegionSection *section,
719 target_phys_addr_t addr)
721 addr -= section->offset_within_address_space;
722 addr += section->offset_within_region;
727 * memory_global_sync_dirty_bitmap: synchronize the dirty log for all memory
729 * Synchronizes the dirty page log for an entire address space.
730 * @address_space: a top-level (i.e. parentless) region that contains the
731 * memory being synchronized
733 void memory_global_sync_dirty_bitmap(MemoryRegion *address_space);
736 * memory_region_transaction_begin: Start a transaction.
738 * During a transaction, changes will be accumulated and made visible
739 * only when the transaction ends (is committed).
741 void memory_region_transaction_begin(void);
744 * memory_region_transaction_commit: Commit a transaction and make changes
745 * visible to the guest.
747 void memory_region_transaction_commit(void);
750 * memory_listener_register: register callbacks to be called when memory
751 * sections are mapped or unmapped into an address
754 * @listener: an object containing the callbacks to be called
755 * @filter: if non-%NULL, only regions in this address space will be observed
757 void memory_listener_register(MemoryListener *listener, MemoryRegion *filter);
760 * memory_listener_unregister: undo the effect of memory_listener_register()
762 * @listener: an object containing the callbacks to be removed
764 void memory_listener_unregister(MemoryListener *listener);
767 * memory_global_dirty_log_start: begin dirty logging for all regions
769 void memory_global_dirty_log_start(void);
 * memory_global_dirty_log_stop: stop dirty logging for all regions
774 void memory_global_dirty_log_stop(void);
776 void mtree_info(fprintf_function mon_printf, void *f);