4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include "qemu/osdep.h"
20 #include "qapi/error.h"
24 #include "qemu/cutils.h"
26 #include "exec/exec-all.h"
27 #include "exec/target_page.h"
29 #include "hw/qdev-core.h"
30 #include "hw/qdev-properties.h"
31 #if !defined(CONFIG_USER_ONLY)
32 #include "hw/boards.h"
33 #include "hw/xen/xen.h"
35 #include "sysemu/kvm.h"
36 #include "sysemu/sysemu.h"
37 #include "qemu/timer.h"
38 #include "qemu/config-file.h"
39 #include "qemu/error-report.h"
40 #if defined(CONFIG_USER_ONLY)
42 #else /* !CONFIG_USER_ONLY */
44 #include "exec/memory.h"
45 #include "exec/ioport.h"
46 #include "sysemu/dma.h"
47 #include "sysemu/numa.h"
48 #include "sysemu/hw_accel.h"
49 #include "exec/address-spaces.h"
50 #include "sysemu/xen-mapcache.h"
51 #include "trace-root.h"
53 #ifdef CONFIG_FALLOCATE_PUNCH_HOLE
55 #include <linux/falloc.h>
59 #include "qemu/rcu_queue.h"
60 #include "qemu/main-loop.h"
61 #include "translate-all.h"
62 #include "sysemu/replay.h"
64 #include "exec/memory-internal.h"
65 #include "exec/ram_addr.h"
68 #include "migration/vmstate.h"
70 #include "qemu/range.h"
72 #include "qemu/mmap-alloc.h"
75 #include "monitor/monitor.h"
77 //#define DEBUG_SUBPAGE
79 #if !defined(CONFIG_USER_ONLY)
80 /* ram_list is read under rcu_read_lock()/rcu_read_unlock(). Writes
81 * are protected by the ramlist lock.
83 RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
85 static MemoryRegion *system_memory;
86 static MemoryRegion *system_io;
88 AddressSpace address_space_io;
89 AddressSpace address_space_memory;
91 MemoryRegion io_mem_rom, io_mem_notdirty;
92 static MemoryRegion io_mem_unassigned;
94 /* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
95 #define RAM_PREALLOC (1 << 0)
97 /* RAM is mmap-ed with MAP_SHARED */
98 #define RAM_SHARED (1 << 1)
100 /* Only a portion of RAM (used_length) is actually used, and migrated.
101 * This used_length size can change across reboots.
103 #define RAM_RESIZEABLE (1 << 2)
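/* For illustration (based on the allocators further down in this file):
 * qemu_ram_alloc_from_ptr() marks its block RAM_PREALLOC, file-backed blocks
 * opened with share=true carry RAM_SHARED, and qemu_ram_alloc_resizeable()
 * additionally sets RAM_RESIZEABLE.
 */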
107 #ifdef TARGET_PAGE_BITS_VARY
108 int target_page_bits;
109 bool target_page_bits_decided;
112 struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
113 /* current CPU in the current thread. It is only valid inside
115 __thread CPUState *current_cpu;
116 /* 0 = Do not count executed instructions.
117 1 = Precise instruction counting.
118 2 = Adaptive rate instruction counting. */
121 uintptr_t qemu_host_page_size;
122 intptr_t qemu_host_page_mask;
123 uintptr_t qemu_real_host_page_size;
124 intptr_t qemu_real_host_page_mask;
126 bool set_preferred_target_page_bits(int bits)
128 /* The target page size is the lowest common denominator for all
129 * the CPUs in the system, so we can only make it smaller, never
130 * larger. And we can't make it smaller once we've committed to
133 #ifdef TARGET_PAGE_BITS_VARY
134 assert(bits >= TARGET_PAGE_BITS_MIN);
135 if (target_page_bits == 0 || target_page_bits > bits) {
136 if (target_page_bits_decided) {
139 target_page_bits = bits;
145 #if !defined(CONFIG_USER_ONLY)
147 static void finalize_target_page_bits(void)
149 #ifdef TARGET_PAGE_BITS_VARY
150 if (target_page_bits == 0) {
151 target_page_bits = TARGET_PAGE_BITS_MIN;
153 target_page_bits_decided = true;
157 typedef struct PhysPageEntry PhysPageEntry;
159 struct PhysPageEntry {
160 /* How many bits to skip to the next level (in units of L2_SIZE). 0 for a leaf. */
162 /* index into phys_sections (!skip) or phys_map_nodes (skip) */
166 #define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
168 /* Size of the L2 (and L3, etc) page tables. */
169 #define ADDR_SPACE_BITS 64
172 #define P_L2_SIZE (1 << P_L2_BITS)
174 #define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
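/* Worked example (illustrative, assuming P_L2_BITS == 9 and 12-bit target
 * pages): P_L2_LEVELS = ((64 - 12 - 1) / 9) + 1 = 6, i.e. a guest physical
 * address is resolved through six 9-bit radix-tree indexes sitting above the
 * 12-bit page offset.
 */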
176 typedef PhysPageEntry Node[P_L2_SIZE];
178 typedef struct PhysPageMap {
181 unsigned sections_nb;
182 unsigned sections_nb_alloc;
184 unsigned nodes_nb_alloc;
186 MemoryRegionSection *sections;
189 struct AddressSpaceDispatch {
190 MemoryRegionSection *mru_section;
191 /* This is a multi-level map on the physical address space.
192 * The bottom level has pointers to MemoryRegionSections.
194 PhysPageEntry phys_map;
198 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
199 typedef struct subpage_t {
203 uint16_t sub_section[];
206 #define PHYS_SECTION_UNASSIGNED 0
207 #define PHYS_SECTION_NOTDIRTY 1
208 #define PHYS_SECTION_ROM 2
209 #define PHYS_SECTION_WATCH 3
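/* These fixed indexes are guaranteed by address_space_dispatch_new(), which
 * installs the corresponding background sections via dummy_section() in
 * exactly this order (see the asserts there), so slots 0..3 of
 * d->map.sections always hold the unassigned/notdirty/rom/watch sections.
 */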
211 static void io_mem_init(void);
212 static void memory_map_init(void);
213 static void tcg_commit(MemoryListener *listener);
215 static MemoryRegion io_mem_watch;
218 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
219 * @cpu: the CPU whose AddressSpace this is
220 * @as: the AddressSpace itself
221 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
222 * @tcg_as_listener: listener for tracking changes to the AddressSpace
224 struct CPUAddressSpace {
227 struct AddressSpaceDispatch *memory_dispatch;
228 MemoryListener tcg_as_listener;
231 struct DirtyBitmapSnapshot {
234 unsigned long dirty[];
239 #if !defined(CONFIG_USER_ONLY)
241 static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
243 static unsigned alloc_hint = 16;
244 if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
245 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, alloc_hint);
246 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
247 map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
248 alloc_hint = map->nodes_nb_alloc;
252 static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
259 ret = map->nodes_nb++;
261 assert(ret != PHYS_MAP_NODE_NIL);
262 assert(ret != map->nodes_nb_alloc);
264 e.skip = leaf ? 0 : 1;
265 e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
266 for (i = 0; i < P_L2_SIZE; ++i) {
267 memcpy(&p[i], &e, sizeof(e));
272 static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
273 hwaddr *index, hwaddr *nb, uint16_t leaf,
277 hwaddr step = (hwaddr)1 << (level * P_L2_BITS);
279 if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
280 lp->ptr = phys_map_node_alloc(map, level == 0);
282 p = map->nodes[lp->ptr];
283 lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];
285 while (*nb && lp < &p[P_L2_SIZE]) {
286 if ((*index & (step - 1)) == 0 && *nb >= step) {
292 phys_page_set_level(map, lp, index, nb, leaf, level - 1);
298 static void phys_page_set(AddressSpaceDispatch *d,
299 hwaddr index, hwaddr nb,
302 /* Wildly overreserve - it doesn't matter much. */
303 phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);
305 phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
308 /* Compact a non-leaf page entry. Simply detect that the entry has a single child,
309 * and update our entry so we can skip it and go directly to the destination.
311 static void phys_page_compact(PhysPageEntry *lp, Node *nodes)
313 unsigned valid_ptr = P_L2_SIZE;
318 if (lp->ptr == PHYS_MAP_NODE_NIL) {
323 for (i = 0; i < P_L2_SIZE; i++) {
324 if (p[i].ptr == PHYS_MAP_NODE_NIL) {
331 phys_page_compact(&p[i], nodes);
335 /* We can only compress if there's only one child. */
340 assert(valid_ptr < P_L2_SIZE);
342 /* Don't compress if it won't fit in the # of bits we have. */
343 if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
347 lp->ptr = p[valid_ptr].ptr;
348 if (!p[valid_ptr].skip) {
349 /* If our only child is a leaf, make this a leaf. */
350 /* By design, we should have made this node a leaf to begin with so we
351 * should never reach here.
352 * But since it's so simple to handle this, let's do it just in case we
357 lp->skip += p[valid_ptr].skip;
361 void address_space_dispatch_compact(AddressSpaceDispatch *d)
363 if (d->phys_map.skip) {
364 phys_page_compact(&d->phys_map, d->map.nodes);
368 static inline bool section_covers_addr(const MemoryRegionSection *section,
371 /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
372 * the section must cover the entire address space.
374 return int128_gethi(section->size) ||
375 range_covers_byte(section->offset_within_address_space,
376 int128_getlo(section->size), addr);
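/* Example: the background "unassigned" section installed by dummy_section()
 * has size int128_2_64(), whose high 64 bits are non-zero, so it covers every
 * address; ordinary sections fall through to the range_covers_byte() check.
 */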
379 static MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr addr)
381 PhysPageEntry lp = d->phys_map, *p;
382 Node *nodes = d->map.nodes;
383 MemoryRegionSection *sections = d->map.sections;
384 hwaddr index = addr >> TARGET_PAGE_BITS;
387 for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
388 if (lp.ptr == PHYS_MAP_NODE_NIL) {
389 return &sections[PHYS_SECTION_UNASSIGNED];
392 lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
395 if (section_covers_addr(&sections[lp.ptr], addr)) {
396 return &sections[lp.ptr];
398 return &sections[PHYS_SECTION_UNASSIGNED];
402 bool memory_region_is_unassigned(MemoryRegion *mr)
404 return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
405 && mr != &io_mem_watch;
408 /* Called from RCU critical section */
409 static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
411 bool resolve_subpage)
413 MemoryRegionSection *section = atomic_read(&d->mru_section);
417 if (section && section != &d->map.sections[PHYS_SECTION_UNASSIGNED] &&
418 section_covers_addr(section, addr)) {
421 section = phys_page_find(d, addr);
424 if (resolve_subpage && section->mr->subpage) {
425 subpage = container_of(section->mr, subpage_t, iomem);
426 section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
429 atomic_set(&d->mru_section, section);
434 /* Called from RCU critical section */
435 static MemoryRegionSection *
436 address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
437 hwaddr *plen, bool resolve_subpage)
439 MemoryRegionSection *section;
443 section = address_space_lookup_region(d, addr, resolve_subpage);
444 /* Compute offset within MemoryRegionSection */
445 addr -= section->offset_within_address_space;
447 /* Compute offset within MemoryRegion */
448 *xlat = addr + section->offset_within_region;
452 /* MMIO registers can be expected to perform full-width accesses based only
453 * on their address, without considering adjacent registers that could
454 * decode to completely different MemoryRegions. When such registers
455 * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
456 * regions overlap wildly. For this reason we cannot clamp the accesses
459 * If the length is small (as is the case for address_space_ldl/stl),
460 * everything works fine. If the incoming length is large, however,
461 * the caller really has to do the clamping through memory_access_size.
463 if (memory_region_is_ram(mr)) {
464 diff = int128_sub(section->size, int128_make64(addr));
465 *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
470 /* Called from RCU critical section */
471 static MemoryRegionSection flatview_do_translate(FlatView *fv,
477 AddressSpace **target_as)
480 MemoryRegionSection *section;
481 IOMMUMemoryRegion *iommu_mr;
482 IOMMUMemoryRegionClass *imrc;
485 section = address_space_translate_internal(
486 flatview_to_dispatch(fv), addr, &addr,
489 iommu_mr = memory_region_get_iommu(section->mr);
493 imrc = memory_region_get_iommu_class_nocheck(iommu_mr);
495 iotlb = imrc->translate(iommu_mr, addr, is_write ?
496 IOMMU_WO : IOMMU_RO);
497 addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
498 | (addr & iotlb.addr_mask));
499 *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
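/* Illustrative example: for a 4 KiB IOMMU mapping, iotlb.addr_mask is 0xfff,
 * so the low 12 bits of the input address are kept and the upper bits come
 * from translated_addr; the MIN() above then clips *plen so the access does
 * not cross the end of that 4 KiB translation.
 */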
500 if (!(iotlb.perm & (1 << is_write))) {
504 fv = address_space_to_flatview(iotlb.target_as);
505 *target_as = iotlb.target_as;
513 return (MemoryRegionSection) { .mr = &io_mem_unassigned };
516 /* Called from RCU critical section */
517 IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
520 MemoryRegionSection section;
523 /* Try to get maximum page mask during translation. */
526 /* This can never be MMIO. */
527 section = flatview_do_translate(address_space_to_flatview(as), addr,
528 &xlat, &plen, is_write, false, &as);
530 /* Illegal translation */
531 if (section.mr == &io_mem_unassigned) {
535 /* Convert memory region offset into address space offset */
536 xlat += section.offset_within_address_space -
537 section.offset_within_region;
539 if (plen == (hwaddr)-1) {
541 * We use default page size here. Logically it only happens
542 * for identity mappings.
544 plen = TARGET_PAGE_SIZE;
547 /* Convert to address mask */
550 return (IOMMUTLBEntry) {
552 .iova = addr & ~plen,
553 .translated_addr = xlat & ~plen,
555 /* IOTLBs are for DMA, and DMA is only allowed on RAM. */
560 return (IOMMUTLBEntry) {0};
563 /* Called from RCU critical section */
564 MemoryRegion *flatview_translate(FlatView *fv, hwaddr addr, hwaddr *xlat,
565 hwaddr *plen, bool is_write)
568 MemoryRegionSection section;
569 AddressSpace *as = NULL;
571 /* This can be MMIO, so setup MMIO bit. */
572 section = flatview_do_translate(fv, addr, xlat, plen, is_write, true, &as);
575 if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
576 hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
577 *plen = MIN(page, *plen);
583 /* Called from RCU critical section */
584 MemoryRegionSection *
585 address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
586 hwaddr *xlat, hwaddr *plen)
588 MemoryRegionSection *section;
589 AddressSpaceDispatch *d = atomic_rcu_read(&cpu->cpu_ases[asidx].memory_dispatch);
591 section = address_space_translate_internal(d, addr, xlat, plen, false);
593 assert(!memory_region_is_iommu(section->mr));
598 #if !defined(CONFIG_USER_ONLY)
600 static int cpu_common_post_load(void *opaque, int version_id)
602 CPUState *cpu = opaque;
604 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
605 version_id is increased. */
606 cpu->interrupt_request &= ~0x01;
612 static int cpu_common_pre_load(void *opaque)
614 CPUState *cpu = opaque;
616 cpu->exception_index = -1;
621 static bool cpu_common_exception_index_needed(void *opaque)
623 CPUState *cpu = opaque;
625 return tcg_enabled() && cpu->exception_index != -1;
628 static const VMStateDescription vmstate_cpu_common_exception_index = {
629 .name = "cpu_common/exception_index",
631 .minimum_version_id = 1,
632 .needed = cpu_common_exception_index_needed,
633 .fields = (VMStateField[]) {
634 VMSTATE_INT32(exception_index, CPUState),
635 VMSTATE_END_OF_LIST()
639 static bool cpu_common_crash_occurred_needed(void *opaque)
641 CPUState *cpu = opaque;
643 return cpu->crash_occurred;
646 static const VMStateDescription vmstate_cpu_common_crash_occurred = {
647 .name = "cpu_common/crash_occurred",
649 .minimum_version_id = 1,
650 .needed = cpu_common_crash_occurred_needed,
651 .fields = (VMStateField[]) {
652 VMSTATE_BOOL(crash_occurred, CPUState),
653 VMSTATE_END_OF_LIST()
657 const VMStateDescription vmstate_cpu_common = {
658 .name = "cpu_common",
660 .minimum_version_id = 1,
661 .pre_load = cpu_common_pre_load,
662 .post_load = cpu_common_post_load,
663 .fields = (VMStateField[]) {
664 VMSTATE_UINT32(halted, CPUState),
665 VMSTATE_UINT32(interrupt_request, CPUState),
666 VMSTATE_END_OF_LIST()
668 .subsections = (const VMStateDescription*[]) {
669 &vmstate_cpu_common_exception_index,
670 &vmstate_cpu_common_crash_occurred,
677 CPUState *qemu_get_cpu(int index)
682 if (cpu->cpu_index == index) {
690 #if !defined(CONFIG_USER_ONLY)
691 void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
693 CPUAddressSpace *newas;
695 /* Target code should have set num_ases before calling us */
696 assert(asidx < cpu->num_ases);
699 /* address space 0 gets the convenience alias */
703 /* KVM cannot currently support multiple address spaces. */
704 assert(asidx == 0 || !kvm_enabled());
706 if (!cpu->cpu_ases) {
707 cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
710 newas = &cpu->cpu_ases[asidx];
714 newas->tcg_as_listener.commit = tcg_commit;
715 memory_listener_register(&newas->tcg_as_listener, as);
719 AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
721 /* Return the AddressSpace corresponding to the specified index */
722 return cpu->cpu_ases[asidx].as;
726 void cpu_exec_unrealizefn(CPUState *cpu)
728 CPUClass *cc = CPU_GET_CLASS(cpu);
730 cpu_list_remove(cpu);
732 if (cc->vmsd != NULL) {
733 vmstate_unregister(NULL, cc->vmsd, cpu);
735 if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
736 vmstate_unregister(NULL, &vmstate_cpu_common, cpu);
740 Property cpu_common_props[] = {
741 #ifndef CONFIG_USER_ONLY
742 /* Create a memory property for softmmu CPU object,
743 * so users can wire up its memory. (This can't go in qom/cpu.c
744 * because that file is compiled only once for both user-mode
745 * and system builds.) The default if no link is set up is to use
746 * the system address space.
748 DEFINE_PROP_LINK("memory", CPUState, memory, TYPE_MEMORY_REGION,
751 DEFINE_PROP_END_OF_LIST(),
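/* Minimal usage sketch (illustrative; my_container_mr is hypothetical):
 * board code can wire a CPU to a specific memory region before realize, e.g.
 *
 *     object_property_set_link(OBJECT(cpu), OBJECT(my_container_mr),
 *                              "memory", &error_abort);
 *
 * If the link is left unset, cpu_exec_initfn() below initialises cpu->memory
 * to system_memory as the default.
 */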
754 void cpu_exec_initfn(CPUState *cpu)
759 #ifndef CONFIG_USER_ONLY
760 cpu->thread_id = qemu_get_thread_id();
761 cpu->memory = system_memory;
762 object_ref(OBJECT(cpu->memory));
766 void cpu_exec_realizefn(CPUState *cpu, Error **errp)
768 CPUClass *cc ATTRIBUTE_UNUSED = CPU_GET_CLASS(cpu);
772 #ifndef CONFIG_USER_ONLY
773 if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
774 vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
776 if (cc->vmsd != NULL) {
777 vmstate_register(NULL, cpu->cpu_index, cc->vmsd, cpu);
782 #if defined(CONFIG_USER_ONLY)
783 static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
787 tb_invalidate_phys_page_range(pc, pc + 1, 0);
792 static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
795 hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
796 int asidx = cpu_asidx_from_attrs(cpu, attrs);
798 /* Locks grabbed by tb_invalidate_phys_addr */
799 tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
800 phys | (pc & ~TARGET_PAGE_MASK));
805 #if defined(CONFIG_USER_ONLY)
806 void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
811 int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
817 void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
821 int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
822 int flags, CPUWatchpoint **watchpoint)
827 /* Add a watchpoint. */
828 int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
829 int flags, CPUWatchpoint **watchpoint)
833 /* forbid ranges which are empty or run off the end of the address space */
834 if (len == 0 || (addr + len - 1) < addr) {
835 error_report("tried to set invalid watchpoint at %"
836 VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
839 wp = g_malloc(sizeof(*wp));
845 /* keep all GDB-injected watchpoints in front */
846 if (flags & BP_GDB) {
847 QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
849 QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
852 tlb_flush_page(cpu, addr);
859 /* Remove a specific watchpoint. */
860 int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
865 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
866 if (addr == wp->vaddr && len == wp->len
867 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
868 cpu_watchpoint_remove_by_ref(cpu, wp);
875 /* Remove a specific watchpoint by reference. */
876 void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
878 QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);
880 tlb_flush_page(cpu, watchpoint->vaddr);
885 /* Remove all matching watchpoints. */
886 void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
888 CPUWatchpoint *wp, *next;
890 QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
891 if (wp->flags & mask) {
892 cpu_watchpoint_remove_by_ref(cpu, wp);
897 /* Return true if this watchpoint address matches the specified
898 * access (ie the address range covered by the watchpoint overlaps
899 * partially or completely with the address range covered by the
902 static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
906 /* We know the lengths are non-zero, but a little caution is
907 * required to avoid errors in the case where the range ends
908 * exactly at the top of the address space and so addr + len
909 * wraps round to zero.
911 vaddr wpend = wp->vaddr + wp->len - 1;
912 vaddr addrend = addr + len - 1;
914 return !(addr > wpend || wp->vaddr > addrend);
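/* Worked example: a 1-byte watchpoint at the very top of the address space
 * has wpend == wp->vaddr; computing wp->vaddr + wp->len directly would wrap
 * to 0 and break the overlap test, which is why only the inclusive end
 * addresses are compared here.
 */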
919 /* Add a breakpoint. */
920 int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
921 CPUBreakpoint **breakpoint)
925 bp = g_malloc(sizeof(*bp));
930 /* keep all GDB-injected breakpoints in front */
931 if (flags & BP_GDB) {
932 QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
934 QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
937 breakpoint_invalidate(cpu, pc);
945 /* Remove a specific breakpoint. */
946 int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
950 QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
951 if (bp->pc == pc && bp->flags == flags) {
952 cpu_breakpoint_remove_by_ref(cpu, bp);
959 /* Remove a specific breakpoint by reference. */
960 void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
962 QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);
964 breakpoint_invalidate(cpu, breakpoint->pc);
969 /* Remove all matching breakpoints. */
970 void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
972 CPUBreakpoint *bp, *next;
974 QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
975 if (bp->flags & mask) {
976 cpu_breakpoint_remove_by_ref(cpu, bp);
981 /* enable or disable single step mode. EXCP_DEBUG is returned by the
982 CPU loop after each instruction */
983 void cpu_single_step(CPUState *cpu, int enabled)
985 if (cpu->singlestep_enabled != enabled) {
986 cpu->singlestep_enabled = enabled;
988 kvm_update_guest_debug(cpu, 0);
990 /* must flush all the translated code to avoid inconsistencies */
991 /* XXX: only flush what is necessary */
997 void cpu_abort(CPUState *cpu, const char *fmt, ...)
1004 fprintf(stderr, "qemu: fatal: ");
1005 vfprintf(stderr, fmt, ap);
1006 fprintf(stderr, "\n");
1007 cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
1008 if (qemu_log_separate()) {
1010 qemu_log("qemu: fatal: ");
1011 qemu_log_vprintf(fmt, ap2);
1013 log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
1021 #if defined(CONFIG_USER_ONLY)
1023 struct sigaction act;
1024 sigfillset(&act.sa_mask);
1025 act.sa_handler = SIG_DFL;
1026 sigaction(SIGABRT, &act, NULL);
1032 #if !defined(CONFIG_USER_ONLY)
1033 /* Called from RCU critical section */
1034 static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
1038 block = atomic_rcu_read(&ram_list.mru_block);
1039 if (block && addr - block->offset < block->max_length) {
1042 RAMBLOCK_FOREACH(block) {
1043 if (addr - block->offset < block->max_length) {
1048 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1052 /* It is safe to write mru_block outside the iothread lock. This
1057 * xxx removed from list
1061 * call_rcu(reclaim_ramblock, xxx);
1064 * atomic_rcu_set is not needed here. The block was already published
1065 * when it was placed into the list. Here we're just making an extra
1066 * copy of the pointer.
1068 ram_list.mru_block = block;
1072 static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
1079 end = TARGET_PAGE_ALIGN(start + length);
1080 start &= TARGET_PAGE_MASK;
1083 block = qemu_get_ram_block(start);
1084 assert(block == qemu_get_ram_block(end - 1));
1085 start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
1087 tlb_reset_dirty(cpu, start1, length);
1092 /* Note: start and end must be within the same ram block. */
1093 bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
1097 DirtyMemoryBlocks *blocks;
1098 unsigned long end, page;
1105 end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
1106 page = start >> TARGET_PAGE_BITS;
1110 blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
1112 while (page < end) {
1113 unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
1114 unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
1115 unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);
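/* Illustrative decomposition: the dirty bitmap for each client is split into
 * DirtyMemoryBlocks of DIRTY_MEMORY_BLOCK_SIZE page bits, so for a given
 * page number idx selects the block and offset the bit within it; num is
 * clamped so one bitmap_test_and_clear_atomic() call never crosses a block
 * boundary.
 */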
1117 dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
1124 if (dirty && tcg_enabled()) {
1125 tlb_reset_dirty_range_all(start, length);
1131 DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty
1132 (ram_addr_t start, ram_addr_t length, unsigned client)
1134 DirtyMemoryBlocks *blocks;
1135 unsigned long align = 1UL << (TARGET_PAGE_BITS + BITS_PER_LEVEL);
1136 ram_addr_t first = QEMU_ALIGN_DOWN(start, align);
1137 ram_addr_t last = QEMU_ALIGN_UP(start + length, align);
1138 DirtyBitmapSnapshot *snap;
1139 unsigned long page, end, dest;
1141 snap = g_malloc0(sizeof(*snap) +
1142 ((last - first) >> (TARGET_PAGE_BITS + 3)));
1143 snap->start = first;
1146 page = first >> TARGET_PAGE_BITS;
1147 end = last >> TARGET_PAGE_BITS;
1152 blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
1154 while (page < end) {
1155 unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
1156 unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
1157 unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);
1159 assert(QEMU_IS_ALIGNED(offset, (1 << BITS_PER_LEVEL)));
1160 assert(QEMU_IS_ALIGNED(num, (1 << BITS_PER_LEVEL)));
1161 offset >>= BITS_PER_LEVEL;
1163 bitmap_copy_and_clear_atomic(snap->dirty + dest,
1164 blocks->blocks[idx] + offset,
1167 dest += num >> BITS_PER_LEVEL;
1172 if (tcg_enabled()) {
1173 tlb_reset_dirty_range_all(start, length);
1179 bool cpu_physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap,
1183 unsigned long page, end;
1185 assert(start >= snap->start);
1186 assert(start + length <= snap->end);
1188 end = TARGET_PAGE_ALIGN(start + length - snap->start) >> TARGET_PAGE_BITS;
1189 page = (start - snap->start) >> TARGET_PAGE_BITS;
1191 while (page < end) {
1192 if (test_bit(page, snap->dirty)) {
1200 /* Called from RCU critical section */
1201 hwaddr memory_region_section_get_iotlb(CPUState *cpu,
1202 MemoryRegionSection *section,
1204 hwaddr paddr, hwaddr xlat,
1206 target_ulong *address)
1211 if (memory_region_is_ram(section->mr)) {
1213 iotlb = memory_region_get_ram_addr(section->mr) + xlat;
1214 if (!section->readonly) {
1215 iotlb |= PHYS_SECTION_NOTDIRTY;
1217 iotlb |= PHYS_SECTION_ROM;
1220 AddressSpaceDispatch *d;
1222 d = flatview_to_dispatch(section->fv);
1223 iotlb = section - d->map.sections;
1227 /* Make accesses to pages with watchpoints go via the
1228 watchpoint trap routines. */
1229 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
1230 if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
1231 /* Avoid trapping reads of pages with a write breakpoint. */
1232 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
1233 iotlb = PHYS_SECTION_WATCH + paddr;
1234 *address |= TLB_MMIO;
1242 #endif /* defined(CONFIG_USER_ONLY) */
1244 #if !defined(CONFIG_USER_ONLY)
1246 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
1248 static subpage_t *subpage_init(FlatView *fv, hwaddr base);
1250 static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
1251 qemu_anon_ram_alloc;
1254 * Set a custom physical guest memory allocator.
1255 * Accelerators with unusual needs may need this. Hopefully, we can
1256 * get rid of it eventually.
1258 void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
1260 phys_mem_alloc = alloc;
1263 static uint16_t phys_section_add(PhysPageMap *map,
1264 MemoryRegionSection *section)
1266 /* The physical section number is ORed with a page-aligned
1267 * pointer to produce the iotlb entries. Thus it should
1268 * never overflow into the page-aligned value.
1270 assert(map->sections_nb < TARGET_PAGE_SIZE);
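/* Worked example: with 4 KiB target pages the section number fits in the low
 * 12 bits, so an iotlb value such as (ram_addr | PHYS_SECTION_NOTDIRTY) can
 * later be split again by iotlb_to_region(), which recovers the section with
 * index & ~TARGET_PAGE_MASK.
 */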
1272 if (map->sections_nb == map->sections_nb_alloc) {
1273 map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
1274 map->sections = g_renew(MemoryRegionSection, map->sections,
1275 map->sections_nb_alloc);
1277 map->sections[map->sections_nb] = *section;
1278 memory_region_ref(section->mr);
1279 return map->sections_nb++;
1282 static void phys_section_destroy(MemoryRegion *mr)
1284 bool have_sub_page = mr->subpage;
1286 memory_region_unref(mr);
1288 if (have_sub_page) {
1289 subpage_t *subpage = container_of(mr, subpage_t, iomem);
1290 object_unref(OBJECT(&subpage->iomem));
1295 static void phys_sections_free(PhysPageMap *map)
1297 while (map->sections_nb > 0) {
1298 MemoryRegionSection *section = &map->sections[--map->sections_nb];
1299 phys_section_destroy(section->mr);
1301 g_free(map->sections);
1305 static void register_subpage(FlatView *fv, MemoryRegionSection *section)
1307 AddressSpaceDispatch *d = flatview_to_dispatch(fv);
1309 hwaddr base = section->offset_within_address_space
1311 MemoryRegionSection *existing = phys_page_find(d, base);
1312 MemoryRegionSection subsection = {
1313 .offset_within_address_space = base,
1314 .size = int128_make64(TARGET_PAGE_SIZE),
1318 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
1320 if (!(existing->mr->subpage)) {
1321 subpage = subpage_init(fv, base);
1323 subsection.mr = &subpage->iomem;
1324 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
1325 phys_section_add(&d->map, &subsection));
1327 subpage = container_of(existing->mr, subpage_t, iomem);
1329 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
1330 end = start + int128_get64(section->size) - 1;
1331 subpage_register(subpage, start, end,
1332 phys_section_add(&d->map, section));
1336 static void register_multipage(FlatView *fv,
1337 MemoryRegionSection *section)
1339 AddressSpaceDispatch *d = flatview_to_dispatch(fv);
1340 hwaddr start_addr = section->offset_within_address_space;
1341 uint16_t section_index = phys_section_add(&d->map, section);
1342 uint64_t num_pages = int128_get64(int128_rshift(section->size,
1346 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
1349 void flatview_add_to_dispatch(FlatView *fv, MemoryRegionSection *section)
1351 MemoryRegionSection now = *section, remain = *section;
1352 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
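/* Worked example (4 KiB target pages): a section covering [0x1800, 0x5200)
 * is split into a head subpage for [0x1800, 0x2000), full pages
 * [0x2000, 0x5000) registered via register_multipage(), and a tail subpage
 * for [0x5000, 0x5200).
 */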
1354 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
1355 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
1356 - now.offset_within_address_space;
1358 now.size = int128_min(int128_make64(left), now.size);
1359 register_subpage(fv, &now);
1361 now.size = int128_zero();
1363 while (int128_ne(remain.size, now.size)) {
1364 remain.size = int128_sub(remain.size, now.size);
1365 remain.offset_within_address_space += int128_get64(now.size);
1366 remain.offset_within_region += int128_get64(now.size);
1368 if (int128_lt(remain.size, page_size)) {
1369 register_subpage(fv, &now);
1370 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
1371 now.size = page_size;
1372 register_subpage(fv, &now);
1374 now.size = int128_and(now.size, int128_neg(page_size));
1375 register_multipage(fv, &now);
1380 void qemu_flush_coalesced_mmio_buffer(void)
1383 kvm_flush_coalesced_mmio_buffer();
1386 void qemu_mutex_lock_ramlist(void)
1388 qemu_mutex_lock(&ram_list.mutex);
1391 void qemu_mutex_unlock_ramlist(void)
1393 qemu_mutex_unlock(&ram_list.mutex);
1396 void ram_block_dump(Monitor *mon)
1402 monitor_printf(mon, "%24s %8s %18s %18s %18s\n",
1403 "Block Name", "PSize", "Offset", "Used", "Total");
1404 RAMBLOCK_FOREACH(block) {
1405 psize = size_to_str(block->page_size);
1406 monitor_printf(mon, "%24s %8s 0x%016" PRIx64 " 0x%016" PRIx64
1407 " 0x%016" PRIx64 "\n", block->idstr, psize,
1408 (uint64_t)block->offset,
1409 (uint64_t)block->used_length,
1410 (uint64_t)block->max_length);
1418 * FIXME TOCTTOU: this iterates over memory backends' mem-path, which
1419 * may or may not name the same files / on the same filesystem now as
1420 * when we actually open and map them. Iterate over the file
1421 * descriptors instead, and use qemu_fd_getpagesize().
1423 static int find_max_supported_pagesize(Object *obj, void *opaque)
1426 long *hpsize_min = opaque;
1428 if (object_dynamic_cast(obj, TYPE_MEMORY_BACKEND)) {
1429 mem_path = object_property_get_str(obj, "mem-path", NULL);
1431 long hpsize = qemu_mempath_getpagesize(mem_path);
1432 if (hpsize < *hpsize_min) {
1433 *hpsize_min = hpsize;
1436 *hpsize_min = getpagesize();
1443 long qemu_getrampagesize(void)
1445 long hpsize = LONG_MAX;
1446 long mainrampagesize;
1447 Object *memdev_root;
1450 mainrampagesize = qemu_mempath_getpagesize(mem_path);
1452 mainrampagesize = getpagesize();
1455 /* it's possible we have memory-backend objects with
1456 * hugepage-backed RAM. these may get mapped into system
1457 * address space via -numa parameters or memory hotplug
1458 * hooks. we want to take these into account, but we
1459 * also want to make sure these supported hugepage
1460 * sizes are applicable across the entire range of memory
1461 * we may boot from, so we take the min across all
1462 * backends, and assume normal pages in cases where a
1463 * backend isn't backed by hugepages.
1465 memdev_root = object_resolve_path("/objects", NULL);
1467 object_child_foreach(memdev_root, find_max_supported_pagesize, &hpsize);
1469 if (hpsize == LONG_MAX) {
1470 /* No additional memory regions found ==> Report main RAM page size */
1471 return mainrampagesize;
1474 /* If NUMA is disabled or the NUMA nodes are not backed with a
1475 * memory-backend, then there is at least one node using "normal" RAM,
1476 * so if its page size is smaller we have got to report that size instead.
1478 if (hpsize > mainrampagesize &&
1479 (nb_numa_nodes == 0 || numa_info[0].node_memdev == NULL)) {
1482 error_report("Huge page support disabled (n/a for main memory).");
1485 return mainrampagesize;
1491 long qemu_getrampagesize(void)
1493 return getpagesize();
1498 static int64_t get_file_size(int fd)
1500 int64_t size = lseek(fd, 0, SEEK_END);
1507 static int file_ram_open(const char *path,
1508 const char *region_name,
1513 char *sanitized_name;
1519 fd = open(path, O_RDWR);
1521 /* @path names an existing file, use it */
1524 if (errno == ENOENT) {
1525 /* @path names a file that doesn't exist, create it */
1526 fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644);
1531 } else if (errno == EISDIR) {
1532 /* @path names a directory, create a file there */
1533 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1534 sanitized_name = g_strdup(region_name);
1535 for (c = sanitized_name; *c != '\0'; c++) {
1541 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1543 g_free(sanitized_name);
1545 fd = mkstemp(filename);
1553 if (errno != EEXIST && errno != EINTR) {
1554 error_setg_errno(errp, errno,
1555 "can't open backing store %s for guest RAM",
1560 * Try again on EINTR and EEXIST. The latter happens when
1561 * something else creates the file between our two open().
1568 static void *file_ram_alloc(RAMBlock *block,
1576 block->page_size = qemu_fd_getpagesize(fd);
1577 block->mr->align = block->page_size;
1578 #if defined(__s390x__)
1579 if (kvm_enabled()) {
1580 block->mr->align = MAX(block->mr->align, QEMU_VMALLOC_ALIGN);
1584 if (memory < block->page_size) {
1585 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
1586 "or larger than page size 0x%zx",
1587 memory, block->page_size);
1591 memory = ROUND_UP(memory, block->page_size);
1594 * ftruncate is not supported by hugetlbfs in older
1595 * hosts, so don't bother bailing out on errors.
1596 * If anything goes wrong with it under other filesystems,
1599 * Do not truncate the non-empty backend file to avoid corrupting
1600 * the existing data in the file. Disabling shrinking is not
1601 * enough. For example, the current vNVDIMM implementation stores
1602 * the guest NVDIMM labels at the end of the backend file. If the
1603 * backend file is later extended, QEMU will not be able to find
1604 * those labels. Therefore, extending the non-empty backend file
1605 * is disabled as well.
1607 if (truncate && ftruncate(fd, memory)) {
1608 perror("ftruncate");
1611 area = qemu_ram_mmap(fd, memory, block->mr->align,
1612 block->flags & RAM_SHARED);
1613 if (area == MAP_FAILED) {
1614 error_setg_errno(errp, errno,
1615 "unable to map backing store for guest RAM");
1620 os_mem_prealloc(fd, area, memory, smp_cpus, errp);
1621 if (errp && *errp) {
1622 qemu_ram_munmap(area, memory);
1632 /* Called with the ramlist lock held. */
1633 static ram_addr_t find_ram_offset(ram_addr_t size)
1635 RAMBlock *block, *next_block;
1636 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
1638 assert(size != 0); /* it would hand out the same offset multiple times */
1640 if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
1644 RAMBLOCK_FOREACH(block) {
1645 ram_addr_t end, next = RAM_ADDR_MAX;
1647 end = block->offset + block->max_length;
1649 RAMBLOCK_FOREACH(next_block) {
1650 if (next_block->offset >= end) {
1651 next = MIN(next, next_block->offset);
1654 if (next - end >= size && next - end < mingap) {
1656 mingap = next - end;
1660 if (offset == RAM_ADDR_MAX) {
1661 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1669 unsigned long last_ram_page(void)
1672 ram_addr_t last = 0;
1675 RAMBLOCK_FOREACH(block) {
1676 last = MAX(last, block->offset + block->max_length);
1679 return last >> TARGET_PAGE_BITS;
1682 static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1686 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
1687 if (!machine_dump_guest_core(current_machine)) {
1688 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1690 perror("qemu_madvise");
1691 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1692 "but dump_guest_core=off specified\n");
1697 const char *qemu_ram_get_idstr(RAMBlock *rb)
1702 bool qemu_ram_is_shared(RAMBlock *rb)
1704 return rb->flags & RAM_SHARED;
1707 /* Called with iothread lock held. */
1708 void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev)
1713 assert(!new_block->idstr[0]);
1716 char *id = qdev_get_dev_path(dev);
1718 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
1722 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1725 RAMBLOCK_FOREACH(block) {
1726 if (block != new_block &&
1727 !strcmp(block->idstr, new_block->idstr)) {
1728 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1736 /* Called with iothread lock held. */
1737 void qemu_ram_unset_idstr(RAMBlock *block)
1739 /* FIXME: arch_init.c assumes that this is not called throughout
1740 * migration. Ignore the problem since hot-unplug during migration
1741 * does not work anyway.
1744 memset(block->idstr, 0, sizeof(block->idstr));
1748 size_t qemu_ram_pagesize(RAMBlock *rb)
1750 return rb->page_size;
1753 /* Returns the largest size of page in use */
1754 size_t qemu_ram_pagesize_largest(void)
1759 RAMBLOCK_FOREACH(block) {
1760 largest = MAX(largest, qemu_ram_pagesize(block));
1766 static int memory_try_enable_merging(void *addr, size_t len)
1768 if (!machine_mem_merge(current_machine)) {
1769 /* disabled by the user */
1773 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1776 /* Only legal before guest might have detected the memory size: e.g. on
1777 * incoming migration, or right after reset.
1779 * As the memory core doesn't know how memory is accessed, it is up to the
1780 * resize callback to update device state and/or add assertions to detect
1781 * misuse, if necessary.
1783 int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp)
1787 newsize = HOST_PAGE_ALIGN(newsize);
1789 if (block->used_length == newsize) {
1793 if (!(block->flags & RAM_RESIZEABLE)) {
1794 error_setg_errno(errp, EINVAL,
1795 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1796 " in != 0x" RAM_ADDR_FMT, block->idstr,
1797 newsize, block->used_length);
1801 if (block->max_length < newsize) {
1802 error_setg_errno(errp, EINVAL,
1803 "Length too large: %s: 0x" RAM_ADDR_FMT
1804 " > 0x" RAM_ADDR_FMT, block->idstr,
1805 newsize, block->max_length);
1809 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1810 block->used_length = newsize;
1811 cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
1813 memory_region_set_size(block->mr, newsize);
1814 if (block->resized) {
1815 block->resized(block->idstr, newsize, block->host);
1820 /* Called with ram_list.mutex held */
1821 static void dirty_memory_extend(ram_addr_t old_ram_size,
1822 ram_addr_t new_ram_size)
1824 ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size,
1825 DIRTY_MEMORY_BLOCK_SIZE);
1826 ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size,
1827 DIRTY_MEMORY_BLOCK_SIZE);
1830 /* Only need to extend if block count increased */
1831 if (new_num_blocks <= old_num_blocks) {
1835 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1836 DirtyMemoryBlocks *old_blocks;
1837 DirtyMemoryBlocks *new_blocks;
1840 old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]);
1841 new_blocks = g_malloc(sizeof(*new_blocks) +
1842 sizeof(new_blocks->blocks[0]) * new_num_blocks);
1844 if (old_num_blocks) {
1845 memcpy(new_blocks->blocks, old_blocks->blocks,
1846 old_num_blocks * sizeof(old_blocks->blocks[0]));
1849 for (j = old_num_blocks; j < new_num_blocks; j++) {
1850 new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
1853 atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);
1856 g_free_rcu(old_blocks, rcu);
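/* This follows the usual copy-and-publish RCU pattern: readers that entered
 * their critical section before atomic_rcu_set() keep using old_blocks, and
 * the old array is only freed once they have all left, so lockless readers
 * of ram_list.dirty_memory[] never see a half-built block list.
 */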
1861 static void ram_block_add(RAMBlock *new_block, Error **errp)
1864 RAMBlock *last_block = NULL;
1865 ram_addr_t old_ram_size, new_ram_size;
1868 old_ram_size = last_ram_page();
1870 qemu_mutex_lock_ramlist();
1871 new_block->offset = find_ram_offset(new_block->max_length);
1873 if (!new_block->host) {
1874 if (xen_enabled()) {
1875 xen_ram_alloc(new_block->offset, new_block->max_length,
1876 new_block->mr, &err);
1878 error_propagate(errp, err);
1879 qemu_mutex_unlock_ramlist();
1883 new_block->host = phys_mem_alloc(new_block->max_length,
1884 &new_block->mr->align);
1885 if (!new_block->host) {
1886 error_setg_errno(errp, errno,
1887 "cannot set up guest memory '%s'",
1888 memory_region_name(new_block->mr));
1889 qemu_mutex_unlock_ramlist();
1892 memory_try_enable_merging(new_block->host, new_block->max_length);
1896 new_ram_size = MAX(old_ram_size,
1897 (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
1898 if (new_ram_size > old_ram_size) {
1899 dirty_memory_extend(old_ram_size, new_ram_size);
1901 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1902 * QLIST (which has an RCU-friendly variant) does not have insertion at
1903 * tail, so save the last element in last_block.
1905 RAMBLOCK_FOREACH(block) {
1907 if (block->max_length < new_block->max_length) {
1912 QLIST_INSERT_BEFORE_RCU(block, new_block, next);
1913 } else if (last_block) {
1914 QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
1915 } else { /* list is empty */
1916 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
1918 ram_list.mru_block = NULL;
1920 /* Write list before version */
1923 qemu_mutex_unlock_ramlist();
1925 cpu_physical_memory_set_dirty_range(new_block->offset,
1926 new_block->used_length,
1929 if (new_block->host) {
1930 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1931 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1932 /* MADV_DONTFORK is also needed by KVM in absence of synchronous MMU */
1933 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
1934 ram_block_notify_add(new_block->host, new_block->max_length);
1939 RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, MemoryRegion *mr,
1943 RAMBlock *new_block;
1944 Error *local_err = NULL;
1947 if (xen_enabled()) {
1948 error_setg(errp, "-mem-path not supported with Xen");
1952 if (kvm_enabled() && !kvm_has_sync_mmu()) {
1954 "host lacks kvm mmu notifiers, -mem-path unsupported");
1958 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1960 * file_ram_alloc() needs to allocate just like
1961 * phys_mem_alloc, but we haven't bothered to provide
1965 "-mem-path not supported with this accelerator");
1969 size = HOST_PAGE_ALIGN(size);
1970 file_size = get_file_size(fd);
1971 if (file_size > 0 && file_size < size) {
1972 error_setg(errp, "backing store %s size 0x%" PRIx64
1973 " does not match 'size' option 0x" RAM_ADDR_FMT,
1974 mem_path, file_size, size);
1978 new_block = g_malloc0(sizeof(*new_block));
1980 new_block->used_length = size;
1981 new_block->max_length = size;
1982 new_block->flags = share ? RAM_SHARED : 0;
1983 new_block->host = file_ram_alloc(new_block, size, fd, !file_size, errp);
1984 if (!new_block->host) {
1989 ram_block_add(new_block, &local_err);
1992 error_propagate(errp, local_err);
2000 RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
2001 bool share, const char *mem_path,
2008 fd = file_ram_open(mem_path, memory_region_name(mr), &created, errp);
2013 block = qemu_ram_alloc_from_fd(size, mr, share, fd, errp);
2027 RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
2028 void (*resized)(const char*,
2031 void *host, bool resizeable,
2032 MemoryRegion *mr, Error **errp)
2034 RAMBlock *new_block;
2035 Error *local_err = NULL;
2037 size = HOST_PAGE_ALIGN(size);
2038 max_size = HOST_PAGE_ALIGN(max_size);
2039 new_block = g_malloc0(sizeof(*new_block));
2041 new_block->resized = resized;
2042 new_block->used_length = size;
2043 new_block->max_length = max_size;
2044 assert(max_size >= size);
2046 new_block->page_size = getpagesize();
2047 new_block->host = host;
2049 new_block->flags |= RAM_PREALLOC;
2052 new_block->flags |= RAM_RESIZEABLE;
2054 ram_block_add(new_block, &local_err);
2057 error_propagate(errp, local_err);
2063 RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
2064 MemoryRegion *mr, Error **errp)
2066 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
2069 RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
2071 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
2074 RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
2075 void (*resized)(const char*,
2078 MemoryRegion *mr, Error **errp)
2080 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
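/* Minimal usage sketch (illustrative; the region name, sizes and callback
 * are made up): callers normally reach this through
 * memory_region_init_resizeable_ram(), roughly
 *
 *     memory_region_init_resizeable_ram(&mr, owner, "example.ram",
 *                                       16 * 1024 * 1024, 64 * 1024 * 1024,
 *                                       example_resized_cb, &error_fatal);
 *
 * and the block can later grow via qemu_ram_resize() (see above), which
 * invokes the resized callback.
 */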
2083 static void reclaim_ramblock(RAMBlock *block)
2085 if (block->flags & RAM_PREALLOC) {
2087 } else if (xen_enabled()) {
2088 xen_invalidate_map_cache_entry(block->host);
2090 } else if (block->fd >= 0) {
2091 qemu_ram_munmap(block->host, block->max_length);
2095 qemu_anon_ram_free(block->host, block->max_length);
2100 void qemu_ram_free(RAMBlock *block)
2107 ram_block_notify_remove(block->host, block->max_length);
2110 qemu_mutex_lock_ramlist();
2111 QLIST_REMOVE_RCU(block, next);
2112 ram_list.mru_block = NULL;
2113 /* Write list before version */
2116 call_rcu(block, reclaim_ramblock, rcu);
2117 qemu_mutex_unlock_ramlist();
2121 void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
2128 RAMBLOCK_FOREACH(block) {
2129 offset = addr - block->offset;
2130 if (offset < block->max_length) {
2131 vaddr = ramblock_ptr(block, offset);
2132 if (block->flags & RAM_PREALLOC) {
2134 } else if (xen_enabled()) {
2138 if (block->fd >= 0) {
2139 flags |= (block->flags & RAM_SHARED ?
2140 MAP_SHARED : MAP_PRIVATE);
2141 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2142 flags, block->fd, offset);
2145 * Remap needs to match alloc. Accelerators that
2146 * set phys_mem_alloc never remap. If they did,
2147 * we'd need a remap hook here.
2149 assert(phys_mem_alloc == qemu_anon_ram_alloc);
2151 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2152 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2155 if (area != vaddr) {
2156 fprintf(stderr, "Could not remap addr: "
2157 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
2161 memory_try_enable_merging(vaddr, length);
2162 qemu_ram_setup_dump(vaddr, length);
2167 #endif /* !_WIN32 */
2169 /* Return a host pointer to ram allocated with qemu_ram_alloc.
2170 * This should not be used for general purpose DMA. Use address_space_map
2171 * or address_space_rw instead. For local memory (e.g. video ram) that the
2172 * device owns, use memory_region_get_ram_ptr.
2174 * Called within RCU critical section.
2176 void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
2178 RAMBlock *block = ram_block;
2180 if (block == NULL) {
2181 block = qemu_get_ram_block(addr);
2182 addr -= block->offset;
2185 if (xen_enabled() && block->host == NULL) {
2186 /* We need to check if the requested address is in the RAM
2187 * because we don't want to map the entire memory in QEMU.
2188 * In that case just map until the end of the page.
2190 if (block->offset == 0) {
2191 return xen_map_cache(addr, 0, 0, false);
2194 block->host = xen_map_cache(block->offset, block->max_length, 1, false);
2196 return ramblock_ptr(block, addr);
2199 /* Return a host pointer to guest's ram. Similar to qemu_map_ram_ptr
2200 * but takes a size argument.
2202 * Called within RCU critical section.
2204 static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
2205 hwaddr *size, bool lock)
2207 RAMBlock *block = ram_block;
2212 if (block == NULL) {
2213 block = qemu_get_ram_block(addr);
2214 addr -= block->offset;
2216 *size = MIN(*size, block->max_length - addr);
2218 if (xen_enabled() && block->host == NULL) {
2219 /* We need to check if the requested address is in the RAM
2220 * because we don't want to map the entire memory in QEMU.
2221 * In that case just map the requested area.
2223 if (block->offset == 0) {
2224 return xen_map_cache(addr, *size, lock, lock);
2227 block->host = xen_map_cache(block->offset, block->max_length, 1, lock);
2230 return ramblock_ptr(block, addr);
2234 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
2237 * ptr: Host pointer to look up
2238 * round_offset: If true round the result offset down to a page boundary
2239 * *ram_addr: set to result ram_addr
2240 * *offset: set to result offset within the RAMBlock
2242 * Returns: RAMBlock (or NULL if not found)
2244 * By the time this function returns, the returned pointer is not protected
2245 * by RCU anymore. If the caller is not within an RCU critical section and
2246 * does not hold the iothread lock, it must have other means of protecting the
2247 * pointer, such as a reference to the region that includes the incoming
2250 RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
2254 uint8_t *host = ptr;
2256 if (xen_enabled()) {
2257 ram_addr_t ram_addr;
2259 ram_addr = xen_ram_addr_from_mapcache(ptr);
2260 block = qemu_get_ram_block(ram_addr);
2262 *offset = ram_addr - block->offset;
2269 block = atomic_rcu_read(&ram_list.mru_block);
2270 if (block && block->host && host - block->host < block->max_length) {
2274 RAMBLOCK_FOREACH(block) {
2275 /* This case occurs when the block is not mapped. */
2276 if (block->host == NULL) {
2279 if (host - block->host < block->max_length) {
2288 *offset = (host - block->host);
2290 *offset &= TARGET_PAGE_MASK;
2297 * Finds the named RAMBlock
2299 * name: The name of RAMBlock to find
2301 * Returns: RAMBlock (or NULL if not found)
2303 RAMBlock *qemu_ram_block_by_name(const char *name)
2307 RAMBLOCK_FOREACH(block) {
2308 if (!strcmp(name, block->idstr)) {
2316 /* Some of the softmmu routines need to translate from a host pointer
2317 (typically a TLB entry) back to a ram offset. */
2318 ram_addr_t qemu_ram_addr_from_host(void *ptr)
2323 block = qemu_ram_block_from_host(ptr, false, &offset);
2325 return RAM_ADDR_INVALID;
2328 return block->offset + offset;
2331 /* Called within RCU critical section. */
2332 static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
2333 uint64_t val, unsigned size)
2335 bool locked = false;
2337 assert(tcg_enabled());
2338 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
2341 tb_invalidate_phys_page_fast(ram_addr, size);
2345 stb_p(qemu_map_ram_ptr(NULL, ram_addr), val);
2348 stw_p(qemu_map_ram_ptr(NULL, ram_addr), val);
2351 stl_p(qemu_map_ram_ptr(NULL, ram_addr), val);
2361 /* Set both VGA and migration bits for simplicity and to remove
2362 * the notdirty callback faster.
2364 cpu_physical_memory_set_dirty_range(ram_addr, size,
2365 DIRTY_CLIENTS_NOCODE);
2366 /* we remove the notdirty callback only if the code has been
2368 if (!cpu_physical_memory_is_clean(ram_addr)) {
2369 tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
2373 static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
2374 unsigned size, bool is_write)
2379 static const MemoryRegionOps notdirty_mem_ops = {
2380 .write = notdirty_mem_write,
2381 .valid.accepts = notdirty_mem_accepts,
2382 .endianness = DEVICE_NATIVE_ENDIAN,
2385 /* Generate a debug exception if a watchpoint has been hit. */
2386 static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
2388 CPUState *cpu = current_cpu;
2389 CPUClass *cc = CPU_GET_CLASS(cpu);
2390 CPUArchState *env = cpu->env_ptr;
2391 target_ulong pc, cs_base;
2396 assert(tcg_enabled());
2397 if (cpu->watchpoint_hit) {
2398 /* We re-entered the check after replacing the TB. Now raise
2399 * the debug interrupt so that it will trigger after the
2400 * current instruction. */
2401 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
2404 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2405 vaddr = cc->adjust_watchpoint_address(cpu, vaddr, len);
2406 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
2407 if (cpu_watchpoint_address_matches(wp, vaddr, len)
2408 && (wp->flags & flags)) {
2409 if (flags == BP_MEM_READ) {
2410 wp->flags |= BP_WATCHPOINT_HIT_READ;
2412 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
2414 wp->hitaddr = vaddr;
2415 wp->hitattrs = attrs;
2416 if (!cpu->watchpoint_hit) {
2417 if (wp->flags & BP_CPU &&
2418 !cc->debug_check_watchpoint(cpu, wp)) {
2419 wp->flags &= ~BP_WATCHPOINT_HIT;
2422 cpu->watchpoint_hit = wp;
2424 /* Both tb_lock and iothread_mutex will be reset when
2425 * cpu_loop_exit or cpu_loop_exit_noexc longjmp
2426 * back into the cpu_exec main loop.
2429 tb_check_watchpoint(cpu);
2430 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2431 cpu->exception_index = EXCP_DEBUG;
2434 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2435 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
2436 cpu_loop_exit_noexc(cpu);
2440 wp->flags &= ~BP_WATCHPOINT_HIT;
2445 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2446 so these check for a hit then pass through to the normal out-of-line
2448 static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
2449 unsigned size, MemTxAttrs attrs)
2453 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2454 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
2456 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
2459 data = address_space_ldub(as, addr, attrs, &res);
2462 data = address_space_lduw(as, addr, attrs, &res);
2465 data = address_space_ldl(as, addr, attrs, &res);
2473 static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
2474 uint64_t val, unsigned size,
2478 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2479 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
2481 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
2484 address_space_stb(as, addr, val, attrs, &res);
2487 address_space_stw(as, addr, val, attrs, &res);
2490 address_space_stl(as, addr, val, attrs, &res);
2497 static const MemoryRegionOps watch_mem_ops = {
2498 .read_with_attrs = watch_mem_read,
2499 .write_with_attrs = watch_mem_write,
2500 .endianness = DEVICE_NATIVE_ENDIAN,
2503 static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs,
2504 const uint8_t *buf, int len);
2505 static bool flatview_access_valid(FlatView *fv, hwaddr addr, int len,
2508 static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
2509 unsigned len, MemTxAttrs attrs)
2511 subpage_t *subpage = opaque;
2515 #if defined(DEBUG_SUBPAGE)
2516 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
2517 subpage, len, addr);
2519 res = flatview_read(subpage->fv, addr + subpage->base, attrs, buf, len);
2525 *data = ldub_p(buf);
2528 *data = lduw_p(buf);
2541 static MemTxResult subpage_write(void *opaque, hwaddr addr,
2542 uint64_t value, unsigned len, MemTxAttrs attrs)
2544 subpage_t *subpage = opaque;
2547 #if defined(DEBUG_SUBPAGE)
2548 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
2549 " value %"PRIx64"\n",
2550 __func__, subpage, len, addr, value);
2568 return flatview_write(subpage->fv, addr + subpage->base, attrs, buf, len);
static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned len, bool is_write)
{
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', len, addr);
#endif

    return flatview_access_valid(subpage->fv, addr + subpage->base,
                                 len, is_write);
}
static const MemoryRegionOps subpage_ops = {
    .read_with_attrs = subpage_read,
    .write_with_attrs = subpage_write,
    .impl.min_access_size = 1,
    .impl.max_access_size = 8,
    .valid.min_access_size = 1,
    .valid.max_access_size = 8,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE) {
        return -1;
    }
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}
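/*
 * Illustrative sketch: when a guest page is split between two devices,
 * the dispatch code builds a subpage and registers each byte range with
 * its section number (the section values here are made up):
 *
 *     subpage_t *sub = subpage_init(fv, page_base);
 *     subpage_register(sub, 0x000, 0x7ff, section_a);
 *     subpage_register(sub, 0x800, 0xfff, section_b);
 *
 * Accesses then index sub_section[] by SUBPAGE_IDX(addr) to find the
 * MemoryRegionSection that really backs the touched bytes.
 */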
static subpage_t *subpage_init(FlatView *fv, hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t) + TARGET_PAGE_SIZE * sizeof(uint16_t));
    mmio->fv = fv;
    mmio->base = base;
    memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
                          NULL, TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);

    return mmio;
}
static uint16_t dummy_section(PhysPageMap *map, FlatView *fv, MemoryRegion *mr)
{
    MemoryRegionSection section = {
        .fv = fv,
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = int128_2_64(),
    };

    return phys_section_add(map, &section);
}
MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
{
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
    AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
    MemoryRegionSection *sections = d->map.sections;

    return sections[index & ~TARGET_PAGE_MASK].mr;
}
static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
                          NULL, UINT64_MAX);

    /* io_mem_notdirty calls tb_invalidate_phys_page_fast,
     * which can be called without the iothread mutex.
     */
    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
                          NULL, UINT64_MAX);
    memory_region_clear_global_locking(&io_mem_notdirty);

    memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
                          NULL, UINT64_MAX);
}
AddressSpaceDispatch *address_space_dispatch_new(FlatView *fv)
{
    AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
    uint16_t n;

    n = dummy_section(&d->map, fv, &io_mem_unassigned);
    assert(n == PHYS_SECTION_UNASSIGNED);
    n = dummy_section(&d->map, fv, &io_mem_notdirty);
    assert(n == PHYS_SECTION_NOTDIRTY);
    n = dummy_section(&d->map, fv, &io_mem_rom);
    assert(n == PHYS_SECTION_ROM);
    n = dummy_section(&d->map, fv, &io_mem_watch);
    assert(n == PHYS_SECTION_WATCH);

    d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };

    return d;
}

void address_space_dispatch_free(AddressSpaceDispatch *d)
{
    phys_sections_free(&d->map);
    g_free(d);
}
static void tcg_commit(MemoryListener *listener)
{
    CPUAddressSpace *cpuas;
    AddressSpaceDispatch *d;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
    cpu_reloading_memory_map();
    /* The CPU and TLB are protected by the iothread lock.
     * We reload the dispatch pointer now because cpu_reloading_memory_map()
     * may have split the RCU critical section.
     */
    d = address_space_to_dispatch(cpuas->as);
    atomic_rcu_set(&cpuas->memory_dispatch, d);
    tlb_flush(cpuas->cpu);
}
static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));

    memory_region_init(system_memory, NULL, "system", UINT64_MAX);
    address_space_init(&address_space_memory, system_memory, "memory");

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
                          65536);
    address_space_init(&address_space_io, system_io, "I/O");
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID)) {
            return -1;
        }
        if (is_write) {
            if (!(flags & PAGE_WRITE)) {
                return -1;
            }
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0))) {
                return -1;
            }
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ)) {
                return -1;
            }
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1))) {
                return -1;
            }
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else
static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
                                     hwaddr length)
{
    uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    addr += memory_region_get_ram_addr(mr);

    /* No early return if dirty_log_mask is or becomes 0, because
     * cpu_physical_memory_set_dirty_range will still call
     * xen_modified_memory.
     */
    if (dirty_log_mask) {
        dirty_log_mask =
            cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
    }
    if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
        assert(tcg_enabled());
        tb_lock();
        tb_invalidate_phys_range(addr, addr + length);
        tb_unlock();
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
    }
    cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
}
static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
{
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /* Regions are assumed to support 1-4 byte accesses unless
       otherwise specified.  */
    if (access_size_max == 0) {
        access_size_max = 4;
    }

    /* Bound the maximum access by the alignment of the address.  */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;
        }
    }

    /* Don't attempt accesses larger than the maximum.  */
    if (l > access_size_max) {
        l = access_size_max;
    }
    l = pow2floor(l);

    return l;
}
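/*
 * Worked example (illustrative): for an 8-byte request at address
 * 0x1004 to a region with max_access_size 8 and no unaligned support,
 * addr & -addr == 0x4, so the access is capped at 4 bytes; the dispatch
 * loops below then issue the remaining 4 bytes on the next iteration.
 */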
static bool prepare_mmio_access(MemoryRegion *mr)
{
    bool unlocked = !qemu_mutex_iothread_locked();
    bool release_lock = false;

    if (unlocked && mr->global_locking) {
        qemu_mutex_lock_iothread();
        unlocked = false;
        release_lock = true;
    }
    if (mr->flush_coalesced_mmio) {
        if (unlocked) {
            qemu_mutex_lock_iothread();
        }
        qemu_flush_coalesced_mmio_buffer();
        if (unlocked) {
            qemu_mutex_unlock_iothread();
        }
    }

    return release_lock;
}
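/*
 * Caller idiom (sketch): the dispatch loops below use the return value
 * to drop the iothread lock once the MMIO burst is done, e.g.
 *
 *     bool release_lock = false;
 *     ...
 *     release_lock |= prepare_mmio_access(mr);
 *     result |= memory_region_dispatch_write(mr, addr1, val, 4, attrs);
 *     ...
 *     if (release_lock) {
 *         qemu_mutex_unlock_iothread();
 *     }
 */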
/* Called within RCU critical section.  */
static MemTxResult flatview_write_continue(FlatView *fv, hwaddr addr,
                                           MemTxAttrs attrs,
                                           const uint8_t *buf,
                                           int len, hwaddr addr1,
                                           hwaddr l, MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, true)) {
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            /* XXX: could force current_cpu to NULL to avoid
               potential bugs */
            switch (l) {
            case 8:
                /* 64 bit write access */
                val = ldq_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 8,
                                                       attrs);
                break;
            case 4:
                /* 32 bit write access */
                val = (uint32_t)ldl_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 4,
                                                       attrs);
                break;
            case 2:
                /* 16 bit write access */
                val = lduw_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 2,
                                                       attrs);
                break;
            case 1:
                /* 8 bit write access */
                val = ldub_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 1,
                                                       attrs);
                break;
            default:
                abort();
            }
        } else {
            /* RAM case */
            ptr = qemu_ram_ptr_length(mr->ram_block, addr1, &l, false);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(mr, addr1, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = flatview_translate(fv, addr, &addr1, &l, true);
    }

    return result;
}
static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs,
                                  const uint8_t *buf, int len)
{
    hwaddr l;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;

    if (len > 0) {
        rcu_read_lock();
        l = len;
        mr = flatview_translate(fv, addr, &addr1, &l, true);
        result = flatview_write_continue(fv, addr, attrs, buf, len,
                                         addr1, l, mr);
        rcu_read_unlock();
    }

    return result;
}

MemTxResult address_space_write(AddressSpace *as, hwaddr addr,
                                MemTxAttrs attrs,
                                const uint8_t *buf, int len)
{
    return flatview_write(address_space_to_flatview(as), addr, attrs, buf, len);
}
/* Called within RCU critical section.  */
MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr,
                                   MemTxAttrs attrs, uint8_t *buf,
                                   int len, hwaddr addr1, hwaddr l,
                                   MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, false)) {
            /* I/O case */
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            switch (l) {
            case 8:
                /* 64 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 8,
                                                      attrs);
                stq_p(buf, val);
                break;
            case 4:
                /* 32 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 4,
                                                      attrs);
                stl_p(buf, val);
                break;
            case 2:
                /* 16 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 2,
                                                      attrs);
                stw_p(buf, val);
                break;
            case 1:
                /* 8 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 1,
                                                      attrs);
                stb_p(buf, val);
                break;
            default:
                abort();
            }
        } else {
            /* RAM case */
            ptr = qemu_ram_ptr_length(mr->ram_block, addr1, &l, false);
            memcpy(buf, ptr, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = flatview_translate(fv, addr, &addr1, &l, false);
    }

    return result;
}
MemTxResult flatview_read_full(FlatView *fv, hwaddr addr,
                               MemTxAttrs attrs, uint8_t *buf, int len)
{
    hwaddr l;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;

    if (len > 0) {
        rcu_read_lock();
        l = len;
        mr = flatview_translate(fv, addr, &addr1, &l, false);
        result = flatview_read_continue(fv, addr, attrs, buf, len,
                                        addr1, l, mr);
        rcu_read_unlock();
    }

    return result;
}

static MemTxResult flatview_rw(FlatView *fv, hwaddr addr, MemTxAttrs attrs,
                               uint8_t *buf, int len, bool is_write)
{
    if (is_write) {
        return flatview_write(fv, addr, attrs, (uint8_t *)buf, len);
    } else {
        return flatview_read(fv, addr, attrs, (uint8_t *)buf, len);
    }
}
MemTxResult address_space_rw(AddressSpace *as, hwaddr addr,
                             MemTxAttrs attrs, uint8_t *buf,
                             int len, bool is_write)
{
    return flatview_rw(address_space_to_flatview(as),
                       addr, attrs, buf, len, is_write);
}

void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
                     buf, len, is_write);
}
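/*
 * Usage sketch (illustrative): device or monitor code copies a buffer
 * into guest-physical memory and checks the transaction result:
 *
 *     uint8_t data[4] = { 1, 2, 3, 4 };
 *     MemTxResult r = address_space_write(&address_space_memory,
 *                                         0x40000000,
 *                                         MEMTXATTRS_UNSPECIFIED,
 *                                         data, sizeof(data));
 *     if (r != MEMTX_OK) {
 *         ... the write hit unassigned or faulting memory ...
 *     }
 */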
enum write_rom_type {
    WRITE_DATA,
    FLUSH_CACHE,
};

static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
    hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegion *mr;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            l = memory_access_size(mr, l, addr1);
        } else {
            /* ROM/RAM case */
            ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
            switch (type) {
            case WRITE_DATA:
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(mr, addr1, l);
                break;
            case FLUSH_CACHE:
                flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
                break;
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
    rcu_read_unlock();
}
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
                                   const uint8_t *buf, int len)
{
    cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
}

void cpu_flush_icache_range(hwaddr start, int len)
{
    /*
     * This function should do the same thing as an icache flush that was
     * triggered from within the guest. For TCG we are always cache coherent,
     * so there is no need to flush anything. For KVM / Xen we need to flush
     * the host's instruction cache at least.
     */
    if (tcg_enabled()) {
        return;
    }

    cpu_physical_memory_write_rom_internal(&address_space_memory,
                                           start, NULL, len, FLUSH_CACHE);
}
typedef struct {
    MemoryRegion *mr;
    void *buffer;
    hwaddr addr;
    hwaddr len;
    bool in_use;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    QEMUBH *bh;
    QLIST_ENTRY(MapClient) link;
} MapClient;

QemuMutex map_client_list_lock;
static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);
static void cpu_unregister_map_client_do(MapClient *client)
{
    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients_locked(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        qemu_bh_schedule(client->bh);
        cpu_unregister_map_client_do(client);
    }
}

void cpu_register_map_client(QEMUBH *bh)
{
    MapClient *client = g_malloc(sizeof(*client));

    qemu_mutex_lock(&map_client_list_lock);
    client->bh = bh;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    if (!atomic_read(&bounce.in_use)) {
        cpu_notify_map_clients_locked();
    }
    qemu_mutex_unlock(&map_client_list_lock);
}
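/*
 * Retry protocol (sketch): a DMA user whose address_space_map() failed
 * because the bounce buffer was busy registers a bottom half and tries
 * again when it runs; "retry_dma_cb" is a hypothetical callback:
 *
 *     static void retry_dma_cb(void *opaque)
 *     {
 *         ... call address_space_map() again ...
 *     }
 *
 *     QEMUBH *bh = qemu_bh_new(retry_dma_cb, opaque);
 *     cpu_register_map_client(bh);
 */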
void cpu_exec_init_all(void)
{
    qemu_mutex_init(&ram_list.mutex);
    /* The data structures we set up here depend on knowing the page size,
     * so no more changes can be made after this point.
     * In an ideal world, nothing we did before we had finished the
     * machine setup would care about the target page size, and we could
     * do this much later, rather than requiring board models to state
     * up front what their requirements are.
     */
    finalize_target_page_bits();
    io_mem_init();
    memory_map_init();
    qemu_mutex_init(&map_client_list_lock);
}
void cpu_unregister_map_client(QEMUBH *bh)
{
    MapClient *client;

    qemu_mutex_lock(&map_client_list_lock);
    QLIST_FOREACH(client, &map_client_list, link) {
        if (client->bh == bh) {
            cpu_unregister_map_client_do(client);
            break;
        }
    }
    qemu_mutex_unlock(&map_client_list_lock);
}

static void cpu_notify_map_clients(void)
{
    qemu_mutex_lock(&map_client_list_lock);
    cpu_notify_map_clients_locked();
    qemu_mutex_unlock(&map_client_list_lock);
}
static bool flatview_access_valid(FlatView *fv, hwaddr addr, int len,
                                  bool is_write)
{
    MemoryRegion *mr;
    hwaddr l, xlat;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = flatview_translate(fv, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                rcu_read_unlock();
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    rcu_read_unlock();
    return true;
}

bool address_space_access_valid(AddressSpace *as, hwaddr addr,
                                int len, bool is_write)
{
    return flatview_access_valid(address_space_to_flatview(as),
                                 addr, len, is_write);
}
static hwaddr
flatview_extend_translation(FlatView *fv, hwaddr addr,
                            hwaddr target_len,
                            MemoryRegion *mr, hwaddr base, hwaddr len,
                            bool is_write)
{
    hwaddr done = 0;
    hwaddr xlat;
    MemoryRegion *this_mr;

    for (;;) {
        target_len -= len;
        addr += len;
        done += len;
        if (target_len == 0) {
            return done;
        }

        len = target_len;
        this_mr = flatview_translate(fv, addr, &xlat,
                                     &len, is_write);
        if (this_mr != mr || xlat != base + done) {
            return done;
        }
    }
}
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr l, xlat;
    MemoryRegion *mr;
    void *ptr;
    FlatView *fv = address_space_to_flatview(as);

    if (len == 0) {
        return NULL;
    }

    l = len;
    rcu_read_lock();
    mr = flatview_translate(fv, addr, &xlat, &l, is_write);

    if (!memory_access_is_direct(mr, is_write)) {
        if (atomic_xchg(&bounce.in_use, true)) {
            rcu_read_unlock();
            return NULL;
        }
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
        bounce.addr = addr;
        bounce.len = l;

        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            flatview_read(fv, addr, MEMTXATTRS_UNSPECIFIED,
                          bounce.buffer, l);
        }

        rcu_read_unlock();
        *plen = l;
        return bounce.buffer;
    }

    memory_region_ref(mr);
    *plen = flatview_extend_translation(fv, addr, len, mr, xlat,
                                        l, is_write);
    ptr = qemu_ram_ptr_length(mr->ram_block, xlat, plen, true);
    rcu_read_unlock();

    return ptr;
}
/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = memory_region_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            invalidate_and_set_dirty(mr, addr1, access_len);
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
                            bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    atomic_mb_set(&bounce.in_use, false);
    cpu_notify_map_clients();
}
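/*
 * Usage sketch (illustrative): zero-copy access to guest memory for a
 * one-shot DMA read of up to "len" bytes:
 *
 *     hwaddr plen = len;
 *     void *p = address_space_map(as, gpa, &plen, false);
 *     if (p) {
 *         ... consume plen bytes at p (may be less than len) ...
 *         address_space_unmap(as, p, plen, false, plen);
 *     } else {
 *         ... resources exhausted: register a map client and retry ...
 *     }
 */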
void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}
#define ARG1_DECL AddressSpace *as
#define ARG1     as
#define SUFFIX
#define TRANSLATE(...) address_space_translate(as, __VA_ARGS__)
#define IS_DIRECT(mr, is_write) memory_access_is_direct(mr, is_write)
#define MAP_RAM(mr, ofs) qemu_map_ram_ptr((mr)->ram_block, ofs)
#define INVALIDATE(mr, ofs, len) invalidate_and_set_dirty(mr, ofs, len)
#define RCU_READ_LOCK(...) rcu_read_lock()
#define RCU_READ_UNLOCK(...) rcu_read_unlock()
#include "memory_ldst.inc.c"
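/*
 * The include above expands into the fixed-size accessors used earlier
 * in this file (address_space_ldub/lduw/ldl and friends, plus the st*
 * counterparts). Illustrative call, assuming "as" is a valid
 * AddressSpace:
 *
 *     MemTxResult res;
 *     uint32_t v = address_space_ldl(as, 0x1000,
 *                                    MEMTXATTRS_UNSPECIFIED, &res);
 */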
int64_t address_space_cache_init(MemoryRegionCache *cache,
                                 AddressSpace *as,
                                 hwaddr addr,
                                 hwaddr len,
                                 bool is_write)
{
    cache->len = len;
    cache->as = as;
    cache->xlat = addr;
    return len;
}

void address_space_cache_invalidate(MemoryRegionCache *cache,
                                    hwaddr addr,
                                    hwaddr access_len)
{
}

void address_space_cache_destroy(MemoryRegionCache *cache)
{
    cache->as = NULL;
}

#define ARG1_DECL MemoryRegionCache *cache
#define ARG1     cache
#define SUFFIX   _cached
#define TRANSLATE(addr, ...) \
    address_space_translate(cache->as, cache->xlat + (addr), __VA_ARGS__)
#define IS_DIRECT(mr, is_write) true
#define MAP_RAM(mr, ofs) qemu_map_ram_ptr((mr)->ram_block, ofs)
#define INVALIDATE(mr, ofs, len) invalidate_and_set_dirty(mr, ofs, len)
#define RCU_READ_LOCK() rcu_read_lock()
#define RCU_READ_UNLOCK() rcu_read_unlock()
#include "memory_ldst.inc.c"
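/*
 * The second expansion generates the _cached variants, which take the
 * MemoryRegionCache set up above. Sketch (assumes the cache covers the
 * accessed offset):
 *
 *     MemoryRegionCache cache;
 *     if (address_space_cache_init(&cache, as, base, 0x100, false) >= 0) {
 *         uint32_t v = ldl_phys_cached(&cache, 0x10);
 *         address_space_cache_destroy(&cache);
 *     }
 */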
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    cpu_synchronize_state(cpu);
    while (len > 0) {
        int asidx;
        MemTxAttrs attrs;

        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
        asidx = cpu_asidx_from_attrs(cpu, attrs);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1) {
            return -1;
        }
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as,
                                          phys_addr, buf, l);
        } else {
            address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
                             MEMTXATTRS_UNSPECIFIED,
                             buf, l, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
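/*
 * Illustrative caller: the gdbstub reads guest virtual memory through
 * this path, e.g. fetching 16 bytes at the current program counter:
 *
 *     uint8_t insn[16];
 *     if (cpu_memory_rw_debug(cpu, pc, insn, sizeof(insn), 0) < 0) {
 *         ... page is unmapped, report an error to the debugger ...
 *     }
 */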
/*
 * Allows code that needs to deal with migration bitmaps etc to still be built
 * target independent.
 */
size_t qemu_target_page_size(void)
{
    return TARGET_PAGE_SIZE;
}

int qemu_target_page_bits(void)
{
    return TARGET_PAGE_BITS;
}

int qemu_target_page_bits_min(void)
{
    return TARGET_PAGE_BITS_MIN;
}
/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;
    bool res;

    rcu_read_lock();
    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
    rcu_read_unlock();
    return res;
}
int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    rcu_read_lock();
    RAMBLOCK_FOREACH(block) {
        ret = func(block->idstr, block->host, block->offset,
                   block->used_length, opaque);
        if (ret) {
            break;
        }
    }
    rcu_read_unlock();
    return ret;
}
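/*
 * Illustrative callback ("dump_block" is hypothetical): print every RAM
 * block; returning non-zero stops the walk early:
 *
 *     static int dump_block(const char *idstr, void *host, ram_addr_t off,
 *                           ram_addr_t len, void *opaque)
 *     {
 *         printf("%s: host %p offset " RAM_ADDR_FMT " len " RAM_ADDR_FMT
 *                "\n", idstr, host, off, len);
 *         return 0;
 *     }
 *
 *     qemu_ram_foreach_block(dump_block, NULL);
 */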
/*
 * Unmap pages of memory from start to start+length such that
 * they a) read as 0, b) trigger whatever fault mechanism
 * the OS provides for postcopy.
 * The pages must be unmapped by the end of the function.
 * Returns: 0 on success, non-0 on failure
 */
int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length)
{
    int ret = -1;

    uint8_t *host_startaddr = rb->host + start;

    if ((uintptr_t)host_startaddr & (rb->page_size - 1)) {
        error_report("ram_block_discard_range: Unaligned start address: %p",
                     host_startaddr);
        goto err;
    }

    if ((start + length) <= rb->used_length) {
        uint8_t *host_endaddr = host_startaddr + length;
        if ((uintptr_t)host_endaddr & (rb->page_size - 1)) {
            error_report("ram_block_discard_range: Unaligned end address: %p",
                         host_endaddr);
            goto err;
        }

        errno = ENOTSUP; /* If we are missing MADVISE etc */

        if (rb->page_size == qemu_host_page_size) {
#if defined(CONFIG_MADVISE)
            /* Note: We need the madvise MADV_DONTNEED behaviour of definitely
             * freeing the page.
             */
            ret = madvise(host_startaddr, length, MADV_DONTNEED);
#endif
        } else {
            /* Huge page case - unfortunately it can't do DONTNEED, but
             * it can do the equivalent by FALLOC_FL_PUNCH_HOLE in the
             * huge page file.
             */
#ifdef CONFIG_FALLOCATE_PUNCH_HOLE
            ret = fallocate(rb->fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                            start, length);
#endif
        }
        if (ret) {
            ret = -errno;
            error_report("ram_block_discard_range: Failed to discard range "
                         "%s:%" PRIx64 " +%zx (%d)",
                         rb->idstr, start, length, ret);
        }
    } else {
        error_report("ram_block_discard_range: Overrun block '%s' (%" PRIu64
                     "/%zx/" RAM_ADDR_FMT")",
                     rb->idstr, start, length, rb->used_length);
    }

err:
    return ret;
}

#endif /* !CONFIG_USER_ONLY */
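/*
 * Usage sketch (illustrative): postcopy drops a block's local pages so
 * that stale copies fault and get re-requested from the source:
 *
 *     if (ram_block_discard_range(rb, 0, rb->used_length)) {
 *         ... discard failed; fall back or abort the migration ...
 *     }
 */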
void page_size_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
    qemu_real_host_page_size = getpagesize();
    qemu_real_host_page_mask = -(intptr_t)qemu_real_host_page_size;
    if (qemu_host_page_size == 0) {
        qemu_host_page_size = qemu_real_host_page_size;
    }
    if (qemu_host_page_size < TARGET_PAGE_SIZE) {
        qemu_host_page_size = TARGET_PAGE_SIZE;
    }
    qemu_host_page_mask = -(intptr_t)qemu_host_page_size;
}
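/*
 * Worked example (illustrative): for a 4 KiB host page,
 * qemu_host_page_size == 0x1000 and -(intptr_t)0x1000 == ~(intptr_t)0xfff,
 * so qemu_host_page_mask == ...fffff000; "addr & qemu_host_page_mask"
 * rounds an address down to the start of its host page.
 */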