4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
23 #include <sys/types.h>
27 #include "qemu-common.h"
32 #include "qemu/osdep.h"
33 #include "sysemu/kvm.h"
34 #include "hw/xen/xen.h"
35 #include "qemu/timer.h"
36 #include "qemu/config-file.h"
37 #include "exec/memory.h"
38 #include "sysemu/dma.h"
39 #include "exec/address-spaces.h"
40 #if defined(CONFIG_USER_ONLY)
42 #else /* !CONFIG_USER_ONLY */
43 #include "sysemu/xen-mapcache.h"
46 #include "exec/cpu-all.h"
48 #include "exec/cputlb.h"
49 #include "translate-all.h"
51 #include "exec/memory-internal.h"
53 //#define DEBUG_SUBPAGE
55 #if !defined(CONFIG_USER_ONLY)
57 static int in_migration;
59 RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };
61 static MemoryRegion *system_memory;
62 static MemoryRegion *system_io;
64 AddressSpace address_space_io;
65 AddressSpace address_space_memory;
67 MemoryRegion io_mem_rom, io_mem_notdirty;
68 static MemoryRegion io_mem_unassigned;
72 CPUArchState *first_cpu;
73 /* current CPU in the current thread. It is only valid inside cpu_exec() */
75 DEFINE_TLS(CPUArchState *,cpu_single_env);
76 /* 0 = Do not count executed instructions.
77 1 = Precise instruction counting.
78 2 = Adaptive rate instruction counting. */
81 #if !defined(CONFIG_USER_ONLY)
83 typedef struct PhysPageEntry PhysPageEntry;
85 struct PhysPageEntry {
87 /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
91 struct AddressSpaceDispatch {
92 /* This is a multi-level map on the physical address space.
93 * The bottom level has pointers to MemoryRegionSections. */
95 PhysPageEntry phys_map;
99 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
100 typedef struct subpage_t {
104 uint16_t sub_section[TARGET_PAGE_SIZE];
107 #define PHYS_SECTION_UNASSIGNED 0
108 #define PHYS_SECTION_NOTDIRTY 1
109 #define PHYS_SECTION_ROM 2
110 #define PHYS_SECTION_WATCH 3
112 typedef PhysPageEntry Node[L2_SIZE];
114 typedef struct PhysPageMap {
115 unsigned sections_nb;
116 unsigned sections_nb_alloc;
118 unsigned nodes_nb_alloc;
120 MemoryRegionSection *sections;
123 static PhysPageMap cur_map;
124 static PhysPageMap next_map;
126 #define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)
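/* The structures above form a multi-level (radix-tree style) map from
 * physical page numbers to MemoryRegionSections.  next_map is filled in while
 * a memory topology change is being prepared and replaces cur_map when the
 * change is committed (see core_begin()/core_commit() below). */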
128 static void io_mem_init(void);
129 static void memory_map_init(void);
130 static void *qemu_safe_ram_ptr(ram_addr_t addr);
132 static MemoryRegion io_mem_watch;
135 #if !defined(CONFIG_USER_ONLY)
137 static void phys_map_node_reserve(unsigned nodes)
139 if (next_map.nodes_nb + nodes > next_map.nodes_nb_alloc) {
140 next_map.nodes_nb_alloc = MAX(next_map.nodes_nb_alloc * 2,
142 next_map.nodes_nb_alloc = MAX(next_map.nodes_nb_alloc,
143 next_map.nodes_nb + nodes);
144 next_map.nodes = g_renew(Node, next_map.nodes,
145 next_map.nodes_nb_alloc);
149 static uint16_t phys_map_node_alloc(void)
154 ret = next_map.nodes_nb++;
155 assert(ret != PHYS_MAP_NODE_NIL);
156 assert(ret != next_map.nodes_nb_alloc);
157 for (i = 0; i < L2_SIZE; ++i) {
158 next_map.nodes[ret][i].is_leaf = 0;
159 next_map.nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
164 static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
165 hwaddr *nb, uint16_t leaf,
170 hwaddr step = (hwaddr)1 << (level * L2_BITS);
172 if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
173 lp->ptr = phys_map_node_alloc();
174 p = next_map.nodes[lp->ptr];
176 for (i = 0; i < L2_SIZE; i++) {
178 p[i].ptr = PHYS_SECTION_UNASSIGNED;
182 p = next_map.nodes[lp->ptr];
184 lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];
186 while (*nb && lp < &p[L2_SIZE]) {
187 if ((*index & (step - 1)) == 0 && *nb >= step) {
193 phys_page_set_level(lp, index, nb, leaf, level - 1);
199 static void phys_page_set(AddressSpaceDispatch *d,
200 hwaddr index, hwaddr nb,
203 /* Wildly overreserve - it doesn't matter much. */
204 phys_map_node_reserve(3 * P_L2_LEVELS);
206 phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
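/* Look up the MemoryRegionSection for a physical page index by walking the
 * radix tree from the given root entry; a hole in the tree resolves to the
 * PHYS_SECTION_UNASSIGNED dummy section. */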
209 static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr index,
210 Node *nodes, MemoryRegionSection *sections)
215 for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
216 if (lp.ptr == PHYS_MAP_NODE_NIL) {
217 return &sections[PHYS_SECTION_UNASSIGNED];
220 lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
222 return &sections[lp.ptr];
225 bool memory_region_is_unassigned(MemoryRegion *mr)
227 return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
228 && mr != &io_mem_watch;
231 static MemoryRegionSection *address_space_lookup_region(AddressSpace *as,
233 bool resolve_subpage)
235 MemoryRegionSection *section;
238 section = phys_page_find(as->dispatch->phys_map, addr >> TARGET_PAGE_BITS,
239 cur_map.nodes, cur_map.sections);
240 if (resolve_subpage && section->mr->subpage) {
241 subpage = container_of(section->mr, subpage_t, iomem);
242 section = &cur_map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
247 static MemoryRegionSection *
248 address_space_translate_internal(AddressSpace *as, hwaddr addr, hwaddr *xlat,
249 hwaddr *plen, bool resolve_subpage)
251 MemoryRegionSection *section;
254 section = address_space_lookup_region(as, addr, resolve_subpage);
255 /* Compute offset within MemoryRegionSection */
256 addr -= section->offset_within_address_space;
258 /* Compute offset within MemoryRegion */
259 *xlat = addr + section->offset_within_region;
261 diff = int128_sub(section->mr->size, int128_make64(addr));
262 *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
266 MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
267 hwaddr *xlat, hwaddr *plen,
271 MemoryRegionSection *section;
276 section = address_space_translate_internal(as, addr, &addr, plen, true);
279 if (!mr->iommu_ops) {
283 iotlb = mr->iommu_ops->translate(mr, addr);
284 addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
285 | (addr & iotlb.addr_mask));
286 len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
287 if (!(iotlb.perm & (1 << is_write))) {
288 mr = &io_mem_unassigned;
292 as = iotlb.target_as;
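/* Variant used when filling CPU TLB entries: subpages are not resolved here
 * (the TLB needs the enclosing section), and IOMMU-backed regions are not
 * expected, as the assert below documents. */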
300 MemoryRegionSection *
301 address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
304 MemoryRegionSection *section;
305 section = address_space_translate_internal(as, addr, xlat, plen, false);
307 assert(!section->mr->iommu_ops);
312 void cpu_exec_init_all(void)
314 #if !defined(CONFIG_USER_ONLY)
315 qemu_mutex_init(&ram_list.mutex);
321 #if !defined(CONFIG_USER_ONLY)
323 static int cpu_common_post_load(void *opaque, int version_id)
325 CPUState *cpu = opaque;
327 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
328 version_id is increased. */
329 cpu->interrupt_request &= ~0x01;
330 tlb_flush(cpu->env_ptr, 1);
335 const VMStateDescription vmstate_cpu_common = {
336 .name = "cpu_common",
338 .minimum_version_id = 1,
339 .minimum_version_id_old = 1,
340 .post_load = cpu_common_post_load,
341 .fields = (VMStateField []) {
342 VMSTATE_UINT32(halted, CPUState),
343 VMSTATE_UINT32(interrupt_request, CPUState),
344 VMSTATE_END_OF_LIST()
350 CPUState *qemu_get_cpu(int index)
352 CPUArchState *env = first_cpu;
353 CPUState *cpu = NULL;
356 cpu = ENV_GET_CPU(env);
357 if (cpu->cpu_index == index) {
363 return env ? cpu : NULL;
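/* Run func(cpu, data) once for every CPU, in the order they were created. */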
366 void qemu_for_each_cpu(void (*func)(CPUState *cpu, void *data), void *data)
368 CPUArchState *env = first_cpu;
371 func(ENV_GET_CPU(env), data);
376 void cpu_exec_init(CPUArchState *env)
378 CPUState *cpu = ENV_GET_CPU(env);
379 CPUClass *cc = CPU_GET_CLASS(cpu);
383 #if defined(CONFIG_USER_ONLY)
386 env->next_cpu = NULL;
389 while (*penv != NULL) {
390 penv = &(*penv)->next_cpu;
393 cpu->cpu_index = cpu_index;
395 QTAILQ_INIT(&env->breakpoints);
396 QTAILQ_INIT(&env->watchpoints);
397 #ifndef CONFIG_USER_ONLY
398 cpu->thread_id = qemu_get_thread_id();
401 #if defined(CONFIG_USER_ONLY)
404 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
405 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
406 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
407 cpu_save, cpu_load, env);
408 assert(cc->vmsd == NULL);
410 if (cc->vmsd != NULL) {
411 vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
415 #if defined(TARGET_HAS_ICE)
416 #if defined(CONFIG_USER_ONLY)
417 static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
419 tb_invalidate_phys_page_range(pc, pc + 1, 0);
422 static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
424 tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
425 (pc & ~TARGET_PAGE_MASK));
428 #endif /* TARGET_HAS_ICE */
430 #if defined(CONFIG_USER_ONLY)
431 void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
436 int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
437 int flags, CPUWatchpoint **watchpoint)
442 /* Add a watchpoint. */
443 int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
444 int flags, CPUWatchpoint **watchpoint)
446 target_ulong len_mask = ~(len - 1);
449 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
450 if ((len & (len - 1)) || (addr & ~len_mask) ||
451 len == 0 || len > TARGET_PAGE_SIZE) {
452 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
453 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
456 wp = g_malloc(sizeof(*wp));
459 wp->len_mask = len_mask;
462 /* keep all GDB-injected watchpoints in front */
464 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
466 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
468 tlb_flush_page(env, addr);
475 /* Remove a specific watchpoint. */
476 int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
479 target_ulong len_mask = ~(len - 1);
482 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
483 if (addr == wp->vaddr && len_mask == wp->len_mask
484 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
485 cpu_watchpoint_remove_by_ref(env, wp);
492 /* Remove a specific watchpoint by reference. */
493 void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
495 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
497 tlb_flush_page(env, watchpoint->vaddr);
502 /* Remove all matching watchpoints. */
503 void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
505 CPUWatchpoint *wp, *next;
507 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
508 if (wp->flags & mask)
509 cpu_watchpoint_remove_by_ref(env, wp);
514 /* Add a breakpoint. */
515 int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
516 CPUBreakpoint **breakpoint)
518 #if defined(TARGET_HAS_ICE)
521 bp = g_malloc(sizeof(*bp));
526 /* keep all GDB-injected breakpoints in front */
528 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
530 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
532 breakpoint_invalidate(env, pc);
542 /* Remove a specific breakpoint. */
543 int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
545 #if defined(TARGET_HAS_ICE)
548 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
549 if (bp->pc == pc && bp->flags == flags) {
550 cpu_breakpoint_remove_by_ref(env, bp);
560 /* Remove a specific breakpoint by reference. */
561 void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
563 #if defined(TARGET_HAS_ICE)
564 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
566 breakpoint_invalidate(env, breakpoint->pc);
572 /* Remove all matching breakpoints. */
573 void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
575 #if defined(TARGET_HAS_ICE)
576 CPUBreakpoint *bp, *next;
578 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
579 if (bp->flags & mask)
580 cpu_breakpoint_remove_by_ref(env, bp);
585 /* enable or disable single step mode. EXCP_DEBUG is returned by the
586 CPU loop after each instruction */
587 void cpu_single_step(CPUArchState *env, int enabled)
589 #if defined(TARGET_HAS_ICE)
590 if (env->singlestep_enabled != enabled) {
591 env->singlestep_enabled = enabled;
593 kvm_update_guest_debug(env, 0);
595 /* must flush all the translated code to avoid inconsistencies */
596 /* XXX: only flush what is necessary */
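/* Print a fatal error message, dump the CPU state to stderr (and to the log
 * if logging is enabled), then abort emulation. */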
603 void cpu_abort(CPUArchState *env, const char *fmt, ...)
605 CPUState *cpu = ENV_GET_CPU(env);
611 fprintf(stderr, "qemu: fatal: ");
612 vfprintf(stderr, fmt, ap);
613 fprintf(stderr, "\n");
614 cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
615 if (qemu_log_enabled()) {
616 qemu_log("qemu: fatal: ");
617 qemu_log_vprintf(fmt, ap2);
619 log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
625 #if defined(CONFIG_USER_ONLY)
627 struct sigaction act;
628 sigfillset(&act.sa_mask);
629 act.sa_handler = SIG_DFL;
630 sigaction(SIGABRT, &act, NULL);
636 CPUArchState *cpu_copy(CPUArchState *env)
638 CPUArchState *new_env = cpu_init(env->cpu_model_str);
639 CPUArchState *next_cpu = new_env->next_cpu;
640 #if defined(TARGET_HAS_ICE)
645 memcpy(new_env, env, sizeof(CPUArchState));
647 /* Preserve chaining. */
648 new_env->next_cpu = next_cpu;
650 /* Clone all break/watchpoints.
651 Note: Once we support ptrace with hw-debug register access, make sure
652 BP_CPU break/watchpoints are handled correctly on clone. */
653 QTAILQ_INIT(&env->breakpoints);
654 QTAILQ_INIT(&env->watchpoints);
655 #if defined(TARGET_HAS_ICE)
656 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
657 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
659 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
660 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
668 #if !defined(CONFIG_USER_ONLY)
669 static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
674 /* we modify the TLB cache so that the dirty bit will be set again
675 when accessing the range */
676 start1 = (uintptr_t)qemu_safe_ram_ptr(start);
677 /* Check that we don't span multiple blocks - this breaks the
678 address comparisons below. */
679 if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
680 != (end - 1) - start) {
683 cpu_tlb_reset_dirty_all(start1, length);
687 /* Note: start and end must be within the same ram block. */
688 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
693 start &= TARGET_PAGE_MASK;
694 end = TARGET_PAGE_ALIGN(end);
696 length = end - start;
699 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
702 tlb_reset_dirty_range_all(start, end, length);
706 static int cpu_physical_memory_set_dirty_tracking(int enable)
709 in_migration = enable;
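/* Compute the value stored in a TLB entry's iotlb field: for RAM it is the
 * page's ram_addr ORed with a small pseudo-section index (NOTDIRTY or ROM),
 * for MMIO it is the section's index in cur_map.sections.  Pages covered by a
 * watchpoint are redirected to PHYS_SECTION_WATCH so that accesses trap. */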
713 hwaddr memory_region_section_get_iotlb(CPUArchState *env,
714 MemoryRegionSection *section,
716 hwaddr paddr, hwaddr xlat,
718 target_ulong *address)
723 if (memory_region_is_ram(section->mr)) {
725 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
727 if (!section->readonly) {
728 iotlb |= PHYS_SECTION_NOTDIRTY;
730 iotlb |= PHYS_SECTION_ROM;
733 iotlb = section - cur_map.sections;
737 /* Make accesses to pages with watchpoints go via the
738 watchpoint trap routines. */
739 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
740 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
741 /* Avoid trapping reads of pages with a write breakpoint. */
742 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
743 iotlb = PHYS_SECTION_WATCH + paddr;
744 *address |= TLB_MMIO;
752 #endif /* defined(CONFIG_USER_ONLY) */
754 #if !defined(CONFIG_USER_ONLY)
756 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
758 static subpage_t *subpage_init(AddressSpace *as, hwaddr base);
760 static uint16_t phys_section_add(MemoryRegionSection *section)
762 /* The physical section number is ORed with a page-aligned
763 * pointer to produce the iotlb entries. Thus it should
764 * never overflow into the page-aligned value. */
766 assert(next_map.sections_nb < TARGET_PAGE_SIZE);
768 if (next_map.sections_nb == next_map.sections_nb_alloc) {
769 next_map.sections_nb_alloc = MAX(next_map.sections_nb_alloc * 2,
771 next_map.sections = g_renew(MemoryRegionSection, next_map.sections,
772 next_map.sections_nb_alloc);
774 next_map.sections[next_map.sections_nb] = *section;
775 memory_region_ref(section->mr);
776 return next_map.sections_nb++;
779 static void phys_section_destroy(MemoryRegion *mr)
781 memory_region_unref(mr);
784 subpage_t *subpage = container_of(mr, subpage_t, iomem);
785 memory_region_destroy(&subpage->iomem);
790 static void phys_sections_clear(PhysPageMap *map)
792 while (map->sections_nb > 0) {
793 MemoryRegionSection *section = &map->sections[--map->sections_nb];
794 phys_section_destroy(section->mr);
796 g_free(map->sections);
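/* register_subpage()/register_multipage() below enter a MemoryRegionSection
 * into the next_map radix tree; sections that do not cover whole target pages
 * are dispatched through an intermediate subpage_t container. */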
800 static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
803 hwaddr base = section->offset_within_address_space
805 MemoryRegionSection *existing = phys_page_find(d->phys_map, base >> TARGET_PAGE_BITS,
806 next_map.nodes, next_map.sections);
807 MemoryRegionSection subsection = {
808 .offset_within_address_space = base,
809 .size = int128_make64(TARGET_PAGE_SIZE),
813 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
815 if (!(existing->mr->subpage)) {
816 subpage = subpage_init(d->as, base);
817 subsection.mr = &subpage->iomem;
818 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
819 phys_section_add(&subsection));
821 subpage = container_of(existing->mr, subpage_t, iomem);
823 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
824 end = start + int128_get64(section->size) - 1;
825 subpage_register(subpage, start, end, phys_section_add(section));
829 static void register_multipage(AddressSpaceDispatch *d,
830 MemoryRegionSection *section)
832 hwaddr start_addr = section->offset_within_address_space;
833 uint16_t section_index = phys_section_add(section);
834 uint64_t num_pages = int128_get64(int128_rshift(section->size,
838 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
841 static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
843 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
844 AddressSpaceDispatch *d = as->dispatch;
845 MemoryRegionSection now = *section, remain = *section;
846 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
848 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
849 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
850 - now.offset_within_address_space;
852 now.size = int128_min(int128_make64(left), now.size);
853 register_subpage(d, &now);
855 now.size = int128_zero();
857 while (int128_ne(remain.size, now.size)) {
858 remain.size = int128_sub(remain.size, now.size);
859 remain.offset_within_address_space += int128_get64(now.size);
860 remain.offset_within_region += int128_get64(now.size);
862 if (int128_lt(remain.size, page_size)) {
863 register_subpage(d, &now);
864 } else if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
865 now.size = page_size;
866 register_subpage(d, &now);
868 now.size = int128_and(now.size, int128_neg(page_size));
869 register_multipage(d, &now);
874 void qemu_flush_coalesced_mmio_buffer(void)
877 kvm_flush_coalesced_mmio_buffer();
880 void qemu_mutex_lock_ramlist(void)
882 qemu_mutex_lock(&ram_list.mutex);
885 void qemu_mutex_unlock_ramlist(void)
887 qemu_mutex_unlock(&ram_list.mutex);
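/* Optionally back guest RAM with files on a hugetlbfs mount (Linux only,
 * selected with -mem-path). */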
890 #if defined(__linux__) && !defined(TARGET_S390X)
894 #define HUGETLBFS_MAGIC 0x958458f6
896 static long gethugepagesize(const char *path)
902 ret = statfs(path, &fs);
903 } while (ret != 0 && errno == EINTR);
910 if (fs.f_type != HUGETLBFS_MAGIC)
911 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
916 static void *file_ram_alloc(RAMBlock *block,
921 char *sanitized_name;
928 unsigned long hpagesize;
930 hpagesize = gethugepagesize(path);
935 if (memory < hpagesize) {
939 if (kvm_enabled() && !kvm_has_sync_mmu()) {
940 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
944 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
945 sanitized_name = g_strdup(block->mr->name);
946 for (c = sanitized_name; *c != '\0'; c++) {
951 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
953 g_free(sanitized_name);
955 fd = mkstemp(filename);
957 perror("unable to create backing store for hugepages");
964 memory = (memory+hpagesize-1) & ~(hpagesize-1);
967 /* ftruncate is not supported by hugetlbfs in older
968 * hosts, so don't bother bailing out on errors.
969 * If anything goes wrong with it under other filesystems, mmap will fail. */
972 if (ftruncate(fd, memory))
976 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
977 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
978 * to sidestep this quirk. */
980 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
981 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
983 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
985 if (area == MAP_FAILED) {
986 perror("file_ram_alloc: can't mmap RAM pages");
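/* Choose a ram_addr_t offset for a new RAM block: scan the existing blocks
 * and return the start of the smallest gap large enough for the request. */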
995 static ram_addr_t find_ram_offset(ram_addr_t size)
997 RAMBlock *block, *next_block;
998 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
1000 assert(size != 0); /* it would hand out same offset multiple times */
1002 if (QTAILQ_EMPTY(&ram_list.blocks))
1005 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1006 ram_addr_t end, next = RAM_ADDR_MAX;
1008 end = block->offset + block->length;
1010 QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
1011 if (next_block->offset >= end) {
1012 next = MIN(next, next_block->offset);
1015 if (next - end >= size && next - end < mingap) {
1017 mingap = next - end;
1021 if (offset == RAM_ADDR_MAX) {
1022 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1030 ram_addr_t last_ram_offset(void)
1033 ram_addr_t last = 0;
1035 QTAILQ_FOREACH(block, &ram_list.blocks, next)
1036 last = MAX(last, block->offset + block->length);
1041 static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1044 QemuOpts *machine_opts;
1046 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
1047 machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
1049 !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
1050 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1052 perror("qemu_madvise");
1053 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1054 "but dump_guest_core=off specified\n");
1059 void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1061 RAMBlock *new_block, *block;
1064 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1065 if (block->offset == addr) {
1071 assert(!new_block->idstr[0]);
1074 char *id = qdev_get_dev_path(dev);
1076 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
1080 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1082 /* This assumes the iothread lock is taken here too. */
1083 qemu_mutex_lock_ramlist();
1084 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1085 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
1086 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1091 qemu_mutex_unlock_ramlist();
1094 static int memory_try_enable_merging(void *addr, size_t len)
1098 opts = qemu_opts_find(qemu_find_opts("machine"), 0);
1099 if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
1100 /* disabled by the user */
1104 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1107 ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1110 RAMBlock *block, *new_block;
1112 size = TARGET_PAGE_ALIGN(size);
1113 new_block = g_malloc0(sizeof(*new_block));
1115 /* This assumes the iothread lock is taken here too. */
1116 qemu_mutex_lock_ramlist();
1118 new_block->offset = find_ram_offset(size);
1120 new_block->host = host;
1121 new_block->flags |= RAM_PREALLOC_MASK;
1124 #if defined (__linux__) && !defined(TARGET_S390X)
1125 new_block->host = file_ram_alloc(new_block, size, mem_path);
1126 if (!new_block->host) {
1127 new_block->host = qemu_anon_ram_alloc(size);
1128 memory_try_enable_merging(new_block->host, size);
1131 fprintf(stderr, "-mem-path option unsupported\n");
1135 if (xen_enabled()) {
1136 xen_ram_alloc(new_block->offset, size, mr);
1137 } else if (kvm_enabled()) {
1138 /* some s390/kvm configurations have special constraints */
1139 new_block->host = kvm_ram_alloc(size);
1141 new_block->host = qemu_anon_ram_alloc(size);
1143 memory_try_enable_merging(new_block->host, size);
1146 new_block->length = size;
1148 /* Keep the list sorted from biggest to smallest block. */
1149 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1150 if (block->length < new_block->length) {
1155 QTAILQ_INSERT_BEFORE(block, new_block, next);
1157 QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
1159 ram_list.mru_block = NULL;
1162 qemu_mutex_unlock_ramlist();
1164 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
1165 last_ram_offset() >> TARGET_PAGE_BITS);
1166 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
1167 0, size >> TARGET_PAGE_BITS);
1168 cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);
1170 qemu_ram_setup_dump(new_block->host, size);
1171 qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
1174 kvm_setup_guest_memory(new_block->host, size);
1176 return new_block->offset;
1179 ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
1181 return qemu_ram_alloc_from_ptr(size, NULL, mr);
1184 void qemu_ram_free_from_ptr(ram_addr_t addr)
1188 /* This assumes the iothread lock is taken here too. */
1189 qemu_mutex_lock_ramlist();
1190 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1191 if (addr == block->offset) {
1192 QTAILQ_REMOVE(&ram_list.blocks, block, next);
1193 ram_list.mru_block = NULL;
1199 qemu_mutex_unlock_ramlist();
1202 void qemu_ram_free(ram_addr_t addr)
1206 /* This assumes the iothread lock is taken here too. */
1207 qemu_mutex_lock_ramlist();
1208 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1209 if (addr == block->offset) {
1210 QTAILQ_REMOVE(&ram_list.blocks, block, next);
1211 ram_list.mru_block = NULL;
1213 if (block->flags & RAM_PREALLOC_MASK) {
1215 } else if (mem_path) {
1216 #if defined (__linux__) && !defined(TARGET_S390X)
1218 munmap(block->host, block->length);
1221 qemu_anon_ram_free(block->host, block->length);
1227 if (xen_enabled()) {
1228 xen_invalidate_map_cache_entry(block->host);
1230 qemu_anon_ram_free(block->host, block->length);
1237 qemu_mutex_unlock_ramlist();
1242 void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1249 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1250 offset = addr - block->offset;
1251 if (offset < block->length) {
1252 vaddr = block->host + offset;
1253 if (block->flags & RAM_PREALLOC_MASK) {
1257 munmap(vaddr, length);
1259 #if defined(__linux__) && !defined(TARGET_S390X)
1262 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
1265 flags |= MAP_PRIVATE;
1267 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1268 flags, block->fd, offset);
1270 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1271 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1278 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
1279 flags |= MAP_SHARED | MAP_ANONYMOUS;
1280 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
1283 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1284 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1288 if (area != vaddr) {
1289 fprintf(stderr, "Could not remap addr: "
1290 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
1294 memory_try_enable_merging(vaddr, length);
1295 qemu_ram_setup_dump(vaddr, length);
1301 #endif /* !_WIN32 */
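/* Map a ram_addr_t to its RAMBlock, keeping a one-entry MRU cache so that
 * repeated accesses to the same block avoid walking the whole list. */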
1303 static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
1307 /* The list is protected by the iothread lock here. */
1308 block = ram_list.mru_block;
1309 if (block && addr - block->offset < block->length) {
1312 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1313 if (addr - block->offset < block->length) {
1318 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1322 ram_list.mru_block = block;
1326 /* Return a host pointer to ram allocated with qemu_ram_alloc.
1327 With the exception of the softmmu code in this file, this should
1328 only be used for local memory (e.g. video ram) that the device owns,
1329 and knows it isn't going to access beyond the end of the block.
1331 It should not be used for general purpose DMA.
1332 Use cpu_physical_memory_map/cpu_physical_memory_rw instead. */
1334 void *qemu_get_ram_ptr(ram_addr_t addr)
1336 RAMBlock *block = qemu_get_ram_block(addr);
1338 if (xen_enabled()) {
1339 /* We need to check if the requested address is in the RAM
1340 * because we don't want to map the entire memory in QEMU.
1341 * In that case just map until the end of the page. */
1343 if (block->offset == 0) {
1344 return xen_map_cache(addr, 0, 0);
1345 } else if (block->host == NULL) {
1347 xen_map_cache(block->offset, block->length, 1);
1350 return block->host + (addr - block->offset);
1353 /* Return a host pointer to ram allocated with qemu_ram_alloc. Same as
1354 * qemu_get_ram_ptr but do not touch ram_list.mru_block.
1356 * ??? Is this still necessary? */
1358 static void *qemu_safe_ram_ptr(ram_addr_t addr)
1362 /* The list is protected by the iothread lock here. */
1363 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1364 if (addr - block->offset < block->length) {
1365 if (xen_enabled()) {
1366 /* We need to check if the requested address is in the RAM
1367 * because we don't want to map the entire memory in QEMU.
1368 * In that case just map until the end of the page. */
1370 if (block->offset == 0) {
1371 return xen_map_cache(addr, 0, 0);
1372 } else if (block->host == NULL) {
1374 xen_map_cache(block->offset, block->length, 1);
1377 return block->host + (addr - block->offset);
1381 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1387 /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1388 * but takes a size argument */
1389 static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
1394 if (xen_enabled()) {
1395 return xen_map_cache(addr, *size, 1);
1399 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1400 if (addr - block->offset < block->length) {
1401 if (addr - block->offset + *size > block->length)
1402 *size = block->length - addr + block->offset;
1403 return block->host + (addr - block->offset);
1407 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1412 /* Some of the softmmu routines need to translate from a host pointer
1413 (typically a TLB entry) back to a ram offset. */
1414 MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
1417 uint8_t *host = ptr;
1419 if (xen_enabled()) {
1420 *ram_addr = xen_ram_addr_from_mapcache(ptr);
1421 return qemu_get_ram_block(*ram_addr)->mr;
1424 block = ram_list.mru_block;
1425 if (block && block->host && host - block->host < block->length) {
1429 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1430 /* This case happens when the block is not mapped. */
1431 if (block->host == NULL) {
1434 if (host - block->host < block->length) {
1442 *ram_addr = block->offset + (host - block->host);
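/* The "notdirty" MMIO handlers below catch the first write to a page whose
 * dirty bits are clear: translated code for that page is invalidated, the
 * dirty flags are set, and once the page is fully dirty the TLB entry is
 * switched back to a plain RAM mapping. */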
1446 static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
1447 uint64_t val, unsigned size)
1450 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
1451 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1452 tb_invalidate_phys_page_fast(ram_addr, size);
1453 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
1457 stb_p(qemu_get_ram_ptr(ram_addr), val);
1460 stw_p(qemu_get_ram_ptr(ram_addr), val);
1463 stl_p(qemu_get_ram_ptr(ram_addr), val);
1468 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1469 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
1470 /* we remove the notdirty callback only if the code has been flushed */
1472 if (dirty_flags == 0xff)
1473 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
1476 static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1477 unsigned size, bool is_write)
1482 static const MemoryRegionOps notdirty_mem_ops = {
1483 .write = notdirty_mem_write,
1484 .valid.accepts = notdirty_mem_accepts,
1485 .endianness = DEVICE_NATIVE_ENDIAN,
1488 /* Generate a debug exception if a watchpoint has been hit. */
1489 static void check_watchpoint(int offset, int len_mask, int flags)
1491 CPUArchState *env = cpu_single_env;
1492 target_ulong pc, cs_base;
1497 if (env->watchpoint_hit) {
1498 /* We re-entered the check after replacing the TB. Now raise
1499 * the debug interrupt so that it will trigger after the
1500 * current instruction. */
1501 cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
1504 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
1505 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1506 if ((vaddr == (wp->vaddr & len_mask) ||
1507 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
1508 wp->flags |= BP_WATCHPOINT_HIT;
1509 if (!env->watchpoint_hit) {
1510 env->watchpoint_hit = wp;
1511 tb_check_watchpoint(env);
1512 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
1513 env->exception_index = EXCP_DEBUG;
1516 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1517 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
1518 cpu_resume_from_signal(env, NULL);
1522 wp->flags &= ~BP_WATCHPOINT_HIT;
1527 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1528 so these check for a hit then pass through to the normal out-of-line phys routines. */
1530 static uint64_t watch_mem_read(void *opaque, hwaddr addr,
1533 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1535 case 1: return ldub_phys(addr);
1536 case 2: return lduw_phys(addr);
1537 case 4: return ldl_phys(addr);
1542 static void watch_mem_write(void *opaque, hwaddr addr,
1543 uint64_t val, unsigned size)
1545 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1548 stb_phys(addr, val);
1551 stw_phys(addr, val);
1554 stl_phys(addr, val);
1560 static const MemoryRegionOps watch_mem_ops = {
1561 .read = watch_mem_read,
1562 .write = watch_mem_write,
1563 .endianness = DEVICE_NATIVE_ENDIAN,
1566 static uint64_t subpage_read(void *opaque, hwaddr addr,
1569 subpage_t *subpage = opaque;
1572 #if defined(DEBUG_SUBPAGE)
1573 printf("%s: subpage %p len %d addr " TARGET_FMT_plx "\n", __func__,
1574 subpage, len, addr);
1576 address_space_read(subpage->as, addr + subpage->base, buf, len);
1589 static void subpage_write(void *opaque, hwaddr addr,
1590 uint64_t value, unsigned len)
1592 subpage_t *subpage = opaque;
1595 #if defined(DEBUG_SUBPAGE)
1596 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
1597 " value %"PRIx64"\n",
1598 __func__, subpage, len, addr, value);
1613 address_space_write(subpage->as, addr + subpage->base, buf, len);
1616 static bool subpage_accepts(void *opaque, hwaddr addr,
1617 unsigned size, bool is_write)
1619 subpage_t *subpage = opaque;
1620 #if defined(DEBUG_SUBPAGE)
1621 printf("%s: subpage %p %c len %d addr " TARGET_FMT_plx "\n",
1622 __func__, subpage, is_write ? 'w' : 'r', size, addr);
1625 return address_space_access_valid(subpage->as, addr + subpage->base,
1629 static const MemoryRegionOps subpage_ops = {
1630 .read = subpage_read,
1631 .write = subpage_write,
1632 .valid.accepts = subpage_accepts,
1633 .endianness = DEVICE_NATIVE_ENDIAN,
1636 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
1641 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1643 idx = SUBPAGE_IDX(start);
1644 eidx = SUBPAGE_IDX(end);
1645 #if defined(DEBUG_SUBPAGE)
1646 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n", __func__,
1647 mmio, start, end, idx, eidx, section);
1649 for (; idx <= eidx; idx++) {
1650 mmio->sub_section[idx] = section;
1656 static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
1660 mmio = g_malloc0(sizeof(subpage_t));
1664 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
1665 "subpage", TARGET_PAGE_SIZE);
1666 mmio->iomem.subpage = true;
1667 #if defined(DEBUG_SUBPAGE)
1668 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
1669 mmio, base, TARGET_PAGE_SIZE);
1671 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
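/* Build a MemoryRegionSection covering an entire memory region; used to
 * reserve the fixed PHYS_SECTION_* slots at the start of each section table. */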
1676 static uint16_t dummy_section(MemoryRegion *mr)
1678 MemoryRegionSection section = {
1680 .offset_within_address_space = 0,
1681 .offset_within_region = 0,
1682 .size = int128_2_64(),
1685 return phys_section_add(&section);
1688 MemoryRegion *iotlb_to_region(hwaddr index)
1690 return cur_map.sections[index & ~TARGET_PAGE_MASK].mr;
1693 static void io_mem_init(void)
1695 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, "rom", UINT64_MAX);
1696 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
1697 "unassigned", UINT64_MAX);
1698 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
1699 "notdirty", UINT64_MAX);
1700 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
1701 "watch", UINT64_MAX);
1704 static void mem_begin(MemoryListener *listener)
1706 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
1707 AddressSpaceDispatch *d = as->dispatch;
1709 d->phys_map.ptr = PHYS_MAP_NODE_NIL;
1712 static void core_begin(MemoryListener *listener)
1716 memset(&next_map, 0, sizeof(next_map));
1717 n = dummy_section(&io_mem_unassigned);
1718 assert(n == PHYS_SECTION_UNASSIGNED);
1719 n = dummy_section(&io_mem_notdirty);
1720 assert(n == PHYS_SECTION_NOTDIRTY);
1721 n = dummy_section(&io_mem_rom);
1722 assert(n == PHYS_SECTION_ROM);
1723 n = dummy_section(&io_mem_watch);
1724 assert(n == PHYS_SECTION_WATCH);
1727 /* This listener's commit runs after the other AddressSpaceDispatch listeners'.
1728 * All AddressSpaceDispatch instances have switched to the next map. */
1730 static void core_commit(MemoryListener *listener)
1732 PhysPageMap info = cur_map;
1734 phys_sections_clear(&info);
1737 static void tcg_commit(MemoryListener *listener)
1741 /* since each CPU stores ram addresses in its TLB cache, we must
1742 reset the modified entries */
1744 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1749 static void core_log_global_start(MemoryListener *listener)
1751 cpu_physical_memory_set_dirty_tracking(1);
1754 static void core_log_global_stop(MemoryListener *listener)
1756 cpu_physical_memory_set_dirty_tracking(0);
1759 static MemoryListener core_memory_listener = {
1760 .begin = core_begin,
1761 .commit = core_commit,
1762 .log_global_start = core_log_global_start,
1763 .log_global_stop = core_log_global_stop,
1767 static MemoryListener tcg_memory_listener = {
1768 .commit = tcg_commit,
1771 void address_space_init_dispatch(AddressSpace *as)
1773 AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);
1775 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
1778 as->dispatch_listener = (MemoryListener) {
1780 .region_add = mem_add,
1781 .region_nop = mem_add,
1784 memory_listener_register(&as->dispatch_listener, as);
1787 void address_space_destroy_dispatch(AddressSpace *as)
1789 AddressSpaceDispatch *d = as->dispatch;
1791 memory_listener_unregister(&as->dispatch_listener);
1793 as->dispatch = NULL;
1796 static void memory_map_init(void)
1798 system_memory = g_malloc(sizeof(*system_memory));
1799 memory_region_init(system_memory, NULL, "system", INT64_MAX);
1800 address_space_init(&address_space_memory, system_memory, "memory");
1802 system_io = g_malloc(sizeof(*system_io));
1803 memory_region_init(system_io, NULL, "io", 65536);
1804 address_space_init(&address_space_io, system_io, "I/O");
1806 memory_listener_register(&core_memory_listener, &address_space_memory);
1807 memory_listener_register(&tcg_memory_listener, &address_space_memory);
1810 MemoryRegion *get_system_memory(void)
1812 return system_memory;
1815 MemoryRegion *get_system_io(void)
1820 #endif /* !defined(CONFIG_USER_ONLY) */
1822 /* physical memory access (slow version, mainly for debug) */
1823 #if defined(CONFIG_USER_ONLY)
1824 int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
1825 uint8_t *buf, int len, int is_write)
1832 page = addr & TARGET_PAGE_MASK;
1833 l = (page + TARGET_PAGE_SIZE) - addr;
1836 flags = page_get_flags(page);
1837 if (!(flags & PAGE_VALID))
1840 if (!(flags & PAGE_WRITE))
1842 /* XXX: this code should not depend on lock_user */
1843 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
1846 unlock_user(p, addr, l);
1848 if (!(flags & PAGE_READ))
1850 /* XXX: this code should not depend on lock_user */
1851 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
1854 unlock_user(p, addr, 0);
1865 static void invalidate_and_set_dirty(hwaddr addr,
1868 if (!cpu_physical_memory_is_dirty(addr)) {
1869 /* invalidate code */
1870 tb_invalidate_phys_page_range(addr, addr + length, 0);
1872 cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
1874 xen_modified_memory(addr, length);
1877 static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
1879 if (memory_region_is_ram(mr)) {
1880 return !(is_write && mr->readonly);
1882 if (memory_region_is_romd(mr)) {
1889 static inline int memory_access_size(MemoryRegion *mr, int l, hwaddr addr)
1891 if (l >= 4 && (((addr & 3) == 0 || mr->ops->impl.unaligned))) {
1894 if (l >= 2 && (((addr & 1) == 0) || mr->ops->impl.unaligned)) {
1900 bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
1901 int len, bool is_write)
1912 mr = address_space_translate(as, addr, &addr1, &l, is_write);
1915 if (!memory_access_is_direct(mr, is_write)) {
1916 l = memory_access_size(mr, l, addr1);
1917 /* XXX: could force cpu_single_env to NULL to avoid potential bugs */
1920 /* 32 bit write access */
1922 error |= io_mem_write(mr, addr1, val, 4);
1923 } else if (l == 2) {
1924 /* 16 bit write access */
1926 error |= io_mem_write(mr, addr1, val, 2);
1928 /* 8 bit write access */
1930 error |= io_mem_write(mr, addr1, val, 1);
1933 addr1 += memory_region_get_ram_addr(mr);
1935 ptr = qemu_get_ram_ptr(addr1);
1936 memcpy(ptr, buf, l);
1937 invalidate_and_set_dirty(addr1, l);
1940 if (!memory_access_is_direct(mr, is_write)) {
1942 l = memory_access_size(mr, l, addr1);
1944 /* 32 bit read access */
1945 error |= io_mem_read(mr, addr1, &val, 4);
1947 } else if (l == 2) {
1948 /* 16 bit read access */
1949 error |= io_mem_read(mr, addr1, &val, 2);
1952 /* 8 bit read access */
1953 error |= io_mem_read(mr, addr1, &val, 1);
1958 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
1959 memcpy(buf, ptr, l);
1970 bool address_space_write(AddressSpace *as, hwaddr addr,
1971 const uint8_t *buf, int len)
1973 return address_space_rw(as, addr, (uint8_t *)buf, len, true);
1976 bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
1978 return address_space_rw(as, addr, buf, len, false);
1982 void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
1983 int len, int is_write)
1985 address_space_rw(&address_space_memory, addr, buf, len, is_write);
1988 /* used for ROM loading : can write in RAM and ROM */
1989 void cpu_physical_memory_write_rom(hwaddr addr,
1990 const uint8_t *buf, int len)
1999 mr = address_space_translate(&address_space_memory,
2000 addr, &addr1, &l, true);
2002 if (!(memory_region_is_ram(mr) ||
2003 memory_region_is_romd(mr))) {
2006 addr1 += memory_region_get_ram_addr(mr);
2008 ptr = qemu_get_ram_ptr(addr1);
2009 memcpy(ptr, buf, l);
2010 invalidate_and_set_dirty(addr1, l);
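/* Bounce buffer used by address_space_map() when the target is not directly
 * accessible RAM: data is staged in a one-page temporary buffer and
 * transferred with address_space_read()/address_space_write().  Only one such
 * mapping can be outstanding at a time; cpu_register_map_client() lets callers
 * be notified when the buffer becomes free again. */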
2025 static BounceBuffer bounce;
2027 typedef struct MapClient {
2029 void (*callback)(void *opaque);
2030 QLIST_ENTRY(MapClient) link;
2033 static QLIST_HEAD(map_client_list, MapClient) map_client_list
2034 = QLIST_HEAD_INITIALIZER(map_client_list);
2036 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2038 MapClient *client = g_malloc(sizeof(*client));
2040 client->opaque = opaque;
2041 client->callback = callback;
2042 QLIST_INSERT_HEAD(&map_client_list, client, link);
2046 static void cpu_unregister_map_client(void *_client)
2048 MapClient *client = (MapClient *)_client;
2050 QLIST_REMOVE(client, link);
2054 static void cpu_notify_map_clients(void)
2058 while (!QLIST_EMPTY(&map_client_list)) {
2059 client = QLIST_FIRST(&map_client_list);
2060 client->callback(client->opaque);
2061 cpu_unregister_map_client(client);
2065 bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2072 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2073 if (!memory_access_is_direct(mr, is_write)) {
2074 l = memory_access_size(mr, l, addr);
2075 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
2086 /* Map a physical memory region into a host virtual address.
2087 * May map a subset of the requested range, given by and returned in *plen.
2088 * May return NULL if resources needed to perform the mapping are exhausted.
2089 * Use only for reads OR writes - not for read-modify-write operations.
2090 * Use cpu_register_map_client() to know when retrying the map operation is
2091 * likely to succeed. */
2093 void *address_space_map(AddressSpace *as,
2100 hwaddr l, xlat, base;
2101 MemoryRegion *mr, *this_mr;
2109 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2110 if (!memory_access_is_direct(mr, is_write)) {
2111 if (bounce.buffer) {
2114 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
2118 memory_region_ref(mr);
2121 address_space_read(as, addr, bounce.buffer, l);
2125 return bounce.buffer;
2129 raddr = memory_region_get_ram_addr(mr);
2140 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2141 if (this_mr != mr || xlat != base + done) {
2146 memory_region_ref(mr);
2148 return qemu_ram_ptr_length(raddr + base, plen);
2151 /* Unmaps a memory region previously mapped by address_space_map().
2152 * Will also mark the memory as dirty if is_write == 1. access_len gives
2153 * the amount of memory that was actually read or written by the caller. */
2155 void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2156 int is_write, hwaddr access_len)
2158 if (buffer != bounce.buffer) {
2162 mr = qemu_ram_addr_from_host(buffer, &addr1);
2165 while (access_len) {
2167 l = TARGET_PAGE_SIZE;
2170 invalidate_and_set_dirty(addr1, l);
2175 if (xen_enabled()) {
2176 xen_invalidate_map_cache_entry(buffer);
2178 memory_region_unref(mr);
2182 address_space_write(as, bounce.addr, bounce.buffer, access_len);
2184 qemu_vfree(bounce.buffer);
2185 bounce.buffer = NULL;
2186 memory_region_unref(bounce.mr);
2187 cpu_notify_map_clients();
2190 void *cpu_physical_memory_map(hwaddr addr,
2194 return address_space_map(&address_space_memory, addr, plen, is_write);
2197 void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2198 int is_write, hwaddr access_len)
2200 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
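/* Usage sketch (illustrative only, not part of the original file): a device
 * model reading guest memory zero-copy with the map/unmap pair above.
 * 'gpa', 'size', 'buf' and consume() are placeholders for the caller's data.
 *
 *     hwaddr got = size;
 *     uint8_t *p = cpu_physical_memory_map(gpa, &got, 0);
 *     if (p) {
 *         consume(p, got);                          // mapping may be shorter than 'size'
 *         cpu_physical_memory_unmap(p, got, 0, got);
 *     } else {
 *         cpu_physical_memory_rw(gpa, buf, size, 0);  // fall back to a copying read
 *     }
 */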
2203 /* warning: addr must be aligned */
2204 static inline uint32_t ldl_phys_internal(hwaddr addr,
2205 enum device_endian endian)
2213 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2215 if (l < 4 || !memory_access_is_direct(mr, false)) {
2217 io_mem_read(mr, addr1, &val, 4);
2218 #if defined(TARGET_WORDS_BIGENDIAN)
2219 if (endian == DEVICE_LITTLE_ENDIAN) {
2223 if (endian == DEVICE_BIG_ENDIAN) {
2229 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
2233 case DEVICE_LITTLE_ENDIAN:
2234 val = ldl_le_p(ptr);
2236 case DEVICE_BIG_ENDIAN:
2237 val = ldl_be_p(ptr);
2247 uint32_t ldl_phys(hwaddr addr)
2249 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2252 uint32_t ldl_le_phys(hwaddr addr)
2254 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2257 uint32_t ldl_be_phys(hwaddr addr)
2259 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
2262 /* warning: addr must be aligned */
2263 static inline uint64_t ldq_phys_internal(hwaddr addr,
2264 enum device_endian endian)
2272 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2274 if (l < 8 || !memory_access_is_direct(mr, false)) {
2276 io_mem_read(mr, addr1, &val, 8);
2277 #if defined(TARGET_WORDS_BIGENDIAN)
2278 if (endian == DEVICE_LITTLE_ENDIAN) {
2282 if (endian == DEVICE_BIG_ENDIAN) {
2288 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
2292 case DEVICE_LITTLE_ENDIAN:
2293 val = ldq_le_p(ptr);
2295 case DEVICE_BIG_ENDIAN:
2296 val = ldq_be_p(ptr);
2306 uint64_t ldq_phys(hwaddr addr)
2308 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2311 uint64_t ldq_le_phys(hwaddr addr)
2313 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2316 uint64_t ldq_be_phys(hwaddr addr)
2318 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
2322 uint32_t ldub_phys(hwaddr addr)
2325 cpu_physical_memory_read(addr, &val, 1);
2329 /* warning: addr must be aligned */
2330 static inline uint32_t lduw_phys_internal(hwaddr addr,
2331 enum device_endian endian)
2339 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2341 if (l < 2 || !memory_access_is_direct(mr, false)) {
2343 io_mem_read(mr, addr1, &val, 2);
2344 #if defined(TARGET_WORDS_BIGENDIAN)
2345 if (endian == DEVICE_LITTLE_ENDIAN) {
2349 if (endian == DEVICE_BIG_ENDIAN) {
2355 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
2359 case DEVICE_LITTLE_ENDIAN:
2360 val = lduw_le_p(ptr);
2362 case DEVICE_BIG_ENDIAN:
2363 val = lduw_be_p(ptr);
2373 uint32_t lduw_phys(hwaddr addr)
2375 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2378 uint32_t lduw_le_phys(hwaddr addr)
2380 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2383 uint32_t lduw_be_phys(hwaddr addr)
2385 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
2388 /* warning: addr must be aligned. The ram page is not masked as dirty
2389 and the code inside is not invalidated. It is useful if the dirty
2390 bits are used to track modified PTEs */
2391 void stl_phys_notdirty(hwaddr addr, uint32_t val)
2398 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2400 if (l < 4 || !memory_access_is_direct(mr, true)) {
2401 io_mem_write(mr, addr1, val, 4);
2403 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
2404 ptr = qemu_get_ram_ptr(addr1);
2407 if (unlikely(in_migration)) {
2408 if (!cpu_physical_memory_is_dirty(addr1)) {
2409 /* invalidate code */
2410 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2412 cpu_physical_memory_set_dirty_flags(
2413 addr1, (0xff & ~CODE_DIRTY_FLAG));
2419 /* warning: addr must be aligned */
2420 static inline void stl_phys_internal(hwaddr addr, uint32_t val,
2421 enum device_endian endian)
2428 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2430 if (l < 4 || !memory_access_is_direct(mr, true)) {
2431 #if defined(TARGET_WORDS_BIGENDIAN)
2432 if (endian == DEVICE_LITTLE_ENDIAN) {
2436 if (endian == DEVICE_BIG_ENDIAN) {
2440 io_mem_write(mr, addr1, val, 4);
2443 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
2444 ptr = qemu_get_ram_ptr(addr1);
2446 case DEVICE_LITTLE_ENDIAN:
2449 case DEVICE_BIG_ENDIAN:
2456 invalidate_and_set_dirty(addr1, 4);
2460 void stl_phys(hwaddr addr, uint32_t val)
2462 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2465 void stl_le_phys(hwaddr addr, uint32_t val)
2467 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2470 void stl_be_phys(hwaddr addr, uint32_t val)
2472 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2476 void stb_phys(hwaddr addr, uint32_t val)
2479 cpu_physical_memory_write(addr, &v, 1);
2482 /* warning: addr must be aligned */
2483 static inline void stw_phys_internal(hwaddr addr, uint32_t val,
2484 enum device_endian endian)
2491 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2493 if (l < 2 || !memory_access_is_direct(mr, true)) {
2494 #if defined(TARGET_WORDS_BIGENDIAN)
2495 if (endian == DEVICE_LITTLE_ENDIAN) {
2499 if (endian == DEVICE_BIG_ENDIAN) {
2503 io_mem_write(mr, addr1, val, 2);
2506 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
2507 ptr = qemu_get_ram_ptr(addr1);
2509 case DEVICE_LITTLE_ENDIAN:
2512 case DEVICE_BIG_ENDIAN:
2519 invalidate_and_set_dirty(addr1, 2);
2523 void stw_phys(hwaddr addr, uint32_t val)
2525 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2528 void stw_le_phys(hwaddr addr, uint32_t val)
2530 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2533 void stw_be_phys(hwaddr addr, uint32_t val)
2535 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2539 void stq_phys(hwaddr addr, uint64_t val)
2542 cpu_physical_memory_write(addr, &val, 8);
2545 void stq_le_phys(hwaddr addr, uint64_t val)
2547 val = cpu_to_le64(val);
2548 cpu_physical_memory_write(addr, &val, 8);
2551 void stq_be_phys(hwaddr addr, uint64_t val)
2553 val = cpu_to_be64(val);
2554 cpu_physical_memory_write(addr, &val, 8);
2557 /* virtual memory access for debug (includes writing to ROM) */
2558 int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
2559 uint8_t *buf, int len, int is_write)
2566 page = addr & TARGET_PAGE_MASK;
2567 phys_addr = cpu_get_phys_page_debug(env, page);
2568 /* if no physical page mapped, return an error */
2569 if (phys_addr == -1)
2571 l = (page + TARGET_PAGE_SIZE) - addr;
2574 phys_addr += (addr & ~TARGET_PAGE_MASK);
2576 cpu_physical_memory_write_rom(phys_addr, buf, l);
2578 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
2587 #if !defined(CONFIG_USER_ONLY)
2590 /* A helper function for the _utterly broken_ virtio device model to find out if
2591 * it's running on a big endian machine. Don't do this at home kids! */
2593 bool virtio_is_big_endian(void);
2594 bool virtio_is_big_endian(void)
2596 #if defined(TARGET_WORDS_BIGENDIAN)
2605 #ifndef CONFIG_USER_ONLY
2606 bool cpu_physical_memory_is_io(hwaddr phys_addr)
2611 mr = address_space_translate(&address_space_memory,
2612 phys_addr, &phys_addr, &l, false);
2614 return !(memory_region_is_ram(mr) ||
2615 memory_region_is_romd(mr));
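/* Walk every RAM block and hand its host pointer, guest offset and length to
 * func; used by callers that need to visit all guest RAM. */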
2618 void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
2622 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
2623 func(block->host, block->offset, block->length, opaque);