X-Git-Url: https://repo.jachan.dev/qemu.git/blobdiff_plain/8629d3fcb77e9775e44d9051bad0fb5187925eae..efc75e2a4cf7dfa62c7ccaa9a1016f27e5519003:/exec.c

diff --git a/exec.c b/exec.c
index b085f82503..4d8addb263 100644
--- a/exec.c
+++ b/exec.c
@@ -18,8 +18,6 @@
  */
 #include "qemu/osdep.h"
 #include "qapi/error.h"
-#ifndef _WIN32
-#endif
 
 #include "qemu/cutils.h"
 #include "cpu.h"
@@ -51,7 +49,6 @@
 #include "trace-root.h"
 
 #ifdef CONFIG_FALLOCATE_PUNCH_HOLE
-#include 
 #include 
 #endif
 
@@ -120,8 +117,6 @@ int use_icount;
 
 uintptr_t qemu_host_page_size;
 intptr_t qemu_host_page_mask;
-uintptr_t qemu_real_host_page_size;
-intptr_t qemu_real_host_page_mask;
 
 bool set_preferred_target_page_bits(int bits)
 {
@@ -412,22 +407,16 @@ static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
 {
     MemoryRegionSection *section = atomic_read(&d->mru_section);
     subpage_t *subpage;
-    bool update;
 
-    if (section && section != &d->map.sections[PHYS_SECTION_UNASSIGNED] &&
-        section_covers_addr(section, addr)) {
-        update = false;
-    } else {
+    if (!section || section == &d->map.sections[PHYS_SECTION_UNASSIGNED] ||
+        !section_covers_addr(section, addr)) {
         section = phys_page_find(d, addr);
-        update = true;
+        atomic_set(&d->mru_section, section);
     }
     if (resolve_subpage && section->mr->subpage) {
         subpage = container_of(section->mr, subpage_t, iomem);
         section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
     }
-    if (update) {
-        atomic_set(&d->mru_section, section);
-    }
     return section;
 }
 
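The address_space_lookup_region() hunk above drops the old update flag: the MRU section cache is now refreshed as soon as the slow phys_page_find() lookup runs, before any subpage resolution. Below is a minimal standalone sketch of that caching pattern, assuming invented Section/phys_page_find_slow()/section_covers_addr() stand-ins rather than the real QEMU types.

    #include <stdio.h>

    typedef struct Section { int id; } Section;

    static Section sections[4] = { {0}, {1}, {2}, {3} };
    static Section *mru;                 /* cached most-recently-used entry */

    /* Stand-in for phys_page_find(): pretend addr/1000 selects the section. */
    static Section *phys_page_find_slow(unsigned long addr)
    {
        return &sections[(addr / 1000) % 4];
    }

    static int section_covers_addr(const Section *s, unsigned long addr)
    {
        return s == &sections[(addr / 1000) % 4];
    }

    static Section *lookup(unsigned long addr)
    {
        Section *s = mru;

        /* Fast path: reuse the cached section while it still covers addr. */
        if (!s || !section_covers_addr(s, addr)) {
            s = phys_page_find_slow(addr);
            mru = s;                     /* refresh the cache right away */
        }
        return s;
    }

    int main(void)
    {
        printf("%d %d %d\n", lookup(100)->id, lookup(150)->id, lookup(2500)->id);
        return 0;
    }

The second lookup(150) call hits the cached entry; only the third call falls back to the slow path and updates the cache.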
@@ -467,11 +456,29 @@ address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *x
     return section;
 }
 
-/* Called from RCU critical section */
+/**
+ * flatview_do_translate - translate an address in FlatView
+ *
+ * @fv: the flat view that we want to translate on
+ * @addr: the address to be translated in above address space
+ * @xlat: the translated address offset within memory region. It
+ *        cannot be @NULL.
+ * @plen_out: valid read/write length of the translated address. It
+ *            can be @NULL when we don't care about it.
+ * @page_mask_out: page mask for the translated address. This
+ *                 should only be meaningful for IOMMU translated
+ *                 addresses, since there may be huge pages that this bit
+ *                 would tell. It can be @NULL if we don't care about it.
+ * @is_write: whether the translation operation is for write
+ * @is_mmio: whether this can be MMIO, set true if it can
+ *
+ * This function is called from RCU critical section
+ */
 static MemoryRegionSection flatview_do_translate(FlatView *fv,
                                                  hwaddr addr,
                                                  hwaddr *xlat,
-                                                 hwaddr *plen,
+                                                 hwaddr *plen_out,
+                                                 hwaddr *page_mask_out,
                                                  bool is_write,
                                                  bool is_mmio,
                                                  AddressSpace **target_as)
@@ -480,11 +487,17 @@ static MemoryRegionSection flatview_do_translate(FlatView *fv,
     MemoryRegionSection *section;
     IOMMUMemoryRegion *iommu_mr;
     IOMMUMemoryRegionClass *imrc;
+    hwaddr page_mask = (hwaddr)(-1);
+    hwaddr plen = (hwaddr)(-1);
+
+    if (plen_out) {
+        plen = *plen_out;
+    }
 
     for (;;) {
         section = address_space_translate_internal(
                 flatview_to_dispatch(fv), addr, &addr,
-                plen, is_mmio);
+                &plen, is_mmio);
 
         iommu_mr = memory_region_get_iommu(section->mr);
         if (!iommu_mr) {
@@ -496,7 +509,8 @@ static MemoryRegionSection flatview_do_translate(FlatView *fv,
                                 IOMMU_WO : IOMMU_RO);
         addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                 | (addr & iotlb.addr_mask));
-        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
+        page_mask &= iotlb.addr_mask;
+        plen = MIN(plen, (addr | iotlb.addr_mask) - addr + 1);
         if (!(iotlb.perm & (1 << is_write))) {
             goto translate_fail;
         }
@@ -507,6 +521,19 @@ static MemoryRegionSection flatview_do_translate(FlatView *fv,
 
     *xlat = addr;
 
+    if (page_mask == (hwaddr)(-1)) {
+        /* Not behind an IOMMU, use default page size. */
+        page_mask = ~TARGET_PAGE_MASK;
+    }
+
+    if (page_mask_out) {
+        *page_mask_out = page_mask;
+    }
+
+    if (plen_out) {
+        *plen_out = plen;
+    }
+
     return *section;
 
 translate_fail:
@@ -518,14 +545,14 @@ IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
                                             bool is_write)
 {
     MemoryRegionSection section;
-    hwaddr xlat, plen;
-
-    /* Try to get maximum page mask during translation. */
-    plen = (hwaddr)-1;
+    hwaddr xlat, page_mask;
 
-    /* This can never be MMIO. */
-    section = flatview_do_translate(address_space_to_flatview(as), addr,
-                                    &xlat, &plen, is_write, false, &as);
+    /*
+     * This can never be MMIO, and we don't really care about plen,
+     * but page mask.
+     */
+    section = flatview_do_translate(address_space_to_flatview(as), addr, &xlat,
+                                    NULL, &page_mask, is_write, false, &as);
 
     /* Illegal translation */
     if (section.mr == &io_mem_unassigned) {
@@ -536,22 +563,11 @@ IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
     xlat += section.offset_within_address_space -
         section.offset_within_region;
 
-    if (plen == (hwaddr)-1) {
-        /*
-         * We use default page size here. Logically it only happens
-         * for identity mappings.
-         */
-        plen = TARGET_PAGE_SIZE;
-    }
-
-    /* Convert to address mask */
-    plen -= 1;
-
     return (IOMMUTLBEntry) {
         .target_as = as,
-        .iova = addr & ~plen,
-        .translated_addr = xlat & ~plen,
-        .addr_mask = plen,
+        .iova = addr & ~page_mask,
+        .translated_addr = xlat & ~page_mask,
+        .addr_mask = page_mask,
         /* IOTLBs are for DMAs, and DMA only allows on RAMs. */
         .perm = IOMMU_RW,
     };
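With page_mask threaded out of flatview_do_translate(), address_space_get_iotlb_entry() above builds its IOTLB entry from mask arithmetic alone, falling back to the target page size when no IOMMU sits on the path. The standalone sketch below reproduces just that arithmetic; TlbEntry, make_entry() and the 4 KiB TARGET_PAGE_BITS value are assumptions made for illustration, not QEMU definitions.

    #include <stdio.h>
    #include <stdint.h>

    typedef uint64_t hwaddr;

    #define TARGET_PAGE_BITS 12                 /* assumption: 4 KiB pages */
    #define TARGET_PAGE_MASK ((hwaddr)-1 << TARGET_PAGE_BITS)

    /* Reduced IOTLB entry holding only the fields computed above. */
    typedef struct {
        hwaddr iova;
        hwaddr translated_addr;
        hwaddr addr_mask;
    } TlbEntry;

    static TlbEntry make_entry(hwaddr addr, hwaddr xlat, hwaddr page_mask)
    {
        if (page_mask == (hwaddr)-1) {
            /* No IOMMU on the path: use the default page size (mask 0xfff). */
            page_mask = ~TARGET_PAGE_MASK;
        }
        return (TlbEntry) {
            .iova = addr & ~page_mask,
            .translated_addr = xlat & ~page_mask,
            .addr_mask = page_mask,
        };
    }

    int main(void)
    {
        TlbEntry e = make_entry(0x12345678, 0x9abcd678, (hwaddr)-1);

        printf("iova=%#llx xlat=%#llx mask=%#llx\n",
               (unsigned long long)e.iova,
               (unsigned long long)e.translated_addr,
               (unsigned long long)e.addr_mask);
        return 0;
    }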
@@ -569,7 +585,8 @@ MemoryRegion *flatview_translate(FlatView *fv, hwaddr addr, hwaddr *xlat,
     AddressSpace *as = NULL;
 
     /* This can be MMIO, so setup MMIO bit. */
-    section = flatview_do_translate(fv, addr, xlat, plen, is_write, true, &as);
+    section = flatview_do_translate(fv, addr, xlat, plen, NULL,
+                                    is_write, true, &as);
     mr = section.mr;
 
     if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
@@ -606,6 +623,13 @@ static int cpu_common_post_load(void *opaque, int version_id)
     cpu->interrupt_request &= ~0x01;
     tlb_flush(cpu);
 
+    /* loadvm has just updated the content of RAM, bypassing the
+     * usual mechanisms that ensure we flush TBs for writes to
+     * memory we've translated code from. So we must flush all TBs,
+     * which will now be stale.
+     */
+    tb_flush(cpu);
+
     return 0;
 }
 
@@ -688,9 +712,17 @@ CPUState *qemu_get_cpu(int index)
 }
 
 #if !defined(CONFIG_USER_ONLY)
-void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
+void cpu_address_space_init(CPUState *cpu, int asidx,
+                            const char *prefix, MemoryRegion *mr)
 {
     CPUAddressSpace *newas;
+    AddressSpace *as = g_new0(AddressSpace, 1);
+    char *as_name;
+
+    assert(mr);
+    as_name = g_strdup_printf("%s-%d", prefix, cpu->cpu_index);
+    address_space_init(as, mr, as_name);
+    g_free(as_name);
 
     /* Target code should have set num_ases before calling us */
     assert(asidx < cpu->num_ases);
@@ -765,10 +797,16 @@ void cpu_exec_initfn(CPUState *cpu)
 
 void cpu_exec_realizefn(CPUState *cpu, Error **errp)
 {
-    CPUClass *cc ATTRIBUTE_UNUSED = CPU_GET_CLASS(cpu);
+    CPUClass *cc = CPU_GET_CLASS(cpu);
+    static bool tcg_target_initialized;
 
     cpu_list_add(cpu);
 
+    if (tcg_enabled() && !tcg_target_initialized) {
+        tcg_target_initialized = true;
+        cc->tcg_initialize();
+    }
+
 #ifndef CONFIG_USER_ONLY
     if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
         vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
@@ -1247,7 +1285,7 @@ static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                              uint16_t section);
 static subpage_t *subpage_init(FlatView *fv, hwaddr base);
 
-static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
+static void *(*phys_mem_alloc)(size_t size, uint64_t *align, bool shared) =
                                qemu_anon_ram_alloc;
 
 /*
@@ -1255,7 +1293,7 @@ static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
  * Accelerators with unusual needs may need this. Hopefully, we can
  * get rid of it eventually.
  */
-void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
+void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align, bool shared))
 {
     phys_mem_alloc = alloc;
 }
@@ -1574,7 +1612,13 @@ static void *file_ram_alloc(RAMBlock *block,
     void *area;
 
     block->page_size = qemu_fd_getpagesize(fd);
-    block->mr->align = block->page_size;
+    if (block->mr->align % block->page_size) {
+        error_setg(errp, "alignment 0x%" PRIx64
+                   " must be multiples of page size 0x%zx",
+                   block->mr->align, block->page_size);
+        return NULL;
+    }
+    block->mr->align = MAX(block->page_size, block->mr->align);
 #if defined(__s390x__)
     if (kvm_enabled()) {
         block->mr->align = MAX(block->mr->align, QEMU_VMALLOC_ALIGN);
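The file_ram_alloc() hunk above stops silently overriding a caller-requested alignment with the backing file's page size: the request must now be a multiple of that page size, and the larger of the two wins. A small sketch of the same check, using a hypothetical pick_align() helper instead of the real RAMBlock/MemoryRegion fields:

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>
    #include <stddef.h>

    #define MAX(a, b) ((a) > (b) ? (a) : (b))

    /* Reject an alignment that is not a multiple of the file page size,
     * otherwise keep whichever of the two is larger. */
    static bool pick_align(uint64_t requested, size_t page_size, uint64_t *out)
    {
        if (requested % page_size) {
            fprintf(stderr,
                    "alignment 0x%llx must be multiples of page size 0x%zx\n",
                    (unsigned long long)requested, page_size);
            return false;
        }
        *out = MAX((uint64_t)page_size, requested);
        return true;
    }

    int main(void)
    {
        uint64_t align;

        if (pick_align(2 * 1024 * 1024, 4096, &align)) {  /* 2 MiB on 4 KiB: ok */
            printf("align = 0x%llx\n", (unsigned long long)align);
        }
        pick_align(4096, 2 * 1024 * 1024, &align);        /* rejected */
        return 0;
    }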
@@ -1629,7 +1673,10 @@ static void *file_ram_alloc(RAMBlock *block,
 }
 #endif
 
-/* Called with the ramlist lock held. */
+/* Allocate space within the ram_addr_t space that governs the
+ * dirty bitmaps.
+ * Called with the ramlist lock held.
+ */
 static ram_addr_t find_ram_offset(ram_addr_t size)
 {
     RAMBlock *block, *next_block;
@@ -1642,19 +1689,33 @@ static ram_addr_t find_ram_offset(ram_addr_t size)
     }
 
     RAMBLOCK_FOREACH(block) {
-        ram_addr_t end, next = RAM_ADDR_MAX;
+        ram_addr_t candidate, next = RAM_ADDR_MAX;
 
-        end = block->offset + block->max_length;
+        /* Align blocks to start on a 'long' in the bitmap
+         * which makes the bitmap sync'ing take the fast path.
+         */
+        candidate = block->offset + block->max_length;
+        candidate = ROUND_UP(candidate, BITS_PER_LONG << TARGET_PAGE_BITS);
 
+        /* Search for the closest following block
+         * and find the gap.
+         */
         RAMBLOCK_FOREACH(next_block) {
-            if (next_block->offset >= end) {
+            if (next_block->offset >= candidate) {
                 next = MIN(next, next_block->offset);
             }
         }
-        if (next - end >= size && next - end < mingap) {
-            offset = end;
-            mingap = next - end;
+
+        /* If it fits remember our place and remember the size
+         * of gap, but keep going so that we might find a smaller
+         * gap to fill so avoiding fragmentation.
+         */
+        if (next - candidate >= size && next - candidate < mingap) {
+            offset = candidate;
+            mingap = next - candidate;
         }
+
+        trace_find_ram_offset_loop(size, candidate, offset, next, mingap);
     }
 
     if (offset == RAM_ADDR_MAX) {
@@ -1663,6 +1724,8 @@ static ram_addr_t find_ram_offset(ram_addr_t size)
         abort();
     }
 
+    trace_find_ram_offset(size, offset);
+
     return offset;
 }
 
@@ -1858,7 +1921,7 @@ static void dirty_memory_extend(ram_addr_t old_ram_size,
     }
 }
 
-static void ram_block_add(RAMBlock *new_block, Error **errp)
+static void ram_block_add(RAMBlock *new_block, Error **errp, bool shared)
 {
     RAMBlock *block;
     RAMBlock *last_block = NULL;
@@ -1881,7 +1944,7 @@ static void ram_block_add(RAMBlock *new_block, Error **errp)
             }
         } else {
             new_block->host = phys_mem_alloc(new_block->max_length,
-                                             &new_block->mr->align);
+                                             &new_block->mr->align, shared);
             if (!new_block->host) {
                 error_setg_errno(errp, errno,
                                  "cannot set up guest memory '%s'",
@@ -1986,7 +2049,7 @@ RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, MemoryRegion *mr,
         return NULL;
     }
 
-    ram_block_add(new_block, &local_err);
+    ram_block_add(new_block, &local_err, share);
     if (local_err) {
         g_free(new_block);
         error_propagate(errp, local_err);
@@ -2028,7 +2091,7 @@ RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
                                   void (*resized)(const char*,
                                                   uint64_t length,
                                                   void *host),
-                                  void *host, bool resizeable,
+                                  void *host, bool resizeable, bool share,
                                   MemoryRegion *mr, Error **errp)
 {
     RAMBlock *new_block;
@@ -2051,7 +2114,7 @@ RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
     if (resizeable) {
         new_block->flags |= RAM_RESIZEABLE;
     }
-    ram_block_add(new_block, &local_err);
+    ram_block_add(new_block, &local_err, share);
     if (local_err) {
         g_free(new_block);
         error_propagate(errp, local_err);
@@ -2063,12 +2126,15 @@ RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
 RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr, Error **errp)
 {
-    return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
+    return qemu_ram_alloc_internal(size, size, NULL, host, false,
+                                   false, mr, errp);
 }
 
-RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
+RAMBlock *qemu_ram_alloc(ram_addr_t size, bool share,
+                         MemoryRegion *mr, Error **errp)
 {
-    return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
+    return qemu_ram_alloc_internal(size, size, NULL, NULL, false,
+                                   share, mr, errp);
 }
 
 RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
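The find_ram_offset() changes above round each candidate offset up to BITS_PER_LONG << TARGET_PAGE_BITS so a new block starts on a whole long of dirty-bitmap bits, and they keep scanning for the smallest gap that still fits. The standalone sketch below mirrors that best-fit search; Block, find_offset() and the constants are simplified assumptions, and it returns UINT64_MAX instead of aborting when nothing fits.

    #include <stdio.h>
    #include <stdint.h>

    #define BITS_PER_LONG    64
    #define TARGET_PAGE_BITS 12                 /* assumption: 4 KiB pages */
    #define ALIGNMENT ((uint64_t)BITS_PER_LONG << TARGET_PAGE_BITS)
    #define ROUND_UP(n, d) (((n) + (d) - 1) / (d) * (d))

    typedef struct { uint64_t offset, max_length; } Block;

    /* Best-fit search over the gaps between existing blocks, with every
     * candidate start rounded up to ALIGNMENT. */
    static uint64_t find_offset(const Block *blocks, int nb, uint64_t size)
    {
        uint64_t offset = UINT64_MAX, mingap = UINT64_MAX;

        for (int i = 0; i < nb; i++) {
            uint64_t candidate = blocks[i].offset + blocks[i].max_length;
            uint64_t next = UINT64_MAX;

            candidate = ROUND_UP(candidate, ALIGNMENT);
            for (int j = 0; j < nb; j++) {
                if (blocks[j].offset >= candidate && blocks[j].offset < next) {
                    next = blocks[j].offset;
                }
            }
            if (next - candidate >= size && next - candidate < mingap) {
                offset = candidate;
                mingap = next - candidate;
            }
        }
        return offset;
    }

    int main(void)
    {
        Block blocks[] = { { 0, 0x1000 }, { 0x100000, 0x80000 } };

        printf("offset = 0x%llx\n",
               (unsigned long long)find_offset(blocks, 2, 0x2000));
        return 0;
    }

With the sample layout the gap after the first block is chosen, but its start is pushed from 0x1000 up to 0x40000 by the alignment.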
@@ -2077,7 +2143,8 @@ RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
                                                      void *host),
                                     MemoryRegion *mr, Error **errp)
 {
-    return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
+    return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true,
+                                   false, mr, errp);
 }
 
 static void reclaim_ramblock(RAMBlock *block)
@@ -2153,9 +2220,9 @@ void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
                             flags, -1, 0);
             }
             if (area != vaddr) {
-                fprintf(stderr, "Could not remap addr: "
-                        RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
-                        length, addr);
+                error_report("Could not remap addr: "
+                             RAM_ADDR_FMT "@" RAM_ADDR_FMT "",
+                             length, addr);
                 exit(1);
             }
             memory_try_enable_merging(vaddr, length);
@@ -2328,18 +2395,55 @@ ram_addr_t qemu_ram_addr_from_host(void *ptr)
     return block->offset + offset;
 }
 
-/* Called within RCU critical section. */
-static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
-                               uint64_t val, unsigned size)
+/* Called within RCU critical section. */
+void memory_notdirty_write_prepare(NotDirtyInfo *ndi,
+                                   CPUState *cpu,
+                                   vaddr mem_vaddr,
+                                   ram_addr_t ram_addr,
+                                   unsigned size)
 {
-    bool locked = false;
+    ndi->cpu = cpu;
+    ndi->ram_addr = ram_addr;
+    ndi->mem_vaddr = mem_vaddr;
+    ndi->size = size;
+    ndi->locked = false;
 
     assert(tcg_enabled());
     if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
-        locked = true;
+        ndi->locked = true;
         tb_lock();
         tb_invalidate_phys_page_fast(ram_addr, size);
     }
+}
+
+/* Called within RCU critical section. */
+void memory_notdirty_write_complete(NotDirtyInfo *ndi)
+{
+    if (ndi->locked) {
+        tb_unlock();
+    }
+
+    /* Set both VGA and migration bits for simplicity and to remove
+     * the notdirty callback faster.
+     */
+    cpu_physical_memory_set_dirty_range(ndi->ram_addr, ndi->size,
+                                        DIRTY_CLIENTS_NOCODE);
+    /* we remove the notdirty callback only if the code has been
+       flushed */
+    if (!cpu_physical_memory_is_clean(ndi->ram_addr)) {
+        tlb_set_dirty(ndi->cpu, ndi->mem_vaddr);
+    }
+}
+
+/* Called within RCU critical section. */
+static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
+                               uint64_t val, unsigned size)
+{
+    NotDirtyInfo ndi;
+
+    memory_notdirty_write_prepare(&ndi, current_cpu, current_cpu->mem_io_vaddr,
+                                  ram_addr, size);
+
     switch (size) {
     case 1:
         stb_p(qemu_map_ram_ptr(NULL, ram_addr), val);
@@ -2350,24 +2454,13 @@ static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
     case 4:
         stl_p(qemu_map_ram_ptr(NULL, ram_addr), val);
         break;
+    case 8:
+        stq_p(qemu_map_ram_ptr(NULL, ram_addr), val);
+        break;
     default:
         abort();
     }
-
-    if (locked) {
-        tb_unlock();
-    }
-
-    /* Set both VGA and migration bits for simplicity and to remove
-     * the notdirty callback faster.
-     */
-    cpu_physical_memory_set_dirty_range(ram_addr, size,
-                                        DIRTY_CLIENTS_NOCODE);
-    /* we remove the notdirty callback only if the code has been
-       flushed */
-    if (!cpu_physical_memory_is_clean(ram_addr)) {
-        tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
-    }
+    memory_notdirty_write_complete(&ndi);
 }
 
 static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
@@ -2380,6 +2473,16 @@ static const MemoryRegionOps notdirty_mem_ops = {
     .write = notdirty_mem_write,
     .valid.accepts = notdirty_mem_accepts,
     .endianness = DEVICE_NATIVE_ENDIAN,
+    .valid = {
+        .min_access_size = 1,
+        .max_access_size = 8,
+        .unaligned = false,
+    },
+    .impl = {
+        .min_access_size = 1,
+        .max_access_size = 8,
+        .unaligned = false,
+    },
 };
 
 /* Generate a debug exception if a watchpoint has been hit. */
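Splitting notdirty_mem_write() into memory_notdirty_write_prepare() and memory_notdirty_write_complete() above lets other code bracket its own stores with the same TB-invalidation and dirty-bit bookkeeping. The toy sketch below only shows that bracketing shape; the struct and the two helpers here are stand-ins, with the tb_lock()/dirty-range work reduced to comments.

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>
    #include <stdbool.h>

    typedef struct {
        uint64_t ram_addr;
        unsigned size;
        bool locked;
    } NotDirtyInfo;

    static void write_prepare(NotDirtyInfo *ndi, uint64_t ram_addr, unsigned size)
    {
        ndi->ram_addr = ram_addr;
        ndi->size = size;
        ndi->locked = true;      /* in QEMU: tb_lock() + invalidate TBs here */
    }

    static void write_complete(NotDirtyInfo *ndi)
    {
        if (ndi->locked) {
            ndi->locked = false; /* in QEMU: tb_unlock() */
        }
        /* in QEMU: mark the range dirty for VGA/migration here */
        printf("marked %u byte(s) at offset %#llx dirty\n",
               ndi->size, (unsigned long long)ndi->ram_addr);
    }

    int main(void)
    {
        uint8_t ram[16] = { 0 };
        uint64_t val = 0x1122334455667788ull;
        NotDirtyInfo ndi;

        write_prepare(&ndi, 8, sizeof(val));
        memcpy(&ram[8], &val, sizeof(val));   /* the guest-visible store */
        write_complete(&ndi);
        return 0;
    }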
@@ -2387,11 +2490,8 @@ static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
 {
     CPUState *cpu = current_cpu;
     CPUClass *cc = CPU_GET_CLASS(cpu);
-    CPUArchState *env = cpu->env_ptr;
-    target_ulong pc, cs_base;
     target_ulong vaddr;
     CPUWatchpoint *wp;
-    uint32_t cpu_flags;
 
     assert(tcg_enabled());
     if (cpu->watchpoint_hit) {
@@ -2431,8 +2531,8 @@ static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
                 cpu->exception_index = EXCP_DEBUG;
                 cpu_loop_exit(cpu);
             } else {
-                cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
-                tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
+                /* Force execution of one insn next time. */
+                cpu->cflags_next_tb = 1 | curr_cflags();
                 cpu_loop_exit_noexc(cpu);
             }
         }
@@ -2464,6 +2564,9 @@ static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
     case 4:
         data = address_space_ldl(as, addr, attrs, &res);
         break;
+    case 8:
+        data = address_space_ldq(as, addr, attrs, &res);
+        break;
     default: abort();
     }
     *pdata = data;
@@ -2489,6 +2592,9 @@ static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
     case 4:
         address_space_stl(as, addr, val, attrs, &res);
         break;
+    case 8:
+        address_space_stq(as, addr, val, attrs, &res);
+        break;
     default: abort();
     }
     return res;
@@ -2498,6 +2604,16 @@ static const MemoryRegionOps watch_mem_ops = {
     .read_with_attrs = watch_mem_read,
     .write_with_attrs = watch_mem_write,
     .endianness = DEVICE_NATIVE_ENDIAN,
+    .valid = {
+        .min_access_size = 1,
+        .max_access_size = 8,
+        .unaligned = false,
+    },
+    .impl = {
+        .min_access_size = 1,
+        .max_access_size = 8,
+        .unaligned = false,
+    },
 };
 
 static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs,
@@ -2645,6 +2761,37 @@ static uint16_t dummy_section(PhysPageMap *map, FlatView *fv, MemoryRegion *mr)
     return phys_section_add(map, &section);
 }
 
+static void readonly_mem_write(void *opaque, hwaddr addr,
+                               uint64_t val, unsigned size)
+{
+    /* Ignore any write to ROM. */
+}
+
+static bool readonly_mem_accepts(void *opaque, hwaddr addr,
+                                 unsigned size, bool is_write)
+{
+    return is_write;
+}
+
+/* This will only be used for writes, because reads are special cased
+ * to directly access the underlying host ram.
+ */
+static const MemoryRegionOps readonly_mem_ops = {
+    .write = readonly_mem_write,
+    .valid.accepts = readonly_mem_accepts,
+    .endianness = DEVICE_NATIVE_ENDIAN,
+    .valid = {
+        .min_access_size = 1,
+        .max_access_size = 8,
+        .unaligned = false,
+    },
+    .impl = {
+        .min_access_size = 1,
+        .max_access_size = 8,
+        .unaligned = false,
+    },
+};
+
 MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
 {
     int asidx = cpu_asidx_from_attrs(cpu, attrs);
@@ -2657,7 +2804,8 @@ MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
 
 static void io_mem_init(void)
 {
-    memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
+    memory_region_init_io(&io_mem_rom, NULL, &readonly_mem_ops,
+                          NULL, NULL, UINT64_MAX);
     memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
                           NULL, UINT64_MAX);
 
@@ -3606,8 +3754,6 @@ void page_size_init(void)
 {
     /* NOTE: we can always suppose that qemu_host_page_size >=
        TARGET_PAGE_SIZE */
-    qemu_real_host_page_size = getpagesize();
-    qemu_real_host_page_mask = -(intptr_t)qemu_real_host_page_size;
     if (qemu_host_page_size == 0) {
         qemu_host_page_size = qemu_real_host_page_size;
     }
@@ -3616,3 +3762,87 @@ void page_size_init(void)
     }
     qemu_host_page_mask = -(intptr_t)qemu_host_page_size;
 }
+
+#if !defined(CONFIG_USER_ONLY)
+
+static void mtree_print_phys_entries(fprintf_function mon, void *f,
+                                     int start, int end, int skip, int ptr)
+{
+    if (start == end - 1) {
+        mon(f, "\t%3d      ", start);
+    } else {
+        mon(f, "\t%3d..%-3d ", start, end - 1);
+    }
+    mon(f, " skip=%d ", skip);
+    if (ptr == PHYS_MAP_NODE_NIL) {
+        mon(f, " ptr=NIL");
+    } else if (!skip) {
+        mon(f, " ptr=#%d", ptr);
+    } else {
+        mon(f, " ptr=[%d]", ptr);
+    }
+    mon(f, "\n");
+}
+
+#define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \
+                           int128_sub((size), int128_one())) : 0)
+
+void mtree_print_dispatch(fprintf_function mon, void *f,
+                          AddressSpaceDispatch *d, MemoryRegion *root)
+{
+    int i;
+
+    mon(f, "  Dispatch\n");
+    mon(f, "    Physical sections\n");
+
+    for (i = 0; i < d->map.sections_nb; ++i) {
+        MemoryRegionSection *s = d->map.sections + i;
+        const char *names[] = { " [unassigned]", " [not dirty]",
+                                " [ROM]", " [watch]" };
+
+        mon(f, "      #%d @" TARGET_FMT_plx ".." TARGET_FMT_plx " %s%s%s%s%s",
+            i,
+            s->offset_within_address_space,
+            s->offset_within_address_space + MR_SIZE(s->mr->size),
+            s->mr->name ? s->mr->name : "(noname)",
+            i < ARRAY_SIZE(names) ? names[i] : "",
+            s->mr == root ? " [ROOT]" : "",
+            s == d->mru_section ? " [MRU]" : "",
+            s->mr->is_iommu ? " [iommu]" : "");
+
+        if (s->mr->alias) {
+            mon(f, " alias=%s", s->mr->alias->name ?
+                    s->mr->alias->name : "noname");
+        }
+        mon(f, "\n");
+    }
+
+    mon(f, "    Nodes (%d bits per level, %d levels) ptr=[%d] skip=%d\n",
+               P_L2_BITS, P_L2_LEVELS, d->phys_map.ptr, d->phys_map.skip);
+    for (i = 0; i < d->map.nodes_nb; ++i) {
+        int j, jprev;
+        PhysPageEntry prev;
+        Node *n = d->map.nodes + i;
+
+        mon(f, "      [%d]\n", i);
+
+        for (j = 0, jprev = 0, prev = *n[0]; j < ARRAY_SIZE(*n); ++j) {
+            PhysPageEntry *pe = *n + j;
+
+            if (pe->ptr == prev.ptr && pe->skip == prev.skip) {
+                continue;
+            }
+
+            mtree_print_phys_entries(mon, f, jprev, j, prev.skip, prev.ptr);
+
+            jprev = j;
+            prev = *pe;
+        }
+
+        if (jprev != ARRAY_SIZE(*n)) {
+            mtree_print_phys_entries(mon, f, jprev, j, prev.skip, prev.ptr);
+        }
+    }
+}
+
+#endif
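mtree_print_dispatch() above collapses runs of identical PhysPageEntry values with the jprev/prev bookkeeping before handing them to mtree_print_phys_entries(). The standalone example below shows the same run-collapsing loop on a plain int array; print_run() and the sample data are made up for illustration.

    #include <stdio.h>

    /* Print runs of equal values as "start..end value". */
    static void print_run(int start, int end, int value)
    {
        if (start == end - 1) {
            printf("%3d      %d\n", start, value);
        } else {
            printf("%3d..%-3d %d\n", start, end - 1, value);
        }
    }

    int main(void)
    {
        int entries[] = { 7, 7, 7, 7, 3, 3, 9 };
        int n = sizeof(entries) / sizeof(entries[0]);
        int jprev = 0, prev = entries[0];

        for (int j = 0; j < n; j++) {
            if (entries[j] == prev) {
                continue;
            }
            print_run(jprev, j, prev);
            jprev = j;
            prev = entries[j];
        }
        print_run(jprev, n, prev);   /* flush the final run */
        return 0;
    }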