+ subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
+ >> IO_MEM_SHIFT];
+ }
+ subpage_register(subpage, start_addr2, end_addr2, phys_offset,
+ region_offset);
+ p->region_offset = 0;
+ } else {
+ p->phys_offset = phys_offset;
+ if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
+ (phys_offset & IO_MEM_ROMD))
+ phys_offset += TARGET_PAGE_SIZE;
+ }
+ } else {
+ p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
+ p->phys_offset = phys_offset;
+ p->region_offset = region_offset;
+ if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
+ (phys_offset & IO_MEM_ROMD)) {
+ phys_offset += TARGET_PAGE_SIZE;
+ } else {
+ target_phys_addr_t start_addr2, end_addr2;
+ int need_subpage = 0;
+
+ CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
+ end_addr2, need_subpage);
+
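+            /* The new region covers only part of this page: register it
+               through a subpage, leaving the rest of the page unassigned. */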
+ if (need_subpage) {
+ subpage = subpage_init((addr & TARGET_PAGE_MASK),
+ &p->phys_offset, IO_MEM_UNASSIGNED,
+ addr & TARGET_PAGE_MASK);
+ subpage_register(subpage, start_addr2, end_addr2,
+ phys_offset, region_offset);
+ p->region_offset = 0;
+ }
+ }
+ }
+ region_offset += TARGET_PAGE_SIZE;
+ addr += TARGET_PAGE_SIZE;
+ } while (addr != end_addr);
+
+ /* since each CPU stores ram addresses in its TLB cache, we must
+ reset the modified entries */
+    /* XXX: slow! */
+ for(env = first_cpu; env != NULL; env = env->next_cpu) {
+ tlb_flush(env, 1);
+ }
+}
+
+/* XXX: temporary until new memory mapping API */
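+/* Return the phys_offset stored in the PhysPageDesc for a guest physical
+   address (a RAM offset or an I/O memory index), or IO_MEM_UNASSIGNED if
+   no memory has been registered for that page. */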
+ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
+{
+ PhysPageDesc *p;
+
+ p = phys_page_find(addr >> TARGET_PAGE_BITS);
+ if (!p)
+ return IO_MEM_UNASSIGNED;
+ return p->phys_offset;
+}
+
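+/* Mark an MMIO region as coalesced: under KVM, writes to such a region are
+   buffered in the kernel and delivered in batches, reducing VM exits. */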
+void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
+{
+ if (kvm_enabled())
+ kvm_coalesce_mmio_region(addr, size);
+}
+
+void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
+{
+ if (kvm_enabled())
+ kvm_uncoalesce_mmio_region(addr, size);
+}
+
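+/* Flush any writes still pending in KVM's coalesced MMIO buffer so they
+   reach the device models before the caller proceeds. */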
+void qemu_flush_coalesced_mmio_buffer(void)
+{
+ if (kvm_enabled())
+ kvm_flush_coalesced_mmio_buffer();
+}
+
+#if defined(__linux__) && !defined(TARGET_S390X)
+
+#include <sys/vfs.h>
+
+#define HUGETLBFS_MAGIC 0x958458f6
+
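+/* Return the huge page size of the filesystem backing 'path' (statfs
+   reports it as f_bsize on hugetlbfs), or 0 on error.  A warning is
+   printed if the path is not on hugetlbfs. */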
+static long gethugepagesize(const char *path)
+{
+ struct statfs fs;
+ int ret;
+
+ do {
+ ret = statfs(path, &fs);
+ } while (ret != 0 && errno == EINTR);
+
+ if (ret != 0) {
+ perror(path);
+ return 0;
+ }
+
+ if (fs.f_type != HUGETLBFS_MAGIC)
+ fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
+
+ return fs.f_bsize;
+}
+
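+/* Allocate guest RAM from a file created under 'path' (normally a
+   hugetlbfs mount passed with -mem-path).  Returns the mmap'ed area, or
+   NULL so the caller can fall back to anonymous memory. */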
+static void *file_ram_alloc(RAMBlock *block,
+ ram_addr_t memory,
+ const char *path)
+{
+ char *filename;
+ void *area;
+ int fd;
+#ifdef MAP_POPULATE
+ int flags;
+#endif
+ unsigned long hpagesize;
+
+ hpagesize = gethugepagesize(path);
+ if (!hpagesize) {
+ return NULL;
+ }
+
+ if (memory < hpagesize) {
+ return NULL;
+ }
+
+ if (kvm_enabled() && !kvm_has_sync_mmu()) {
+ fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
+ return NULL;
+ }
+
+ if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
+ return NULL;
+ }
+
+ fd = mkstemp(filename);
+ if (fd < 0) {
+ perror("unable to create backing store for hugepages");
+ free(filename);
+ return NULL;
+ }
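+    /* The file is needed only for the lifetime of the mapping, so unlink
+       it immediately; the open fd keeps the backing store alive. */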
+ unlink(filename);
+ free(filename);
+
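+    /* Round the size up to a multiple of the huge page size. */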
+    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);
+
+ /*
+ * ftruncate is not supported by hugetlbfs in older
+ * hosts, so don't bother bailing out on errors.
+ * If anything goes wrong with it under other filesystems,
+ * mmap will fail.
+ */
+ if (ftruncate(fd, memory))
+ perror("ftruncate");
+
+#ifdef MAP_POPULATE
+ /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
+ * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
+ * to sidestep this quirk.
+ */
+ flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
+ area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
+#else
+ area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
+#endif
+ if (area == MAP_FAILED) {
+ perror("file_ram_alloc: can't mmap RAM pages");
+ close(fd);
+        return NULL;
+ }
+ block->fd = fd;
+ return area;
+}
+#endif
+
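+/* Find the smallest gap between the registered RAM blocks that can hold
+   'size' bytes (best fit) and return its start offset in ram_addr_t space. */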
+static ram_addr_t find_ram_offset(ram_addr_t size)
+{
+ RAMBlock *block, *next_block;
+ ram_addr_t offset = 0, mingap = ULONG_MAX;
+
+ if (QLIST_EMPTY(&ram_list.blocks))
+ return 0;
+
+ QLIST_FOREACH(block, &ram_list.blocks, next) {
+ ram_addr_t end, next = ULONG_MAX;
+
+ end = block->offset + block->length;
+
+ QLIST_FOREACH(next_block, &ram_list.blocks, next) {
+ if (next_block->offset >= end) {
+ next = MIN(next, next_block->offset);
+ }
+ }
+ if (next - end >= size && next - end < mingap) {
+ offset = end;
+ mingap = next - end;
+ }
+ }
+ return offset;
+}
+
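+/* Return the first ram_addr_t beyond the highest registered RAM block. */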
+static ram_addr_t last_ram_offset(void)
+{
+ RAMBlock *block;
+ ram_addr_t last = 0;
+
+ QLIST_FOREACH(block, &ram_list.blocks, next)
+ last = MAX(last, block->offset + block->length);
+
+ return last;
+}
+
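+/* Register a RAM block of 'size' bytes under an idstr derived from 'dev'
+   and 'name'.  If 'host' is non-NULL the block is backed by caller-provided
+   memory; otherwise backing memory is allocated here (from -mem-path or
+   anonymously).  Returns the block's offset in ram_addr_t space. */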
+ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
+ ram_addr_t size, void *host)
+{
+ RAMBlock *new_block, *block;
+
+ size = TARGET_PAGE_ALIGN(size);
+ new_block = qemu_mallocz(sizeof(*new_block));
+
+ if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
+ char *id = dev->parent_bus->info->get_dev_path(dev);
+ if (id) {
+ snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
+ qemu_free(id);
+ }
+ }
+ pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
+
+ QLIST_FOREACH(block, &ram_list.blocks, next) {
+ if (!strcmp(block->idstr, new_block->idstr)) {
+ fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
+ new_block->idstr);
+ abort();
+ }
+ }
+
+ if (host) {
+ new_block->host = host;
+ new_block->flags |= RAM_PREALLOC_MASK;
+ } else {
+ if (mem_path) {
+#if defined (__linux__) && !defined(TARGET_S390X)
+ new_block->host = file_ram_alloc(new_block, size, mem_path);
+ if (!new_block->host) {
+ new_block->host = qemu_vmalloc(size);
+ qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
+ }
+#else
+ fprintf(stderr, "-mem-path option unsupported\n");
+ exit(1);
+#endif
+ } else {
+#if defined(TARGET_S390X) && defined(CONFIG_KVM)
+ /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
+ new_block->host = mmap((void*)0x1000000, size,
+ PROT_EXEC|PROT_READ|PROT_WRITE,
+ MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+#else
+ new_block->host = qemu_vmalloc(size);
+#endif
+ qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
+ }
+ }
+
+ new_block->offset = find_ram_offset(size);
+ new_block->length = size;
+
+ QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
+
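+    /* Grow the dirty memory bitmap to cover the new block and mark all of
+       its pages dirty. */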
+ ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
+ last_ram_offset() >> TARGET_PAGE_BITS);
+ memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
+ 0xff, size >> TARGET_PAGE_BITS);
+
+ if (kvm_enabled())
+ kvm_setup_guest_memory(new_block->host, size);
+
+ return new_block->offset;
+}
+
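+/* Same as qemu_ram_alloc_from_ptr(), but let QEMU allocate the backing
+   host memory itself. */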
+ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size)
+{
+ return qemu_ram_alloc_from_ptr(dev, name, size, NULL);
+}
+
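+/* Unregister the RAM block starting at 'addr' and release its host memory,
+   unless that memory was supplied by the caller (RAM_PREALLOC_MASK). */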
+void qemu_ram_free(ram_addr_t addr)
+{
+ RAMBlock *block;
+
+ QLIST_FOREACH(block, &ram_list.blocks, next) {
+ if (addr == block->offset) {
+ QLIST_REMOVE(block, next);
+ if (block->flags & RAM_PREALLOC_MASK) {
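+                /* memory was provided by the caller; nothing to free */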
+ ;
+ } else if (mem_path) {
+#if defined (__linux__) && !defined(TARGET_S390X)
+ if (block->fd) {
+ munmap(block->host, block->length);
+ close(block->fd);
+ } else {
+ qemu_vfree(block->host);