/*
 * Support for RAM backed by mmapped host memory.
 *
 * Copyright (c) 2015 Red Hat, Inc.
 *
 * Authors:
 *  Michael S. Tsirkin <[email protected]>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later.  See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/mmap-alloc.h"
#include "qemu/host-utils.h"

#define HUGETLBFS_MAGIC 0x958458f6

#ifdef CONFIG_LINUX
#include <sys/vfs.h>
#endif

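/*
 * Return the page size of the memory backing @fd: the hugetlbfs page
 * size if @fd refers to a hugetlbfs file, otherwise the host page size
 * (or QEMU_VMALLOC_ALIGN on SPARC Linux, which needs a larger
 * alignment than the page size).
 */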
size_t qemu_fd_getpagesize(int fd)
{
#ifdef CONFIG_LINUX
    struct statfs fs;
    int ret;

    if (fd != -1) {
        do {
            ret = fstatfs(fd, &fs);
        } while (ret != 0 && errno == EINTR);

        if (ret == 0 && fs.f_type == HUGETLBFS_MAGIC) {
            return fs.f_bsize;
        }
    }
#ifdef __sparc__
    /* SPARC Linux needs greater alignment than the pagesize */
    return QEMU_VMALLOC_ALIGN;
#endif
#endif

    return getpagesize();
}

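/*
 * Return the page size of the filesystem that backs @mem_path.  Exits
 * with an error if the path cannot be statfs()ed; for non-hugetlbfs
 * paths this falls back to the host page size (or QEMU_VMALLOC_ALIGN
 * on SPARC Linux).
 */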
size_t qemu_mempath_getpagesize(const char *mem_path)
{
#ifdef CONFIG_LINUX
    struct statfs fs;
    int ret;

    if (mem_path) {
        do {
            ret = statfs(mem_path, &fs);
        } while (ret != 0 && errno == EINTR);

        if (ret != 0) {
            fprintf(stderr, "Couldn't statfs() memory path: %s\n",
                    strerror(errno));
            exit(1);
        }

        if (fs.f_type == HUGETLBFS_MAGIC) {
            /* It's hugetlbfs; return the huge page size */
            return fs.f_bsize;
        }
    }
#ifdef __sparc__
    /* SPARC Linux needs greater alignment than the pagesize */
    return QEMU_VMALLOC_ALIGN;
#endif
#endif

    return getpagesize();
}

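/*
 * Reserve a region one page larger than @size with PROT_NONE, map the
 * RAM block read/write at the first @align-aligned address inside it,
 * and keep the trailing PROT_NONE page as a guard page.  Returns the
 * aligned block, or MAP_FAILED on error.
 */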
void *qemu_ram_mmap(int fd, size_t size, size_t align, bool shared)
{
    /*
     * Note: this always allocates at least one extra page of virtual address
     * space, even if size is already aligned.
     */
    size_t total = size + align;
#if defined(__powerpc64__) && defined(__linux__)
    /* On ppc64, mappings in the same segment (aka slice) must share the same
     * page size.  Since we will be re-allocating part of this segment from
     * the supplied fd, we must make sure to use the same page size; to that
     * end we mmap the supplied fd here, with MAP_NORESERVE set so that no
     * backing store memory is allocated.  We skip this when the fd uses the
     * system page size, in which case anonymous memory is fine.
     */
    int anonfd = fd == -1 || qemu_fd_getpagesize(fd) == getpagesize() ? -1 : fd;
    int flags = anonfd == -1 ? MAP_ANONYMOUS : MAP_NORESERVE;
    void *ptr = mmap(0, total, PROT_NONE, flags | MAP_PRIVATE, anonfd, 0);
#else
    void *ptr = mmap(0, total, PROT_NONE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
#endif
    size_t offset;
    void *ptr1;

    if (ptr == MAP_FAILED) {
        return MAP_FAILED;
    }

    assert(is_power_of_2(align));
    /* Always align to host page size */
    assert(align >= getpagesize());

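    /*
     * Skip to the first @align-aligned address inside the reservation;
     * @offset is the size of the unused gap at the start.
     */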
    offset = QEMU_ALIGN_UP((uintptr_t)ptr, align) - (uintptr_t)ptr;
    ptr1 = mmap(ptr + offset, size, PROT_READ | PROT_WRITE,
                MAP_FIXED |
                (fd == -1 ? MAP_ANONYMOUS : 0) |
                (shared ? MAP_SHARED : MAP_PRIVATE),
                fd, 0);
    if (ptr1 == MAP_FAILED) {
        munmap(ptr, total);
        return MAP_FAILED;
    }

    if (offset > 0) {
        munmap(ptr, offset);
    }

    /*
     * Leave a single PROT_NONE page allocated after the RAM block, to
     * serve as a guard page against potential buffer overflows.
     */
    total -= offset;
    if (total > size + getpagesize()) {
        munmap(ptr1 + size + getpagesize(), total - size - getpagesize());
    }

    return ptr1;
}

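/*
 * Resulting layout after qemu_ram_mmap() (addresses grow rightward):
 *
 *   ptr          ptr1 = ptr + offset        ptr1 + size
 *    |- unmapped -|------- RAM block -------|- guard page -|
 *
 * The leading gap and any excess reservation beyond the guard page have
 * already been returned to the kernel by the munmap() calls above.
 */

/*
 * Undo qemu_ram_mmap(): @size must be the same size that was passed at
 * allocation time, so that the trailing guard page is unmapped together
 * with the RAM block.
 */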
void qemu_ram_munmap(void *ptr, size_t size)
{
    if (ptr) {
        /* Unmap both the RAM block and the guard page */
        munmap(ptr, size + getpagesize());
    }
}
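
/*
 * Usage sketch (illustrative only; not part of this file's API).  It
 * allocates a shared RAM block backed by a hypothetical hugetlbfs fd,
 * aligned to the backing page size, then frees it.  The fd, the size,
 * and the function name are assumptions made for the example.
 */
#if 0
static void example_alloc_ram(int hugetlb_fd)
{
    size_t size = 64 * 1024 * 1024; /* must be a multiple of the
                                       backing (huge) page size */
    size_t align = qemu_fd_getpagesize(hugetlb_fd);
    void *block = qemu_ram_mmap(hugetlb_fd, size, align, true);

    if (block == MAP_FAILED) {
        perror("qemu_ram_mmap");
        return;
    }

    /* ... use @block as guest RAM ... */

    qemu_ram_munmap(block, size);
}
#endif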