/*
 * Support for RAM backed by mmaped host memory.
 *
 * Copyright (c) 2015 Red Hat, Inc.
 *
 * Authors:
 *  Michael S. Tsirkin <[email protected]>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later. See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/mmap-alloc.h"
#include "qemu/host-utils.h"

#define HUGETLBFS_MAGIC 0x958458f6

#ifdef CONFIG_LINUX
#include <sys/vfs.h>
#endif

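/*
 * Return the page size backing @fd. On Linux, a file on hugetlbfs reports
 * its huge page size via fstatfs(); otherwise fall back to the host page
 * size (or QEMU_VMALLOC_ALIGN on SPARC Linux, which needs extra alignment).
 */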
size_t qemu_fd_getpagesize(int fd)
{
#ifdef CONFIG_LINUX
    struct statfs fs;
    int ret;

    if (fd != -1) {
        do {
            ret = fstatfs(fd, &fs);
        } while (ret != 0 && errno == EINTR);

        if (ret == 0 && fs.f_type == HUGETLBFS_MAGIC) {
            return fs.f_bsize;
        }
    }
#ifdef __sparc__
    /* SPARC Linux needs greater alignment than the pagesize */
    return QEMU_VMALLOC_ALIGN;
#endif
#endif

    return getpagesize();
}

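/*
 * Like qemu_fd_getpagesize(), but probe a filesystem path rather than an
 * open file descriptor. Unlike the fd variant, a failing statfs() here is
 * treated as a fatal error.
 */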
size_t qemu_mempath_getpagesize(const char *mem_path)
{
#ifdef CONFIG_LINUX
    struct statfs fs;
    int ret;

    if (mem_path) {
        do {
            ret = statfs(mem_path, &fs);
        } while (ret != 0 && errno == EINTR);

        if (ret != 0) {
            fprintf(stderr, "Couldn't statfs() memory path: %s\n",
                    strerror(errno));
            exit(1);
        }

        if (fs.f_type == HUGETLBFS_MAGIC) {
            /* It's a hugepage, return the huge page size */
            return fs.f_bsize;
        }
    }
#ifdef __sparc__
    /* SPARC Linux needs greater alignment than the pagesize */
    return QEMU_VMALLOC_ALIGN;
#endif
#endif

    return getpagesize();
}

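/*
 * Allocate a RAM block of @size bytes, aligned to @align, optionally backed
 * by @fd (pass -1 for anonymous memory). The block is carved out of a larger
 * PROT_NONE reservation so that its start can be aligned and a guard page
 * can be left after it. Returns MAP_FAILED on error. Note that @is_pmem is
 * currently unused in this function.
 */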
void *qemu_ram_mmap(int fd,
                    size_t size,
                    size_t align,
                    bool shared,
                    bool is_pmem)
{
    int flags;
    int guardfd;
    size_t offset;
    size_t pagesize;
    size_t total;
    void *guardptr;
    void *ptr;

    /*
     * Note: this always allocates at least one extra page of virtual address
     * space, even if size is already aligned.
     */
    total = size + align;

#if defined(__powerpc64__) && defined(__linux__)
    /* On ppc64 mappings in the same segment (aka slice) must share the same
     * page size. Since we will be re-allocating part of this segment
     * from the supplied fd, we should make sure to use the same page size, to
     * this end we mmap the supplied fd. In this case, set MAP_NORESERVE to
     * avoid allocating backing store memory.
     * We do this unless we are using the system page size, in which case
     * anonymous memory is OK.
     */
    flags = MAP_PRIVATE;
    pagesize = qemu_fd_getpagesize(fd);
    if (fd == -1 || pagesize == getpagesize()) {
        guardfd = -1;
        flags |= MAP_ANONYMOUS;
    } else {
        guardfd = fd;
        flags |= MAP_NORESERVE;
    }
#else
    guardfd = -1;
    pagesize = getpagesize();
    flags = MAP_PRIVATE | MAP_ANONYMOUS;
#endif

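    /*
     * Reserve the entire region as inaccessible address space first; the
     * usable RAM block is mapped over part of it below.
     */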
    guardptr = mmap(0, total, PROT_NONE, flags, guardfd, 0);

    if (guardptr == MAP_FAILED) {
        return MAP_FAILED;
    }

    assert(is_power_of_2(align));
    /* Always align to host page size */
    assert(align >= pagesize);

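    /*
     * Map the real RAM block with MAP_FIXED at the first align-aligned
     * address inside the reservation, replacing the PROT_NONE pages there.
     */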
    flags = MAP_FIXED;
    flags |= fd == -1 ? MAP_ANONYMOUS : 0;
    flags |= shared ? MAP_SHARED : MAP_PRIVATE;
    offset = QEMU_ALIGN_UP((uintptr_t)guardptr, align) - (uintptr_t)guardptr;

    ptr = mmap(guardptr + offset, size, PROT_READ | PROT_WRITE, flags, fd, 0);

    if (ptr == MAP_FAILED) {
        munmap(guardptr, total);
        return MAP_FAILED;
    }

    if (offset > 0) {
        munmap(guardptr, offset);
    }

    /*
     * Leave a single PROT_NONE page allocated after the RAM block, to serve as
     * a guard page guarding against potential buffer overflows.
     */
    total -= offset;
    if (total > size + pagesize) {
        munmap(ptr + size + pagesize, total - size - pagesize);
    }

    return ptr;
}

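/*
 * Illustrative usage (a sketch, not a call site in this file): allocate an
 * anonymous 16 MiB block aligned to 2 MiB, then release it together with
 * its guard page:
 *
 *     size_t size = 16 * 1024 * 1024;
 *     void *block = qemu_ram_mmap(-1, size, 2 * 1024 * 1024, false, false);
 *     if (block != MAP_FAILED) {
 *         ...
 *         qemu_ram_munmap(-1, block, size);
 *     }
 */

/*
 * Release a block obtained from qemu_ram_mmap(), including the trailing
 * guard page. @fd is consulted only on ppc64 Linux, where the guard page
 * size must match the page size of the original mapping.
 */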
void qemu_ram_munmap(int fd, void *ptr, size_t size)
{
    size_t pagesize;

    if (ptr) {
        /* Unmap both the RAM block and the guard page */
#if defined(__powerpc64__) && defined(__linux__)
        pagesize = qemu_fd_getpagesize(fd);
#else
        pagesize = getpagesize();
#endif
        munmap(ptr, size + pagesize);
    }
}