/*
 *  mmap support for qemu
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "trace.h"
#include "exec/log.h"
#include "qemu.h"
#include "user-internals.h"
#include "user-mmap.h"

static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
static __thread int mmap_lock_count;

void mmap_lock(void)
{
    if (mmap_lock_count++ == 0) {
        pthread_mutex_lock(&mmap_mutex);
    }
}

void mmap_unlock(void)
{
    if (--mmap_lock_count == 0) {
        pthread_mutex_unlock(&mmap_mutex);
    }
}

bool have_mmap_lock(void)
{
    return mmap_lock_count > 0 ? true : false;
}
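
/*
 * Note (added, not in the original source): mmap_lock() is recursive within
 * a thread via the thread-local mmap_lock_count, so a holder may call other
 * functions that take the lock again.  For example, target_mmap() below
 * holds the lock while it calls target_mprotect(), which does its own
 * mmap_lock()/mmap_unlock(); only the outermost unlock releases the
 * pthread mutex.
 */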

/* Grab lock to make sure things are in a consistent state after fork().  */
void mmap_fork_start(void)
{
    if (mmap_lock_count)
        abort();
    pthread_mutex_lock(&mmap_mutex);
}

void mmap_fork_end(int child)
{
    if (child)
        pthread_mutex_init(&mmap_mutex, NULL);
    else
        pthread_mutex_unlock(&mmap_mutex);
}
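
/*
 * Illustrative usage sketch (added; an assumed calling pattern, not a quote
 * of the callers):
 *
 *     mmap_fork_start();
 *     pid = fork();
 *     mmap_fork_end(pid == 0);
 *
 * The parent simply releases the mutex it took before fork(); the child
 * re-initializes it, because the copy it inherits may record a lock held
 * by a thread that no longer exists in the child.
 */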

/*
 * Validate target prot bitmask.
 * Return the prot bitmask for the host in *HOST_PROT.
 * Return 0 if the target prot bitmask is invalid, otherwise
 * the internal qemu page_flags (which will include PAGE_VALID).
 */
static int validate_prot_to_pageflags(int *host_prot, int prot)
{
    int valid = PROT_READ | PROT_WRITE | PROT_EXEC | TARGET_PROT_SEM;
    int page_flags = (prot & PAGE_BITS) | PAGE_VALID;

    /*
     * For the host, we need not pass anything except read/write/exec.
     * While PROT_SEM is allowed by all hosts, it is also ignored, so
     * don't bother transforming guest bit to host bit.  Any other
     * target-specific prot bits will not be understood by the host
     * and will need to be encoded into page_flags for qemu emulation.
     *
     * Pages that are executable by the guest will never be executed
     * by the host, but the host will need to be able to read them.
     */
    *host_prot = (prot & (PROT_READ | PROT_WRITE))
               | (prot & PROT_EXEC ? PROT_READ : 0);

#ifdef TARGET_AARCH64
    {
        ARMCPU *cpu = ARM_CPU(thread_cpu);

        /*
         * The PROT_BTI bit is only accepted if the cpu supports the feature.
         * Since this is the unusual case, don't bother checking unless
         * the bit has been requested.  If set and valid, record the bit
         * within QEMU's page_flags.
         */
        if ((prot & TARGET_PROT_BTI) && cpu_isar_feature(aa64_bti, cpu)) {
            valid |= TARGET_PROT_BTI;
            page_flags |= PAGE_BTI;
        }
        /* Similarly for the PROT_MTE bit. */
        if ((prot & TARGET_PROT_MTE) && cpu_isar_feature(aa64_mte, cpu)) {
            valid |= TARGET_PROT_MTE;
            page_flags |= PAGE_MTE;
        }
    }
#endif

    return prot & ~valid ? 0 : page_flags;
}
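
/*
 * Worked example (added, illustrative): a guest request of
 * PROT_READ | PROT_EXEC yields host_prot == PROT_READ (guest code is never
 * executed directly by the host, only read by the translator) and
 * page_flags == PAGE_READ | PAGE_EXEC | PAGE_VALID.  A request containing
 * any bit outside 'valid' makes the function return 0, and the callers
 * then fail with EINVAL.
 */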

/* NOTE: all the constants are the HOST ones, but addresses are target. */
int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
{
    abi_ulong end, host_start, host_end, addr;
    int prot1, ret, page_flags, host_prot;

    trace_target_mprotect(start, len, target_prot);

    if ((start & ~TARGET_PAGE_MASK) != 0) {
        return -TARGET_EINVAL;
    }
    page_flags = validate_prot_to_pageflags(&host_prot, target_prot);
    if (!page_flags) {
        return -TARGET_EINVAL;
    }
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (!guest_range_valid_untagged(start, len)) {
        return -TARGET_ENOMEM;
    }
    if (len == 0) {
        return 0;
    }

    mmap_lock();
    host_start = start & qemu_host_page_mask;
    host_end = HOST_PAGE_ALIGN(end);
    if (start > host_start) {
        /* handle host page containing start */
        prot1 = host_prot;
        for (addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        if (host_end == host_start + qemu_host_page_size) {
            for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(addr);
            }
            end = host_end;
        }
        ret = mprotect(g2h_untagged(host_start), qemu_host_page_size,
                       prot1 & PAGE_BITS);
        if (ret != 0) {
            goto error;
        }
        host_start += qemu_host_page_size;
    }
    if (end < host_end) {
        prot1 = host_prot;
        for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        ret = mprotect(g2h_untagged(host_end - qemu_host_page_size),
                       qemu_host_page_size, prot1 & PAGE_BITS);
        if (ret != 0) {
            goto error;
        }
        host_end -= qemu_host_page_size;
    }

    /* handle the pages in the middle */
    if (host_start < host_end) {
        ret = mprotect(g2h_untagged(host_start),
                       host_end - host_start, host_prot);
        if (ret != 0) {
            goto error;
        }
    }
    page_set_flags(start, start + len, page_flags);
    mmap_unlock();
    return 0;
error:
    mmap_unlock();
    return ret;
}
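
/*
 * Added note (illustrative): with 4K guest pages on a 64K-page host,
 * changing the protection of a single guest page cannot be done in
 * isolation, because host mprotect() operates on whole host pages.  The
 * loops above therefore OR in the recorded flags of the other guest pages
 * sharing the first and last host page so their effective permissions are
 * preserved, and only the fully covered host pages in the middle receive
 * exactly host_prot.
 */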

/* map an incomplete host page */
static int mmap_frag(abi_ulong real_start,
                     abi_ulong start, abi_ulong end,
                     int prot, int flags, int fd, abi_ulong offset)
{
    abi_ulong real_end, addr;
    void *host_start;
    int prot1, prot_new;

    real_end = real_start + qemu_host_page_size;
    host_start = g2h_untagged(real_start);

    /* get the protection of the target pages outside the mapping */
    prot1 = 0;
    for (addr = real_start; addr < real_end; addr++) {
        if (addr < start || addr >= end)
            prot1 |= page_get_flags(addr);
    }

    if (prot1 == 0) {
        /* no page was there, so we allocate one */
        void *p = mmap(host_start, qemu_host_page_size, prot,
                       flags | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return -1;
        prot1 = prot;
    }
    prot1 &= PAGE_BITS;

    prot_new = prot | prot1;
    if (!(flags & MAP_ANONYMOUS)) {
        /* msync() won't work here, so we return an error if write is
           possible while it is a shared mapping */
        if ((flags & MAP_TYPE) == MAP_SHARED &&
            (prot & PROT_WRITE))
            return -1;

        /* adjust protection to be able to read */
        if (!(prot1 & PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);

        /* read the corresponding file data */
        if (pread(fd, g2h_untagged(start), end - start, offset) == -1)
            return -1;

        /* put final protection */
        if (prot_new != (prot1 | PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot_new);
    } else {
        if (prot_new != prot1) {
            mprotect(host_start, qemu_host_page_size, prot_new);
        }
        if (prot_new & PROT_WRITE) {
            memset(g2h_untagged(start), 0, end - start);
        }
    }
    return 0;
}
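
/*
 * Illustrative example (added): with 64K host pages and 4K guest pages, a
 * guest file mapping of a single 4K page at 0x11000 covers only part of
 * the host page [0x10000, 0x20000), which may already contain other live
 * guest mappings.  mmap_frag() keeps (or anonymously allocates) that host
 * page, temporarily makes it writable if necessary, pread()s the file
 * contents into just the requested fragment, then applies the combined
 * protection of the old and new guest pages.
 */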

#if HOST_LONG_BITS == 64 && TARGET_ABI_BITS == 64
#ifdef TARGET_AARCH64
# define TASK_UNMAPPED_BASE  0x5500000000
#else
# define TASK_UNMAPPED_BASE  (1ul << 38)
#endif
#else
# define TASK_UNMAPPED_BASE  0x40000000
#endif
abi_ulong mmap_next_start = TASK_UNMAPPED_BASE;

unsigned long last_brk;

/* Subroutine of mmap_find_vma, used when we have pre-allocated a chunk
   of guest address space.  */
static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
                                        abi_ulong align)
{
    abi_ulong addr, end_addr, incr = qemu_host_page_size;
    int prot;
    bool looped = false;

    if (size > reserved_va) {
        return (abi_ulong)-1;
    }

    /* Note that start and size have already been aligned by mmap_find_vma. */

    end_addr = start + size;
    if (start > reserved_va - size) {
        /* Start at the top of the address space.  */
        end_addr = ((reserved_va - size) & -align) + size;
        looped = true;
    }

    /* Search downward from END_ADDR, checking to see if a page is in use.  */
    addr = end_addr;
    while (1) {
        addr -= incr;
        if (addr > end_addr) {
            if (looped) {
                /* Failure.  The entire address space has been searched.  */
                return (abi_ulong)-1;
            }
            /* Re-start at the top of the address space.  */
            addr = end_addr = ((reserved_va - size) & -align) + size;
            looped = true;
        } else {
            prot = page_get_flags(addr);
            if (prot) {
                /* Page in use.  Restart below this page.  */
                addr = end_addr = ((addr - size) & -align) + size;
            } else if (addr && addr + size == end_addr) {
                /* Success!  All pages between ADDR and END_ADDR are free.  */
                if (start == mmap_next_start) {
                    mmap_next_start = addr;
                }
                return addr;
            }
        }
    }
}

/*
 * Find and reserve a free memory area of size 'size'. The search
 * starts at 'start'.
 * It must be called with mmap_lock() held.
 * Return -1 if error.
 */
abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size, abi_ulong align)
{
    void *ptr, *prev;
    abi_ulong addr;
    int wrapped, repeat;

    align = MAX(align, qemu_host_page_size);

    /* If 'start' == 0, then a default start address is used. */
    if (start == 0) {
        start = mmap_next_start;
    } else {
        start &= qemu_host_page_mask;
    }
    start = ROUND_UP(start, align);

    size = HOST_PAGE_ALIGN(size);

    if (reserved_va) {
        return mmap_find_vma_reserved(start, size, align);
    }

    addr = start;
    wrapped = repeat = 0;
    prev = 0;

    for (;; prev = ptr) {
        /*
         * Reserve needed memory area to avoid a race.
         * It should be discarded using:
         *  - mmap() with MAP_FIXED flag
         *  - mremap() with MREMAP_FIXED flag
         *  - shmat() with SHM_REMAP flag
         */
        ptr = mmap(g2h_untagged(addr), size, PROT_NONE,
                   MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);

        /* ENOMEM, if host address space has no memory */
        if (ptr == MAP_FAILED) {
            return (abi_ulong)-1;
        }

        /* Count the number of sequential returns of the same address.
           This is used to modify the search algorithm below. */
        repeat = (ptr == prev ? repeat + 1 : 0);

        if (h2g_valid(ptr + size - 1)) {
            addr = h2g(ptr);

            if ((addr & (align - 1)) == 0) {
                /* Success.  */
                if (start == mmap_next_start && addr >= TASK_UNMAPPED_BASE) {
                    mmap_next_start = addr + size;
                }
                return addr;
            }

            /* The address is not properly aligned for the target.  */
            switch (repeat) {
            case 0:
                /* Assume the result that the kernel gave us is the
                   first with enough free space, so start again at the
                   next higher target page.  */
                addr = ROUND_UP(addr, align);
                break;
            case 1:
                /* Sometimes the kernel decides to perform the allocation
                   at the top end of memory instead.  */
                addr &= -align;
                break;
            case 2:
                /* Start over at low memory.  */
                addr = 0;
                break;
            default:
                /* Fail.  This unaligned block must be the last.  */
                addr = -1;
                break;
            }
        } else {
            /* Since the result the kernel gave didn't fit, start
               again at low memory.  If any repetition, fail.  */
            addr = (repeat ? -1 : 0);
        }

        /* Unmap and try again.  */
        munmap(ptr, size);

        /* ENOMEM if we checked the whole of the target address space.  */
        if (addr == (abi_ulong)-1) {
            return (abi_ulong)-1;
        } else if (addr == 0) {
            if (wrapped) {
                return (abi_ulong)-1;
            }
            wrapped = 1;
            /* Don't actually use 0 when wrapping, instead indicate
               that we'd truly like an allocation in low memory.  */
            addr = (mmap_min_addr > TARGET_PAGE_SIZE
                    ? TARGET_PAGE_ALIGN(mmap_min_addr)
                    : TARGET_PAGE_SIZE);
        } else if (wrapped && addr >= start) {
            return (abi_ulong)-1;
        }
    }
}

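/*
 * Added note (illustrative): the PROT_NONE/MAP_NORESERVE mapping made in
 * the loop above only reserves host address space; it commits no memory.
 * A later target_mmap() replaces it in place with MAP_FIXED, and
 * target_mremap() with MREMAP_FIXED, which is why the in-loop comment
 * lists those (plus shmat() with SHM_REMAP) as the only valid ways to
 * consume the reservation.
 */
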
/* NOTE: all the constants are the HOST ones */
abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
                     int flags, int fd, abi_ulong offset)
{
    abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len;
    int page_flags, host_prot;

    mmap_lock();
    trace_target_mmap(start, len, target_prot, flags, fd, offset);

    if (!len) {
        errno = EINVAL;
        goto fail;
    }

    page_flags = validate_prot_to_pageflags(&host_prot, target_prot);
    if (!page_flags) {
        errno = EINVAL;
        goto fail;
    }

    /* Also check for overflows... */
    len = TARGET_PAGE_ALIGN(len);
    if (!len) {
        errno = ENOMEM;
        goto fail;
    }

    if (offset & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        goto fail;
    }

    /*
     * If we're mapping shared memory, ensure we generate code for parallel
     * execution and flush old translations.  This will work up to the level
     * supported by the host -- anything that requires EXCP_ATOMIC will not
     * be atomic with respect to an external process.
     */
    if (flags & MAP_SHARED) {
        CPUState *cpu = thread_cpu;
        if (!(cpu->tcg_cflags & CF_PARALLEL)) {
            cpu->tcg_cflags |= CF_PARALLEL;
            tb_flush(cpu);
        }
    }

    real_start = start & qemu_host_page_mask;
    host_offset = offset & qemu_host_page_mask;

    /* If the user is asking for the kernel to find a location, do that
       before we truncate the length for mapping files below.  */
    if (!(flags & MAP_FIXED)) {
        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);
        start = mmap_find_vma(real_start, host_len, TARGET_PAGE_SIZE);
        if (start == (abi_ulong)-1) {
            errno = ENOMEM;
            goto fail;
        }
    }

    /* When mapping files into a memory area larger than the file, accesses
       to pages beyond the file size will cause a SIGBUS.

       For example, if mmapping a file of 100 bytes on a host with 4K pages
       emulating a target with 8K pages, the target expects to be able to
       access the first 8K. But the host will trap us on any access beyond
       4K.

       When emulating a target with a larger page size than the host's, we
       may need to truncate file maps at EOF and add extra anonymous pages
       up to the target's page boundary.  */

    if ((qemu_real_host_page_size() < qemu_host_page_size) &&
        !(flags & MAP_ANONYMOUS)) {
        struct stat sb;

        if (fstat(fd, &sb) == -1)
            goto fail;

        /* Are we trying to create a map beyond EOF?  */
        if (offset + len > sb.st_size) {
            /* If so, truncate the file map at EOF aligned with
               the host's real page size. Additional anonymous maps
               will be created beyond EOF.  */
            len = REAL_HOST_PAGE_ALIGN(sb.st_size - offset);
        }
    }

    if (!(flags & MAP_FIXED)) {
        unsigned long host_start;
        void *p;

        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);

        /* Note: we prefer to control the mapping address. It is
           especially important if qemu_host_page_size >
           qemu_real_host_page_size */
        p = mmap(g2h_untagged(start), host_len, host_prot,
                 flags | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
            goto fail;
        }
        /* update start so that it points to the file position at 'offset' */
        host_start = (unsigned long)p;
        if (!(flags & MAP_ANONYMOUS)) {
            p = mmap(g2h_untagged(start), len, host_prot,
                     flags | MAP_FIXED, fd, host_offset);
            if (p == MAP_FAILED) {
                munmap(g2h_untagged(start), host_len);
                goto fail;
            }
            host_start += offset - host_offset;
        }
        start = h2g(host_start);
    } else {
        if (start & ~TARGET_PAGE_MASK) {
            errno = EINVAL;
            goto fail;
        }
        end = start + len;
        real_end = HOST_PAGE_ALIGN(end);

        /*
         * Test if requested memory area fits target address space.
         * It can fail only on 64-bit host with 32-bit target.
         * On any other target/host combination, host mmap() handles
         * this error correctly.
         */
        if (end < start || !guest_range_valid_untagged(start, len)) {
            errno = ENOMEM;
            goto fail;
        }

        /* worst case: we cannot map the file because the offset is not
           aligned, so we read it */
        if (!(flags & MAP_ANONYMOUS) &&
            (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
            /* msync() won't work here, so we return an error if write is
               possible while it is a shared mapping */
            if ((flags & MAP_TYPE) == MAP_SHARED &&
                (host_prot & PROT_WRITE)) {
                errno = EINVAL;
                goto fail;
            }
            retaddr = target_mmap(start, len, target_prot | PROT_WRITE,
                                  MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
                                  -1, 0);
            if (retaddr == -1)
                goto fail;
            if (pread(fd, g2h_untagged(start), len, offset) == -1)
                goto fail;
            if (!(host_prot & PROT_WRITE)) {
                ret = target_mprotect(start, len, target_prot);
                assert(ret == 0);
            }
            goto the_end;
        }

        /* handle the start of the mapping */
        if (start > real_start) {
            if (real_end == real_start + qemu_host_page_size) {
                /* one single host page */
                ret = mmap_frag(real_start, start, end,
                                host_prot, flags, fd, offset);
                if (ret == -1)
                    goto fail;
                goto the_end1;
            }
            ret = mmap_frag(real_start, start, real_start + qemu_host_page_size,
                            host_prot, flags, fd, offset);
            if (ret == -1)
                goto fail;
            real_start += qemu_host_page_size;
        }
        /* handle the end of the mapping */
        if (end < real_end) {
            ret = mmap_frag(real_end - qemu_host_page_size,
                            real_end - qemu_host_page_size, end,
                            host_prot, flags, fd,
                            offset + real_end - qemu_host_page_size - start);
            if (ret == -1)
                goto fail;
            real_end -= qemu_host_page_size;
        }

        /* map the middle (easier) */
        if (real_start < real_end) {
            void *p;
            unsigned long offset1;
            if (flags & MAP_ANONYMOUS)
                offset1 = 0;
            else
                offset1 = offset + real_start - start;
            p = mmap(g2h_untagged(real_start), real_end - real_start,
                     host_prot, flags, fd, offset1);
            if (p == MAP_FAILED)
                goto fail;
        }
    }
 the_end1:
    if (flags & MAP_ANONYMOUS) {
        page_flags |= PAGE_ANON;
    }
    page_flags |= PAGE_RESET;
    page_set_flags(start, start + len, page_flags);
 the_end:
    trace_target_mmap_complete(start);
    if (qemu_loglevel_mask(CPU_LOG_PAGE)) {
        FILE *f = qemu_log_trylock();
        if (f) {
            fprintf(f, "page layout changed following mmap\n");
            page_dump(f);
            qemu_log_unlock(f);
        }
    }
    tb_invalidate_phys_range(start, start + len);
    mmap_unlock();
    return start;
fail:
    mmap_unlock();
    return -1;
}

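/*
 * Added summary (illustrative): for a MAP_FIXED request whose start or end
 * is not host-page aligned, the code above splits the work three ways: the
 * partial host page at the start and the one at the end go through
 * mmap_frag(), and only the host-page-aligned middle is mapped directly
 * with host mmap().  For example, with 4K guest pages and 64K host pages,
 * a fixed mapping of [0x11000, 0x53000) becomes frag [0x11000, 0x20000),
 * direct map [0x20000, 0x50000), frag [0x50000, 0x53000).
 */
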
static void mmap_reserve(abi_ulong start, abi_ulong size)
{
    abi_ulong real_start;
    abi_ulong real_end;
    abi_ulong addr;
    abi_ulong end;
    int prot;

    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(start + size);
    end = start + size;
    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0)
            real_start += qemu_host_page_size;
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0)
            real_end -= qemu_host_page_size;
    }
    if (real_start != real_end) {
        mmap(g2h_untagged(real_start), real_end - real_start, PROT_NONE,
             MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE,
             -1, 0);
    }
}

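/*
 * Added note (illustrative): when reserved_va is in use, the guest address
 * space was reserved from the host up front, so "unmapping" must not hand
 * the range back to the host.  mmap_reserve() therefore replaces it with a
 * PROT_NONE, MAP_NORESERVE anonymous mapping, keeping the reservation
 * while dropping any backing memory; target_munmap() calls it instead of
 * munmap(), and target_mremap() uses it for the vacated old range.
 */
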
int target_munmap(abi_ulong start, abi_ulong len)
{
    abi_ulong end, real_start, real_end, addr;
    int prot, ret;

    trace_target_munmap(start, len);

    if (start & ~TARGET_PAGE_MASK)
        return -TARGET_EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0 || !guest_range_valid_untagged(start, len)) {
        return -TARGET_EINVAL;
    }

    mmap_lock();
    end = start + len;
    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(end);

    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0)
            real_start += qemu_host_page_size;
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0)
            real_end -= qemu_host_page_size;
    }

    ret = 0;
    /* unmap what we can */
    if (real_start < real_end) {
        if (reserved_va) {
            mmap_reserve(real_start, real_end - real_start);
        } else {
            ret = munmap(g2h_untagged(real_start), real_end - real_start);
        }
    }

    if (ret == 0) {
        page_set_flags(start, start + len, 0);
        tb_invalidate_phys_range(start, start + len);
    }
    mmap_unlock();
    return ret;
}

abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
                       abi_ulong new_size, unsigned long flags,
                       abi_ulong new_addr)
{
    int prot;
    void *host_addr;

    if (!guest_range_valid_untagged(old_addr, old_size) ||
        ((flags & MREMAP_FIXED) &&
         !guest_range_valid_untagged(new_addr, new_size)) ||
        ((flags & MREMAP_MAYMOVE) == 0 &&
         !guest_range_valid_untagged(old_addr, new_size))) {
        errno = ENOMEM;
        return -1;
    }

    mmap_lock();

    if (flags & MREMAP_FIXED) {
        host_addr = mremap(g2h_untagged(old_addr), old_size, new_size,
                           flags, g2h_untagged(new_addr));

        if (reserved_va && host_addr != MAP_FAILED) {
            /* If new and old addresses overlap then the above mremap will
               already have failed with EINVAL.  */
            mmap_reserve(old_addr, old_size);
        }
    } else if (flags & MREMAP_MAYMOVE) {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, new_size, TARGET_PAGE_SIZE);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        } else {
            host_addr = mremap(g2h_untagged(old_addr), old_size, new_size,
                               flags | MREMAP_FIXED,
                               g2h_untagged(mmap_start));
            if (reserved_va) {
                mmap_reserve(old_addr, old_size);
            }
        }
    } else {
        int prot = 0;
        if (reserved_va && old_size < new_size) {
            abi_ulong addr;
            for (addr = old_addr + old_size;
                 addr < old_addr + new_size;
                 addr++) {
                prot |= page_get_flags(addr);
            }
        }
        if (prot == 0) {
            host_addr = mremap(g2h_untagged(old_addr),
                               old_size, new_size, flags);

            if (host_addr != MAP_FAILED) {
                /* Check if address fits target address space */
                if (!guest_range_valid_untagged(h2g(host_addr), new_size)) {
                    /* Revert mremap() changes */
                    host_addr = mremap(g2h_untagged(old_addr),
                                       new_size, old_size, flags);
                    errno = ENOMEM;
                    host_addr = MAP_FAILED;
                } else if (reserved_va && old_size > new_size) {
                    mmap_reserve(old_addr + old_size, old_size - new_size);
                }
            }
        } else {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        }
    }

    if (host_addr == MAP_FAILED) {
        new_addr = -1;
    } else {
        new_addr = h2g(host_addr);
        prot = page_get_flags(old_addr);
        page_set_flags(old_addr, old_addr + old_size, 0);
        page_set_flags(new_addr, new_addr + new_size,
                       prot | PAGE_VALID | PAGE_RESET);
    }
    tb_invalidate_phys_range(new_addr, new_addr + new_size);
    mmap_unlock();
    return new_addr;
}

static bool can_passthrough_madv_dontneed(abi_ulong start, abi_ulong end)
{
    ulong addr;

    if ((start | end) & ~qemu_host_page_mask) {
        return false;
    }

    for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        if (!(page_get_flags(addr) & PAGE_ANON)) {
            return false;
        }
    }

    return true;
}

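/*
 * Added note (illustrative): the passthrough check above requires host-page
 * alignment because the host madvise() acts on whole host pages, which may
 * be shared with neighbouring guest mappings, and requires PAGE_ANON
 * because for anonymous private memory MADV_DONTNEED simply zero-fills on
 * the next access, which matches what the guest expects.  Everything else
 * is silently ignored, which is acceptable for a hint.
 */
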
abi_long target_madvise(abi_ulong start, abi_ulong len_in, int advice)
{
    abi_ulong len, end;
    int ret = 0;

    if (start & ~TARGET_PAGE_MASK) {
        return -TARGET_EINVAL;
    }
    len = TARGET_PAGE_ALIGN(len_in);

    if (len_in && !len) {
        return -TARGET_EINVAL;
    }

    end = start + len;
    if (end < start) {
        return -TARGET_EINVAL;
    }

    if (end == start) {
        return 0;
    }

    if (!guest_range_valid_untagged(start, len)) {
        return -TARGET_EINVAL;
    }

    /*
     * A straight passthrough may not be safe because qemu sometimes turns
     * private file-backed mappings into anonymous mappings.
     *
     * This is a hint, so ignoring and returning success is ok.
     *
     * This breaks MADV_DONTNEED; implementing it completely is quite
     * complicated.  However, there is one low-hanging fruit:
     * host-page-aligned anonymous mappings.  In this case passthrough is
     * safe, so do it.
     */
    mmap_lock();
    if (advice == MADV_DONTNEED &&
        can_passthrough_madv_dontneed(start, end)) {
        ret = get_errno(madvise(g2h_untagged(start), len, MADV_DONTNEED));
        if (ret == 0) {
            page_reset_target_data(start, start + len);
        }
    }
    mmap_unlock();

    return ret;
}