/*
 *  mmap support for qemu
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "trace.h"
#include "exec/log.h"
#include "qemu.h"
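/*
 * This file implements the guest's mmap/mprotect/munmap/mremap syscalls
 * on top of the host's.  Guest addresses are converted with g2h()/h2g(),
 * and most of the complexity below comes from TARGET_PAGE_SIZE differing
 * from the host page size.
 */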
static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
static __thread int mmap_lock_count;
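/*
 * The mmap lock is recursive within a thread: a per-thread counter tracks
 * the nesting depth, and only the outermost lock/unlock pair touches the
 * process-wide mutex.
 */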
void mmap_lock(void)
{
    if (mmap_lock_count++ == 0) {
        pthread_mutex_lock(&mmap_mutex);
    }
}

void mmap_unlock(void)
{
    if (--mmap_lock_count == 0) {
        pthread_mutex_unlock(&mmap_mutex);
    }
}

bool have_mmap_lock(void)
{
    return mmap_lock_count > 0;
}
/* Grab lock to make sure things are in a consistent state after fork().  */
void mmap_fork_start(void)
{
    if (mmap_lock_count) {
        abort();
    }
    pthread_mutex_lock(&mmap_mutex);
}

void mmap_fork_end(int child)
{
    if (child) {
        /* The mutex state in the child is undefined; reinitialize it.  */
        pthread_mutex_init(&mmap_mutex, NULL);
    } else {
        pthread_mutex_unlock(&mmap_mutex);
    }
}
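/*
 * When TARGET_PAGE_SIZE is smaller than the host page size, one host page
 * can back several target pages with different permissions.  The fragment
 * handling below ORs together the flags of all target pages sharing a
 * host page, so changing one target page never strips permissions from
 * its neighbours.
 */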
/* NOTE: all the constants are the HOST ones, but addresses are target.  */
int target_mprotect(abi_ulong start, abi_ulong len, int prot)
{
    abi_ulong end, host_start, host_end, addr;
    int prot1, ret;

    trace_target_mprotect(start, len, prot);

    if ((start & ~TARGET_PAGE_MASK) != 0)
        return -TARGET_EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (!guest_range_valid(start, len)) {
        return -TARGET_ENOMEM;
    }
    prot &= PROT_READ | PROT_WRITE | PROT_EXEC;
    if (len == 0)
        return 0;

    mmap_lock();
    host_start = start & qemu_host_page_mask;
    host_end = HOST_PAGE_ALIGN(end);
    if (start > host_start) {
        /* handle host page containing start */
        prot1 = prot;
        for (addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        if (host_end == host_start + qemu_host_page_size) {
            for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(addr);
            }
            end = host_end;
        }
        ret = mprotect(g2h(host_start), qemu_host_page_size,
                       prot1 & PAGE_BITS);
        if (ret != 0)
            goto error;
        host_start += qemu_host_page_size;
    }
    if (end < host_end) {
        prot1 = prot;
        for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        ret = mprotect(g2h(host_end - qemu_host_page_size),
                       qemu_host_page_size, prot1 & PAGE_BITS);
        if (ret != 0)
            goto error;
        host_end -= qemu_host_page_size;
    }

    /* handle the pages in the middle */
    if (host_start < host_end) {
        ret = mprotect(g2h(host_start), host_end - host_start, prot);
        if (ret != 0)
            goto error;
    }
    page_set_flags(start, start + len, prot | PAGE_VALID);
    mmap_unlock();
    return 0;

error:
    mmap_unlock();
    return ret;
}
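/*
 * A host page is "incomplete" when the new target mapping covers only
 * part of it; target pages outside [start, end) on that host page must
 * keep their existing contents and protections.
 */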
/* map an incomplete host page */
static int mmap_frag(abi_ulong real_start,
                     abi_ulong start, abi_ulong end,
                     int prot, int flags, int fd, abi_ulong offset)
{
    abi_ulong real_end, addr;
    void *host_start;
    int prot1, prot_new;

    real_end = real_start + qemu_host_page_size;
    host_start = g2h(real_start);

    /* get the protection of the target pages outside the mapping */
    prot1 = 0;
    for (addr = real_start; addr < real_end; addr++) {
        if (addr < start || addr >= end)
            prot1 |= page_get_flags(addr);
    }

    if (prot1 == 0) {
        /* no page was there, so we allocate one */
        void *p = mmap(host_start, qemu_host_page_size, prot,
                       flags | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return -1;
        prot1 = prot;
    }
    prot1 &= PAGE_BITS;

    prot_new = prot | prot1;
    if (!(flags & MAP_ANONYMOUS)) {
        /* msync() won't work here, so we return an error if write is
           possible while it is a shared mapping */
        if ((flags & MAP_TYPE) == MAP_SHARED &&
            (prot & PROT_WRITE))
            return -1;

        /* adjust protection to be able to read */
        if (!(prot1 & PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);

        /* read the corresponding file data */
        if (pread(fd, g2h(start), end - start, offset) == -1)
            return -1;

        /* put final protection */
        if (prot_new != (prot1 | PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot_new);
    } else {
        if (prot_new != prot1) {
            mprotect(host_start, qemu_host_page_size, prot_new);
        }
        if (prot_new & PROT_WRITE) {
            memset(g2h(start), 0, end - start);
        }
    }
    return 0;
}
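/*
 * Default base for guest allocations when no address hint is given:
 * 2^38 for a 64-bit guest on a 64-bit host, 1 GiB otherwise.
 * mmap_next_start remembers where the next search should begin.
 */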
#if HOST_LONG_BITS == 64 && TARGET_ABI_BITS == 64
# define TASK_UNMAPPED_BASE  (1ul << 38)
#else
# define TASK_UNMAPPED_BASE  0x40000000
#endif
abi_ulong mmap_next_start = TASK_UNMAPPED_BASE;
unsigned long last_brk;
/* Subroutine of mmap_find_vma, used when we have pre-allocated a chunk
   of guest address space.  */
static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
                                        abi_ulong align)
{
    abi_ulong addr, end_addr, incr = qemu_host_page_size;
    int prot;
    bool looped = false;

    if (size > reserved_va) {
        return (abi_ulong)-1;
    }

    /* Note that start and size have already been aligned by mmap_find_vma. */

    end_addr = start + size;
    if (start > reserved_va - size) {
        /* Start at the top of the address space.  */
        end_addr = ((reserved_va - size) & -align) + size;
        looped = true;
    }

    /* Search downward from END_ADDR, checking to see if a page is in use.  */
    addr = end_addr;
    while (1) {
        addr -= incr;
        if (addr > end_addr) {
            if (looped) {
                /* Failure.  The entire address space has been searched.  */
                return (abi_ulong)-1;
            }
            /* Re-start at the top of the address space.  */
            addr = end_addr = ((reserved_va - size) & -align) + size;
            looped = true;
        } else {
            prot = page_get_flags(addr);
            if (prot) {
                /* Page in use.  Restart below this page.  */
                addr = end_addr = ((addr - size) & -align) + size;
            } else if (addr && addr + size == end_addr) {
                /* Success!  All pages between ADDR and END_ADDR are free.  */
                if (start == mmap_next_start) {
                    mmap_next_start = addr;
                }
                return addr;
            }
        }
    }
}
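/*
 * Without a pre-reserved guest area we have to probe: map a PROT_NONE
 * region at the candidate address and check whether the host kernel
 * honoured the hint and the result translates back into the guest
 * address space with the required alignment.
 */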
/*
 * Find and reserve a free memory area of size 'size'. The search
 * starts at 'start'.
 * It must be called with mmap_lock() held.
 * Return -1 if error.
 */
abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size, abi_ulong align)
{
    void *ptr, *prev;
    abi_ulong addr;
    int wrapped, repeat;

    align = MAX(align, qemu_host_page_size);

    /* If 'start' == 0, then a default start address is used. */
    if (start == 0) {
        start = mmap_next_start;
    } else {
        start &= qemu_host_page_mask;
    }
    start = ROUND_UP(start, align);

    size = HOST_PAGE_ALIGN(size);

    if (reserved_va) {
        return mmap_find_vma_reserved(start, size, align);
    }

    addr = start;
    wrapped = repeat = 0;
    prev = 0;

    for (;; prev = ptr) {
        /*
         * Reserve needed memory area to avoid a race.
         * It should be discarded using:
         *  - mmap() with MAP_FIXED flag
         *  - mremap() with MREMAP_FIXED flag
         *  - shmat() with SHM_REMAP flag
         */
        ptr = mmap(g2h(addr), size, PROT_NONE,
                   MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);

        /* ENOMEM, if host address space has no memory */
        if (ptr == MAP_FAILED) {
            return (abi_ulong)-1;
        }

        /* Count the number of sequential returns of the same address.
           This is used to modify the search algorithm below.  */
        repeat = (ptr == prev ? repeat + 1 : 0);

        if (h2g_valid(ptr + size - 1)) {
            addr = h2g(ptr);

            if ((addr & (align - 1)) == 0) {
                /* Success.  */
                if (start == mmap_next_start && addr >= TASK_UNMAPPED_BASE) {
                    mmap_next_start = addr + size;
                }
                return addr;
            }

            /* The address is not properly aligned for the target.  */
            switch (repeat) {
            case 0:
                /* Assume the result that the kernel gave us is the
                   first with enough free space, so start again at the
                   next higher target page.  */
                addr = ROUND_UP(addr, align);
                break;
            case 1:
                /* Sometimes the kernel decides to perform the allocation
                   at the top end of memory instead.  */
                addr &= -align;
                break;
            case 2:
                /* Start over at low memory.  */
                addr = 0;
                break;
            default:
                /* Fail.  This unaligned block must be the last.  */
                addr = -1;
                break;
            }
        } else {
            /* Since the result the kernel gave didn't fit, start
               again at low memory.  If any repetition, fail.  */
            addr = (repeat ? -1 : 0);
        }

        /* Unmap and try again.  */
        munmap(ptr, size);

        /* ENOMEM if we checked the whole of the target address space.  */
        if (addr == (abi_ulong)-1) {
            return (abi_ulong)-1;
        } else if (addr == 0) {
            if (wrapped) {
                return (abi_ulong)-1;
            }
            wrapped = 1;
            /* Don't actually use 0 when wrapping, instead indicate
               that we'd truly like an allocation in low memory.  */
            addr = (mmap_min_addr > TARGET_PAGE_SIZE
                    ? TARGET_PAGE_ALIGN(mmap_min_addr)
                    : TARGET_PAGE_SIZE);
        } else if (wrapped && addr >= start) {
            return (abi_ulong)-1;
        }
    }
}
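/*
 * target_mmap() has two main paths: when the guest lets us choose the
 * address, we over-allocate anonymous memory and map the file on top of
 * it; with MAP_FIXED, the host-page-aligned middle is mapped directly
 * and partially covered host pages are fixed up with mmap_frag().
 */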
/* NOTE: all the constants are the HOST ones */
abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
                     int flags, int fd, abi_ulong offset)
{
    abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len;

    mmap_lock();
    trace_target_mmap(start, len, prot, flags, fd, offset);

    if (!len) {
        errno = EINVAL;
        goto fail;
    }

    /* Also check for overflows... */
    len = TARGET_PAGE_ALIGN(len);
    if (!len) {
        errno = ENOMEM;
        goto fail;
    }

    if (offset & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        goto fail;
    }

    real_start = start & qemu_host_page_mask;
    host_offset = offset & qemu_host_page_mask;

    /* If the user is asking for the kernel to find a location, do that
       before we truncate the length for mapping files below.  */
    if (!(flags & MAP_FIXED)) {
        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);
        start = mmap_find_vma(real_start, host_len, TARGET_PAGE_SIZE);
        if (start == (abi_ulong)-1) {
            errno = ENOMEM;
            goto fail;
        }
    }

    /* When mapping files into a memory area larger than the file, accesses
       to pages beyond the file size will cause a SIGBUS.

       For example, if mmapping a file of 100 bytes on a host with 4K
       pages emulating a target with 8K pages, the target expects to be
       able to access the first 8K. But the host will trap us on any
       access beyond 4K.

       When emulating a target with a larger page size than the host's, we
       may need to truncate file maps at EOF and add extra anonymous pages
       up to the target's page boundary.  */

    if ((qemu_real_host_page_size < qemu_host_page_size) &&
        !(flags & MAP_ANONYMOUS)) {
        struct stat sb;

        if (fstat(fd, &sb) == -1)
            goto fail;

        /* Are we trying to create a map beyond EOF?  */
        if (offset + len > sb.st_size) {
            /* If so, truncate the file map at EOF aligned with
               the host's real page size. Additional anonymous maps
               will be created beyond EOF.  */
            len = REAL_HOST_PAGE_ALIGN(sb.st_size - offset);
        }
    }

    if (!(flags & MAP_FIXED)) {
        unsigned long host_start;
        void *p;

        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);

        /* Note: we prefer to control the mapping address. It is
           especially important if qemu_host_page_size >
           qemu_real_host_page_size */
        p = mmap(g2h(start), host_len, prot,
                 flags | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            goto fail;
        /* update start so that it points to the file position at 'offset' */
        host_start = (unsigned long)p;
        if (!(flags & MAP_ANONYMOUS)) {
            p = mmap(g2h(start), len, prot,
                     flags | MAP_FIXED, fd, host_offset);
            if (p == MAP_FAILED) {
                munmap(g2h(start), host_len);
                goto fail;
            }
            host_start += offset - host_offset;
        }
        start = h2g(host_start);
    } else {
        if (start & ~TARGET_PAGE_MASK) {
            errno = EINVAL;
            goto fail;
        }
        end = start + len;
        real_end = HOST_PAGE_ALIGN(end);

        /*
         * Test if requested memory area fits target address space.
         * It can fail only on 64-bit host with 32-bit target.
         * On any other target/host combination the host mmap()
         * handles this error correctly.
         */
        if (!guest_range_valid(start, len)) {
            errno = ENOMEM;
            goto fail;
        }

        /* worst case: we cannot map the file because the offset is not
           aligned, so we read it */
        if (!(flags & MAP_ANONYMOUS) &&
            (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
            /* msync() won't work here, so we return an error if write is
               possible while it is a shared mapping */
            if ((flags & MAP_TYPE) == MAP_SHARED &&
                (prot & PROT_WRITE)) {
                errno = EINVAL;
                goto fail;
            }
            retaddr = target_mmap(start, len, prot | PROT_WRITE,
                                  MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
                                  -1, 0);
            if (retaddr == -1)
                goto fail;
            if (pread(fd, g2h(start), len, offset) == -1)
                goto fail;
            if (!(prot & PROT_WRITE)) {
                ret = target_mprotect(start, len, prot);
                assert(ret == 0);
            }
            goto the_end;
        }

        /* handle the start of the mapping */
        if (start > real_start) {
            if (real_end == real_start + qemu_host_page_size) {
                /* one single host page */
                ret = mmap_frag(real_start, start, end,
                                prot, flags, fd, offset);
                if (ret == -1)
                    goto fail;
                goto the_end1;
            }
            ret = mmap_frag(real_start, start,
                            real_start + qemu_host_page_size,
                            prot, flags, fd, offset);
            if (ret == -1)
                goto fail;
            real_start += qemu_host_page_size;
        }
        /* handle the end of the mapping */
        if (end < real_end) {
            ret = mmap_frag(real_end - qemu_host_page_size,
                            real_end - qemu_host_page_size, end,
                            prot, flags, fd,
                            offset + real_end - qemu_host_page_size - start);
            if (ret == -1)
                goto fail;
            real_end -= qemu_host_page_size;
        }

        /* map the middle (easier) */
        if (real_start < real_end) {
            void *p;
            unsigned long offset1;
            if (flags & MAP_ANONYMOUS)
                offset1 = 0;
            else
                offset1 = offset + real_start - start;
            p = mmap(g2h(real_start), real_end - real_start,
                     prot, flags, fd, offset1);
            if (p == MAP_FAILED)
                goto fail;
        }
    }
 the_end1:
    page_set_flags(start, start + len, prot | PAGE_VALID);
 the_end:
    trace_target_mmap_complete(start);
    if (qemu_loglevel_mask(CPU_LOG_PAGE)) {
        log_page_dump(__func__);
    }
    tb_invalidate_phys_range(start, start + len);
    mmap_unlock();
    return start;
fail:
    mmap_unlock();
    return -1;
}
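/*
 * With reserved_va the guest address space is reserved up front, so
 * "unmapping" must not return pages to the host.  Replace the range with
 * a fresh PROT_NONE reservation instead, keeping other host allocations
 * out of the guest address space.
 */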
static void mmap_reserve(abi_ulong start, abi_ulong size)
{
    abi_ulong real_start;
    abi_ulong real_end;
    abi_ulong addr;
    abi_ulong end;
    int prot;

    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(start + size);
    end = start + size;
    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0)
            real_start += qemu_host_page_size;
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0)
            real_end -= qemu_host_page_size;
    }
    if (real_start != real_end) {
        mmap(g2h(real_start), real_end - real_start, PROT_NONE,
             MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE,
             -1, 0);
    }
}
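/*
 * As with mprotect, a host page shared with target pages outside the
 * unmapped range must survive; only host pages fully covered by the
 * request are really unmapped (or re-reserved when reserved_va is set).
 */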
int target_munmap(abi_ulong start, abi_ulong len)
{
    abi_ulong end, real_start, real_end, addr;
    int prot, ret;

    trace_target_munmap(start, len);

    if (start & ~TARGET_PAGE_MASK)
        return -TARGET_EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0 || !guest_range_valid(start, len)) {
        return -TARGET_EINVAL;
    }

    mmap_lock();
    end = start + len;
    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(end);

    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0)
            real_start += qemu_host_page_size;
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0)
            real_end -= qemu_host_page_size;
    }

    ret = 0;
    /* unmap what we can */
    if (real_start < real_end) {
        if (reserved_va) {
            mmap_reserve(real_start, real_end - real_start);
        } else {
            ret = munmap(g2h(real_start), real_end - real_start);
        }
    }

    if (ret == 0) {
        page_set_flags(start, start + len, 0);
        tb_invalidate_phys_range(start, start + len);
    }
    mmap_unlock();
    return ret;
}
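/*
 * target_mremap() mirrors the kernel's three cases: an explicit
 * MREMAP_FIXED destination, MREMAP_MAYMOVE into a hole found by
 * mmap_find_vma(), or resizing in place.  With reserved_va, any vacated
 * range goes back to mmap_reserve() rather than to the host.
 */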
abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
                       abi_ulong new_size, unsigned long flags,
                       abi_ulong new_addr)
{
    int prot;
    void *host_addr;

    if (!guest_range_valid(old_addr, old_size) ||
        ((flags & MREMAP_FIXED) &&
         !guest_range_valid(new_addr, new_size))) {
        errno = ENOMEM;
        return -1;
    }

    mmap_lock();

    if (flags & MREMAP_FIXED) {
        host_addr = mremap(g2h(old_addr), old_size, new_size,
                           flags, g2h(new_addr));

        if (reserved_va && host_addr != MAP_FAILED) {
            /* If new and old addresses overlap then the above mremap will
               already have failed with EINVAL.  */
            mmap_reserve(old_addr, old_size);
        }
    } else if (flags & MREMAP_MAYMOVE) {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, new_size, TARGET_PAGE_SIZE);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        } else {
            host_addr = mremap(g2h(old_addr), old_size, new_size,
                               flags | MREMAP_FIXED, g2h(mmap_start));
            if (reserved_va) {
                mmap_reserve(old_addr, old_size);
            }
        }
    } else {
        int prot = 0;
        if (reserved_va && old_size < new_size) {
            abi_ulong addr;
            for (addr = old_addr + old_size;
                 addr < old_addr + new_size;
                 addr++) {
                prot |= page_get_flags(addr);
            }
        }
        if (prot == 0) {
            host_addr = mremap(g2h(old_addr), old_size, new_size, flags);
            if (host_addr != MAP_FAILED && reserved_va &&
                old_size > new_size) {
                /* Shrinking in place: return the vacated tail to the
                   reservation.  */
                mmap_reserve(old_addr + new_size, old_size - new_size);
            }
        } else {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        }

        /* Check if address fits target address space */
        if ((unsigned long)host_addr + new_size > (abi_ulong)-1) {
            /* Revert mremap() changes */
            host_addr = mremap(g2h(old_addr), new_size, old_size, flags);
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        }
    }

    if (host_addr == MAP_FAILED) {
        new_addr = -1;
    } else {
        new_addr = h2g(host_addr);
        prot = page_get_flags(old_addr);
        page_set_flags(old_addr, old_addr + old_size, 0);
        page_set_flags(new_addr, new_addr + new_size, prot | PAGE_VALID);
    }
    tb_invalidate_phys_range(new_addr, new_addr + new_size);
    mmap_unlock();
    return new_addr;
}