/*
 *  mmap support for qemu
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "trace.h"
#include "exec/log.h"
#include "qemu.h"
static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
static __thread int mmap_lock_count;
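/*
 * The mmap lock is recursive: mmap_lock_count tracks per-thread nesting,
 * so a thread that already holds the lock can take it again without
 * deadlocking, and only the outermost mmap_unlock() releases the mutex.
 */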
void mmap_lock(void)
{
    if (mmap_lock_count++ == 0) {
        pthread_mutex_lock(&mmap_mutex);
    }
}
void mmap_unlock(void)
{
    if (--mmap_lock_count == 0) {
        pthread_mutex_unlock(&mmap_mutex);
    }
}
bool have_mmap_lock(void)
{
    return mmap_lock_count > 0 ? true : false;
}
/* Grab lock to make sure things are in a consistent state after fork().  */
void mmap_fork_start(void)
{
    if (mmap_lock_count) {
        abort();
    }
    pthread_mutex_lock(&mmap_mutex);
}
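/*
 * In the child, only the forking thread survives, so a mutex locked by
 * another thread in the parent could never be unlocked there; the child
 * therefore reinitializes the mutex instead of unlocking it.
 */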
void mmap_fork_end(int child)
{
    if (child) {
        pthread_mutex_init(&mmap_mutex, NULL);
    } else {
        pthread_mutex_unlock(&mmap_mutex);
    }
}
/*
 * Validate target prot bitmask.
 * Return the prot bitmask for the host in *HOST_PROT.
 * Return 0 if the target prot bitmask is invalid, otherwise
 * the internal qemu page_flags (which will include PAGE_VALID).
 */
static int validate_prot_to_pageflags(int *host_prot, int prot)
{
    int valid = PROT_READ | PROT_WRITE | PROT_EXEC | TARGET_PROT_SEM;
    int page_flags = (prot & PAGE_BITS) | PAGE_VALID;
    /*
     * For the host, we need not pass anything except read/write/exec.
     * While PROT_SEM is allowed by all hosts, it is also ignored, so
     * don't bother transforming guest bit to host bit.  Any other
     * target-specific prot bits will not be understood by the host
     * and will need to be encoded into page_flags for qemu emulation.
     *
     * Pages that are executable by the guest will never be executed
     * by the host, but the host will need to be able to read them.
     */
    *host_prot = (prot & (PROT_READ | PROT_WRITE))
               | (prot & PROT_EXEC ? PROT_READ : 0);

#ifdef TARGET_AARCH64
    {
        ARMCPU *cpu = ARM_CPU(thread_cpu);
        /*
         * The PROT_BTI bit is only accepted if the cpu supports the feature.
         * Since this is the unusual case, don't bother checking unless
         * the bit has been requested.  If set and valid, record the bit
         * within QEMU's page_flags.
         */
        if ((prot & TARGET_PROT_BTI) && cpu_isar_feature(aa64_bti, cpu)) {
            valid |= TARGET_PROT_BTI;
            page_flags |= PAGE_BTI;
        }
        /* Similarly for the PROT_MTE bit. */
        if ((prot & TARGET_PROT_MTE) && cpu_isar_feature(aa64_mte, cpu)) {
            valid |= TARGET_PROT_MTE;
            page_flags |= PAGE_MTE;
        }
    }
#endif

    return prot & ~valid ? 0 : page_flags;
}
/* NOTE: all the constants are the HOST ones, but addresses are target. */
int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
{
    abi_ulong end, host_start, host_end, addr;
    int prot1, ret, page_flags, host_prot;

    trace_target_mprotect(start, len, target_prot);
    if ((start & ~TARGET_PAGE_MASK) != 0) {
        return -TARGET_EINVAL;
    }
    page_flags = validate_prot_to_pageflags(&host_prot, target_prot);
    if (!page_flags) {
        return -TARGET_EINVAL;
    }
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (!guest_range_valid_untagged(start, len)) {
        return -TARGET_ENOMEM;
    }
    if (len == 0) {
        return 0;
    }

    mmap_lock();
    host_start = start & qemu_host_page_mask;
    host_end = HOST_PAGE_ALIGN(end);
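    /*
     * A single host page may back several target pages with different
     * protections, so the range is handled in three pieces: a partial
     * host page at the head, whole host pages in the middle, and a
     * partial host page at the tail.  For the partial pages, the host
     * protection must be the union of what every target page sharing
     * that host page requires.
     */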
    if (start > host_start) {
        /* handle host page containing start */
        prot1 = host_prot;
        for (addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        if (host_end == host_start + qemu_host_page_size) {
            for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(addr);
            }
            end = host_end;
        }
        ret = mprotect(g2h_untagged(host_start), qemu_host_page_size,
                       prot1 & PAGE_BITS);
        if (ret != 0) {
            goto error;
        }
        host_start += qemu_host_page_size;
    }
    if (end < host_end) {
        prot1 = host_prot;
        for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        ret = mprotect(g2h_untagged(host_end - qemu_host_page_size),
                       qemu_host_page_size, prot1 & PAGE_BITS);
        if (ret != 0) {
            goto error;
        }
        host_end -= qemu_host_page_size;
    }
    /* handle the pages in the middle */
    if (host_start < host_end) {
        ret = mprotect(g2h_untagged(host_start),
                       host_end - host_start, host_prot);
        if (ret != 0) {
            goto error;
        }
    }

    page_set_flags(start, start + len, page_flags);
    mmap_unlock();
    return 0;

 error:
    mmap_unlock();
    return ret;
}
/* map an incomplete host page */
static int mmap_frag(abi_ulong real_start,
                     abi_ulong start, abi_ulong end,
                     int prot, int flags, int fd, abi_ulong offset)
{
    abi_ulong real_end, addr;
    void *host_start;
    int prot1, prot_new;
    real_end = real_start + qemu_host_page_size;
    host_start = g2h_untagged(real_start);
    /* get the protection of the target pages outside the mapping */
    prot1 = 0;
    for (addr = real_start; addr < real_end; addr++) {
        if (addr < start || addr >= end) {
            prot1 |= page_get_flags(addr);
        }
    }
    if (prot1 == 0) {
        /* no page was there, so we allocate one */
        void *p = mmap(host_start, qemu_host_page_size, prot,
                       flags | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
            return -1;
        }
        prot1 = prot;
    }
    prot1 &= PAGE_BITS;
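    /*
     * prot_new is the union of the fragment's own protection and the
     * protection already required by neighbouring target pages: the
     * host page as a whole must satisfy both.
     */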
    prot_new = prot | prot1;
    if (!(flags & MAP_ANONYMOUS)) {
        /* msync() won't work here, so we return an error if write is
           possible while it is a shared mapping */
        if ((flags & MAP_TYPE) == MAP_SHARED &&
            (prot & PROT_WRITE)) {
            return -1;
        }

        /* adjust protection to be able to read */
        if (!(prot1 & PROT_WRITE)) {
            mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);
        }

        /* read the corresponding file data */
        if (pread(fd, g2h_untagged(start), end - start, offset) == -1) {
            return -1;
        }

        /* put final protection */
        if (prot_new != (prot1 | PROT_WRITE)) {
            mprotect(host_start, qemu_host_page_size, prot_new);
        }
    } else {
        if (prot_new != prot1) {
            mprotect(host_start, qemu_host_page_size, prot_new);
        }
        if (prot_new & PROT_WRITE) {
            memset(g2h_untagged(start), 0, end - start);
        }
    }
    return 0;
}
#if HOST_LONG_BITS == 64 && TARGET_ABI_BITS == 64
#ifdef TARGET_AARCH64
# define TASK_UNMAPPED_BASE  0x5500000000
#else
# define TASK_UNMAPPED_BASE  (1ul << 38)
#endif
#else
# define TASK_UNMAPPED_BASE  0x40000000
#endif
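/*
 * Base of the region in which guest mappings are placed when the guest
 * does not ask for a fixed address.  mmap_next_start is a moving hint
 * advanced past each successful allocation, so later searches need not
 * rescan the whole address space.
 */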
abi_ulong mmap_next_start = TASK_UNMAPPED_BASE;

unsigned long last_brk;
/* Subroutine of mmap_find_vma, used when we have pre-allocated a chunk
   of guest address space.  */
static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
                                        abi_ulong align)
{
    abi_ulong addr, end_addr, incr = qemu_host_page_size;
    int prot;
    bool looped = false;

    if (size > reserved_va) {
        return (abi_ulong)-1;
    }

    /* Note that start and size have already been aligned by mmap_find_vma. */
    end_addr = start + size;
    if (start > reserved_va - size) {
        /* Start at the top of the address space.  */
        end_addr = ((reserved_va - size) & -align) + size;
        looped = true;
    }
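    /*
     * ADDR steps downward one host page at a time.  Hitting a used page
     * restarts the candidate window just below it; reaching
     * addr + size == end_addr means SIZE contiguous free bytes have
     * been verified.  LOOPED guards against scanning the space twice.
     */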
    /* Search downward from END_ADDR, checking to see if a page is in use.  */
    addr = end_addr;
    while (1) {
        addr -= incr;
        if (addr > end_addr) {
            if (looped) {
                /* Failure.  The entire address space has been searched.  */
                return (abi_ulong)-1;
            }
            /* Re-start at the top of the address space.  */
            addr = end_addr = ((reserved_va - size) & -align) + size;
            looped = true;
        } else {
            prot = page_get_flags(addr);
            if (prot) {
                /* Page in use.  Restart below this page.  */
                addr = end_addr = ((addr - size) & -align) + size;
            } else if (addr && addr + size == end_addr) {
                /* Success!  All pages between ADDR and END_ADDR are free.  */
                if (start == mmap_next_start) {
                    mmap_next_start = addr;
                }
                return addr;
            }
        }
    }
}
/*
 * Find and reserve a free memory area of size 'size'.  The search
 * starts at 'start'.
 * It must be called with mmap_lock() held.
 * Return -1 if error.
 */
abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size, abi_ulong align)
{
    void *ptr, *prev;
    abi_ulong addr;
    int wrapped, repeat;

    align = MAX(align, qemu_host_page_size);

    /* If 'start' == 0, then a default start address is used. */
    if (start == 0) {
        start = mmap_next_start;
    } else {
        start &= qemu_host_page_mask;
    }
    start = ROUND_UP(start, align);

    size = HOST_PAGE_ALIGN(size);

    if (reserved_va) {
        return mmap_find_vma_reserved(start, size, align);
    }
    addr = start;
    wrapped = repeat = 0;
    prev = 0;

    for (;; prev = ptr) {
        /*
         * Reserve needed memory area to avoid a race.
         * It should be discarded using:
         * - mmap() with MAP_FIXED flag
         * - mremap() with MREMAP_FIXED flag
         * - shmat() with SHM_REMAP flag
         */
        ptr = mmap(g2h_untagged(addr), size, PROT_NONE,
                   MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
        /* ENOMEM, if host address space has no memory */
        if (ptr == MAP_FAILED) {
            return (abi_ulong)-1;
        }
        /* Count the number of sequential returns of the same address.
           This is used to modify the search algorithm below.  */
        repeat = (ptr == prev ? repeat + 1 : 0);
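        /*
         * The probe above can land anywhere in the host address space.
         * Three outcomes follow: the block fits the guest space and is
         * aligned (success); it fits but is misaligned (retry at an
         * adjusted address chosen by how often the kernel returned the
         * same spot); or it lies outside guest space (retry from low
         * memory).
         */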
        if (h2g_valid(ptr + size - 1)) {
            addr = h2g(ptr);

            if ((addr & (align - 1)) == 0) {
                /* Success.  */
                if (start == mmap_next_start && addr >= TASK_UNMAPPED_BASE) {
                    mmap_next_start = addr + size;
                }
                return addr;
            }
            /* The address is not properly aligned for the target.  */
            switch (repeat) {
            case 0:
                /* Assume the result that the kernel gave us is the
                   first with enough free space, so start again at the
                   next higher target page.  */
                addr = ROUND_UP(addr, align);
                break;
            case 1:
                /* Sometimes the kernel decides to perform the allocation
                   at the top end of memory instead.  */
                addr &= -align;
                break;
            case 2:
                /* Start over at low memory.  */
                addr = 0;
                break;
            default:
                /* Fail.  This unaligned block must be the last.  */
                addr = -1;
                break;
            }
        } else {
            /* Since the result the kernel gave didn't fit, start
               again at low memory.  If any repetition, fail.  */
            addr = (repeat ? -1 : 0);
        }
        /* Unmap and try again.  */
        munmap(ptr, size);
        /* ENOMEM if we checked the whole of the target address space.  */
        if (addr == (abi_ulong)-1) {
            return (abi_ulong)-1;
        } else if (addr == 0) {
            if (wrapped) {
                return (abi_ulong)-1;
            }
            wrapped = 1;
            /* Don't actually use 0 when wrapping, instead indicate
               that we'd truly like an allocation in low memory.  */
            addr = (mmap_min_addr > TARGET_PAGE_SIZE
                    ? TARGET_PAGE_ALIGN(mmap_min_addr)
                    : TARGET_PAGE_SIZE);
        } else if (wrapped && addr >= start) {
            return (abi_ulong)-1;
        }
    }
}
/* NOTE: all the constants are the HOST ones */
abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
                     int flags, int fd, abi_ulong offset)
{
    abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len;
    int page_flags, host_prot;

    mmap_lock();
    trace_target_mmap(start, len, target_prot, flags, fd, offset);
    if (!len) {
        errno = EINVAL;
        goto fail;
    }

    page_flags = validate_prot_to_pageflags(&host_prot, target_prot);
    if (!page_flags) {
        errno = EINVAL;
        goto fail;
    }

    /* Also check for overflows... */
    len = TARGET_PAGE_ALIGN(len);
    if (!len) {
        errno = ENOMEM;
        goto fail;
    }

    if (offset & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        goto fail;
    }
    real_start = start & qemu_host_page_mask;
    host_offset = offset & qemu_host_page_mask;
    /* If the user is asking for the kernel to find a location, do that
       before we truncate the length for mapping files below.  */
    if (!(flags & MAP_FIXED)) {
        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);
        start = mmap_find_vma(real_start, host_len, TARGET_PAGE_SIZE);
        if (start == (abi_ulong)-1) {
            errno = ENOMEM;
            goto fail;
        }
    }
    /* When mapping files into a memory area larger than the file, accesses
       to pages beyond the file size will cause a SIGBUS.

       For example, if mmapping a file of 100 bytes on a host with 4K
       pages emulating a target with 8K pages, the target expects to be
       able to access the first 8K. But the host will trap us on any
       access beyond 4K.

       When emulating a target with a larger page size than the host's,
       we may need to truncate file maps at EOF and add extra anonymous
       pages up to the target's page boundary.  */
    if ((qemu_real_host_page_size < qemu_host_page_size) &&
        !(flags & MAP_ANONYMOUS)) {
        struct stat sb;

        if (fstat(fd, &sb) == -1) {
            goto fail;
        }

        /* Are we trying to create a map beyond EOF?  */
        if (offset + len > sb.st_size) {
            /* If so, truncate the file map at EOF aligned with
               the host's real page size.  Additional anonymous maps
               will be created beyond EOF.  */
            len = REAL_HOST_PAGE_ALIGN(sb.st_size - offset);
        }
    }
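    /*
     * Without MAP_FIXED, mmap_find_vma() above picked 'start' for us.
     * The whole host-page-aligned range is first reserved anonymously
     * at that address, then for file mappings the file is mapped over
     * it with MAP_FIXED at the host-page-aligned offset, so the guest
     * sees its data at exactly the requested target offset.
     */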
    if (!(flags & MAP_FIXED)) {
        unsigned long host_start;
        void *p;

        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);

        /* Note: we prefer to control the mapping address.  It is
           especially important if qemu_host_page_size >
           qemu_real_host_page_size. */
        p = mmap(g2h_untagged(start), host_len, host_prot,
                 flags | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
            goto fail;
        }
        /* update start so that it points to the file position at 'offset' */
        host_start = (unsigned long)p;
        if (!(flags & MAP_ANONYMOUS)) {
            p = mmap(g2h_untagged(start), len, host_prot,
                     flags | MAP_FIXED, fd, host_offset);
            if (p == MAP_FAILED) {
                munmap(g2h_untagged(start), host_len);
                goto fail;
            }
            host_start += offset - host_offset;
        }
        start = h2g(host_start);
    } else {
        if (start & ~TARGET_PAGE_MASK) {
            errno = EINVAL;
            goto fail;
        }
        end = start + len;
        real_end = HOST_PAGE_ALIGN(end);
        /*
         * Test if requested memory area fits target address space.
         * It can fail only on a 64-bit host with a 32-bit target.
         * On any other target/host combination the host mmap() handles
         * this error correctly.
         */
        if (end < start || !guest_range_valid_untagged(start, len)) {
            errno = ENOMEM;
            goto fail;
        }
        /* worst case: we cannot map the file because the offset is not
           aligned, so we read it */
        if (!(flags & MAP_ANONYMOUS) &&
            (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
            /* msync() won't work here, so we return an error if write is
               possible while it is a shared mapping */
            if ((flags & MAP_TYPE) == MAP_SHARED &&
                (host_prot & PROT_WRITE)) {
                errno = EINVAL;
                goto fail;
            }
            retaddr = target_mmap(start, len, target_prot | PROT_WRITE,
                                  MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
                                  -1, 0);
            if (retaddr == -1) {
                goto fail;
            }
            if (pread(fd, g2h_untagged(start), len, offset) == -1) {
                goto fail;
            }
            if (!(host_prot & PROT_WRITE)) {
                ret = target_mprotect(start, len, target_prot);
                assert(ret == 0);
            }
            goto the_end;
        }
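        /*
         * From here the host-page-aligned mapping is built in three
         * pieces, mirroring target_mprotect(): a fragment on the host
         * page containing the start, a fragment on the host page
         * containing the end, and whole host pages in between.
         */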
        /* handle the start of the mapping */
        if (start > real_start) {
            if (real_end == real_start + qemu_host_page_size) {
                /* one single host page */
                ret = mmap_frag(real_start, start, end,
                                host_prot, flags, fd, offset);
                if (ret == -1) {
                    goto fail;
                }
                goto the_end1;
            }
            ret = mmap_frag(real_start, start, real_start + qemu_host_page_size,
                            host_prot, flags, fd, offset);
            if (ret == -1) {
                goto fail;
            }
            real_start += qemu_host_page_size;
        }
        /* handle the end of the mapping */
        if (end < real_end) {
            ret = mmap_frag(real_end - qemu_host_page_size,
                            real_end - qemu_host_page_size, end,
                            host_prot, flags, fd,
                            offset + real_end - qemu_host_page_size - start);
            if (ret == -1) {
                goto fail;
            }
            real_end -= qemu_host_page_size;
        }
        /* map the middle (easier) */
        if (real_start < real_end) {
            void *p;
            unsigned long offset1;

            if (flags & MAP_ANONYMOUS) {
                offset1 = 0;
            } else {
                offset1 = offset + real_start - start;
            }
            p = mmap(g2h_untagged(real_start), real_end - real_start,
                     host_prot, flags, fd, offset1);
            if (p == MAP_FAILED) {
                goto fail;
            }
        }
    }
 the_end1:
    if (flags & MAP_ANONYMOUS) {
        page_flags |= PAGE_ANON;
    }
    page_flags |= PAGE_RESET;
    page_set_flags(start, start + len, page_flags);
 the_end:
    trace_target_mmap_complete(start);
    if (qemu_loglevel_mask(CPU_LOG_PAGE)) {
        log_page_dump(__func__);
    }
    tb_invalidate_phys_range(start, start + len);
    mmap_unlock();
    return start;
fail:
    mmap_unlock();
    return -1;
}
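/*
 * When reserved_va is in use, the guest address space has been
 * pre-reserved in the host, so a region is not munmap()ed; it is
 * remapped PROT_NONE instead, keeping the reservation intact.
 * Partial host pages at either end are left alone if other target
 * pages in them are still mapped.
 */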
static void mmap_reserve(abi_ulong start, abi_ulong size)
{
    abi_ulong real_start;
    abi_ulong real_end;
    abi_ulong addr;
    abi_ulong end;
    int prot;

    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(start + size);
    end = start + size;
    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0) {
            real_start += qemu_host_page_size;
        }
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0) {
            real_end -= qemu_host_page_size;
        }
    }
    if (real_start != real_end) {
        mmap(g2h_untagged(real_start), real_end - real_start, PROT_NONE,
             MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE,
             -1, 0);
    }
}
int target_munmap(abi_ulong start, abi_ulong len)
{
    abi_ulong end, real_start, real_end, addr;
    int prot, ret;

    trace_target_munmap(start, len);

    if (start & ~TARGET_PAGE_MASK) {
        return -TARGET_EINVAL;
    }
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0 || !guest_range_valid_untagged(start, len)) {
        return -TARGET_EINVAL;
    }

    mmap_lock();
    end = start + len;
    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(end);
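    /*
     * As in target_mprotect(), a host page at either end of the range
     * may still contain target pages outside it; such a host page is
     * kept mapped, and only the flags of its unmapped target pages are
     * cleared below.
     */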
    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0) {
            real_start += qemu_host_page_size;
        }
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0) {
            real_end -= qemu_host_page_size;
        }
    }
    ret = 0;
    /* unmap what we can */
    if (real_start < real_end) {
        if (reserved_va) {
            mmap_reserve(real_start, real_end - real_start);
        } else {
            ret = munmap(g2h_untagged(real_start), real_end - real_start);
        }
    }

    if (ret == 0) {
        page_set_flags(start, start + len, 0);
        tb_invalidate_phys_range(start, start + len);
    }
    mmap_unlock();
    return ret;
}
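/*
 * The three branches below mirror the kernel's mremap() flag semantics:
 * MREMAP_FIXED moves the mapping to a caller-chosen address,
 * MREMAP_MAYMOVE lets QEMU pick a new guest range via mmap_find_vma(),
 * and with neither flag the mapping may only grow or shrink in place.
 */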
abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
                       abi_ulong new_size, unsigned long flags,
                       abi_ulong new_addr)
{
    int prot;
    void *host_addr;

    if (!guest_range_valid_untagged(old_addr, old_size) ||
        ((flags & MREMAP_FIXED) &&
         !guest_range_valid_untagged(new_addr, new_size)) ||
        ((flags & MREMAP_MAYMOVE) == 0 &&
         !guest_range_valid_untagged(old_addr, new_size))) {
        errno = ENOMEM;
        return -1;
    }

    mmap_lock();
    if (flags & MREMAP_FIXED) {
        host_addr = mremap(g2h_untagged(old_addr), old_size, new_size,
                           flags, g2h_untagged(new_addr));

        if (reserved_va && host_addr != MAP_FAILED) {
            /* If new and old addresses overlap then the above mremap will
               already have failed with EINVAL.  */
            mmap_reserve(old_addr, old_size);
        }
    } else if (flags & MREMAP_MAYMOVE) {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, new_size, TARGET_PAGE_SIZE);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        } else {
            host_addr = mremap(g2h_untagged(old_addr), old_size, new_size,
                               flags | MREMAP_FIXED,
                               g2h_untagged(mmap_start));
            if (reserved_va) {
                mmap_reserve(old_addr, old_size);
            }
        }
    } else {
        int prot = 0;

        /* With reserved_va, growing in place is only possible if the
           target pages beyond the old end are still unmapped. */
        if (reserved_va && old_size < new_size) {
            abi_ulong addr;

            for (addr = old_addr + old_size;
                 addr < old_addr + new_size;
                 addr++) {
                prot |= page_get_flags(addr);
            }
        }
        if (prot == 0) {
            host_addr = mremap(g2h_untagged(old_addr),
                               old_size, new_size, flags);
            if (host_addr != MAP_FAILED) {
                /* Check if address fits target address space */
                if (!guest_range_valid_untagged(h2g(host_addr), new_size)) {
                    /* Revert mremap() changes */
                    host_addr = mremap(g2h_untagged(old_addr),
                                       new_size, old_size, flags);
                    errno = ENOMEM;
                    host_addr = MAP_FAILED;
                } else if (reserved_va && old_size > new_size) {
                    mmap_reserve(old_addr + old_size, old_size - new_size);
                }
            }
        } else {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        }
    }
    if (host_addr == MAP_FAILED) {
        new_addr = -1;
    } else {
        new_addr = h2g(host_addr);
        prot = page_get_flags(old_addr);
        page_set_flags(old_addr, old_addr + old_size, 0);
        page_set_flags(new_addr, new_addr + new_size,
                       prot | PAGE_VALID | PAGE_RESET);
    }
    tb_invalidate_phys_range(new_addr, new_addr + new_size);
    mmap_unlock();
    return new_addr;
}