/*
 *  mmap support for qemu
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
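/* Host libc interfaces (mmap, mprotect, mremap, msync, pread, errno) and the
   QEMU user-mode helpers (g2h, h2g, page_get_flags, ...) are assumed to come
   from the headers below. */
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <sys/mman.h>

#include "qemu.h"
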
/* NOTE: all the constants are the HOST ones, but addresses are target. */
int target_mprotect(target_ulong start, target_ulong len, int prot)
{
    target_ulong end, host_start, host_end, addr;
    int prot1, ret;
#ifdef DEBUG_MMAP
    printf("mprotect: start=0x" TARGET_FMT_lx
           " len=0x" TARGET_FMT_lx " prot=%c%c%c\n", start, len,
           prot & PROT_READ ? 'r' : '-',
           prot & PROT_WRITE ? 'w' : '-',
           prot & PROT_EXEC ? 'x' : '-');
#endif
    if ((start & ~TARGET_PAGE_MASK) != 0)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC))
        return -EINVAL;
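
    /* The range is handled in up to three pieces: the host page containing
       'start', the host page containing 'end' (both may be shared with target
       pages outside the range, so they get the union of the protections), and
       the host-page-aligned middle, which can be mprotect()ed directly. */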
    host_start = start & qemu_host_page_mask;
    host_end = HOST_PAGE_ALIGN(end);
    if (start > host_start) {
        /* handle host page containing start */
        prot1 = prot;
        for(addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        if (host_end == host_start + qemu_host_page_size) {
            for(addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(addr);
            }
            end = host_end;
        }
        ret = mprotect(g2h(host_start), qemu_host_page_size, prot1 & PAGE_BITS);
        if (ret != 0)
            return ret;
        host_start += qemu_host_page_size;
    }
    if (end < host_end) {
        prot1 = prot;
        for(addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        ret = mprotect(g2h(host_end - qemu_host_page_size), qemu_host_page_size,
                       prot1 & PAGE_BITS);
        if (ret != 0)
            return ret;
        host_end -= qemu_host_page_size;
    }
    /* handle the pages in the middle */
    if (host_start < host_end) {
        ret = mprotect(g2h(host_start), host_end - host_start, prot);
        if (ret != 0) return ret;
    }
    page_set_flags(start, start + len, prot | PAGE_VALID);
    return 0;
}
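
/* When the target page size is smaller than the host page size, a target
   mapping may cover only part of a host page. mmap_frag() handles such a
   fragment: the affected host page keeps the union of the protections of the
   target pages it contains, and file-backed data is pread() into place
   (allocating an anonymous page first if nothing was mapped there), since the
   file cannot be mmap()ed at sub-host-page granularity. */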
/* map an incomplete host page */
static int mmap_frag(target_ulong real_start,
                     target_ulong start, target_ulong end,
                     int prot, int flags, int fd, target_ulong offset)
{
    target_ulong real_end, ret, addr;
    void *host_start;
    int prot1, prot_new;

    real_end = real_start + qemu_host_page_size;
    host_start = g2h(real_start);
    /* get the protection of the target pages outside the mapping */
    prot1 = 0;
    for(addr = real_start; addr < real_end; addr += TARGET_PAGE_SIZE) {
        if (addr < start || addr >= end)
            prot1 |= page_get_flags(addr);
    }
    if (prot1 == 0) {
        /* no page was there, so we allocate one */
        ret = (long)mmap(host_start, qemu_host_page_size, prot,
                         flags | MAP_ANONYMOUS, -1, 0);
        if (ret == -1)
            return ret;
        prot1 = prot;
    }
    prot1 &= PAGE_BITS;
    prot_new = prot | prot1;
    if (!(flags & MAP_ANONYMOUS)) {
        /* msync() won't work here, so we return an error if write is
           possible while it is a shared mapping */
        if ((flags & MAP_TYPE) == MAP_SHARED &&
            (prot & PROT_WRITE)) {
            errno = EINVAL;
            return -1;
        }

        /* adjust protection to be able to read */
        if (!(prot1 & PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);

        /* read the corresponding file data */
        pread(fd, g2h(start), end - start, offset);

        /* put final protection */
        if (prot_new != (prot1 | PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot_new);
    } else {
        /* just update the protection */
        if (prot_new != prot1) {
            mprotect(host_start, qemu_host_page_size, prot_new);
        }
    }
    return 0;
}
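
/* target_mmap() maps a target range by splitting it into at most three parts:
   a leading fragment sharing its host page with other target pages, a trailing
   fragment, and a host-page-aligned middle that can be handed to the host
   mmap() directly. Fragments go through mmap_frag() above. */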
/* NOTE: all the constants are the HOST ones */
target_long target_mmap(target_ulong start, target_ulong len, int prot,
                        int flags, int fd, target_ulong offset)
{
    target_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len;
    unsigned long host_start;
#if defined(__alpha__) || defined(__sparc__) || defined(__x86_64__) || \
    defined(__ia64) || defined(__mips__)
    static target_ulong last_start = 0x40000000;
#elif defined(__CYGWIN__)
    /* Cygwin doesn't have a whole lot of address space. */
    static target_ulong last_start = 0x18000000;
#endif

#ifdef DEBUG_MMAP
    printf("mmap: start=0x" TARGET_FMT_lx
           " len=0x" TARGET_FMT_lx " prot=%c%c%c flags=",
           start, len,
           prot & PROT_READ ? 'r' : '-',
           prot & PROT_WRITE ? 'w' : '-',
           prot & PROT_EXEC ? 'x' : '-');
    if (flags & MAP_FIXED)
        printf("MAP_FIXED ");
    if (flags & MAP_ANONYMOUS)
        printf("MAP_ANON ");
    switch(flags & MAP_TYPE) {
    case MAP_PRIVATE:
        printf("MAP_PRIVATE ");
        break;
    case MAP_SHARED:
        printf("MAP_SHARED ");
        break;
    default:
        printf("[MAP_TYPE=0x%x] ", flags & MAP_TYPE);
        break;
    }
    printf("fd=%d offset=" TARGET_FMT_lx "\n", fd, offset);
#endif
    if (offset & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        return -1;
    }
    len = TARGET_PAGE_ALIGN(len);
    real_start = start & qemu_host_page_mask;
    if (!(flags & MAP_FIXED)) {
#if defined(__alpha__) || defined(__sparc__) || defined(__x86_64__) || \
    defined(__ia64) || defined(__mips__) || defined(__CYGWIN__)
        /* tell the kernel to search at the same place as i386 */
        if (real_start == 0) {
            real_start = last_start;
            last_start += HOST_PAGE_ALIGN(len);
        }
#endif
        host_offset = offset & qemu_host_page_mask;
        host_len = len + offset - host_offset;
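
        /* The host mmap() needs a host-page-aligned file offset, so the
           mapping is extended down to the previous host page boundary
           (host_offset) and host_len covers the extra leading bytes; 'start'
           is adjusted afterwards to point at the data for 'offset'. */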
        if (qemu_host_page_size > qemu_real_host_page_size) {
            /*
             * The guest expects to see mmapped areas aligned to its pagesize.
             * If the host's real page size is smaller than the guest's, we
             * need to fix up the maps. It is done by allocating a larger area,
             * displacing the map (if needed) and finally chopping off the
             * spare room at the edges.
             *
             * We assume qemu_host_page_size is always the same as
             * TARGET_PAGE_SIZE, see exec.c. qemu_real_host_page_size is the
             * host's real page size.
             */
            target_ulong host_end;
            unsigned long host_aligned_start;

            host_len = HOST_PAGE_ALIGN(host_len + qemu_host_page_size
                                       - qemu_real_host_page_size);
            host_start = (unsigned long) mmap(real_start ?
                                              g2h(real_start) : NULL,
                                              host_len, prot, flags,
                                              fd, host_offset);
            if (host_start == -1)
                return host_start;
            host_end = host_start + host_len;

            /* Find start and end, aligned to the target's pagesize within the
               large mmapped area. */
            host_aligned_start = TARGET_PAGE_ALIGN(host_start);
            if (!(flags & MAP_ANONYMOUS))
                host_aligned_start += offset - host_offset;
            start = h2g(host_aligned_start);
            end = start + TARGET_PAGE_ALIGN(len);

            /* Chop off the leftovers, if any. */
            if (host_aligned_start > host_start)
                munmap((void *)host_start, host_aligned_start - host_start);
            if (host_end > end)
                munmap((void *)g2h(end), host_end - end);
        } else {
            /* if not fixed, no need to do anything */
            host_start = (long)mmap(real_start ? g2h(real_start) : NULL,
                                    host_len, prot, flags, fd, host_offset);
            if (host_start == -1)
                return host_start;
            /* update start so that it points to the file position at 'offset' */
            if (!(flags & MAP_ANONYMOUS))
                host_start += offset - host_offset;
            start = h2g(host_start);
        }
        goto the_end1;
    }
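
    /* Everything below runs only for MAP_FIXED mappings: 'start' is the
       address requested by the guest and must be honoured exactly. */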
    if (start & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        return -1;
    }
    end = start + len;
    real_end = HOST_PAGE_ALIGN(end);

    /* worst case: we cannot map the file because the offset is not
       aligned, so we read it */
    if (!(flags & MAP_ANONYMOUS) &&
        (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
        /* msync() won't work here, so we return an error if write is
           possible while it is a shared mapping */
        if ((flags & MAP_TYPE) == MAP_SHARED &&
            (prot & PROT_WRITE)) {
            errno = EINVAL;
            return -1;
        }
        retaddr = target_mmap(start, len, prot | PROT_WRITE,
                              MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
                              -1, 0);
        if (retaddr == -1) return retaddr;
        pread(fd, g2h(start), len, offset);
        if (!(prot & PROT_WRITE)) {
            ret = target_mprotect(start, len, prot);
            if (ret != 0) return ret;
        }
        goto the_end;
    }
    /* handle the start of the mapping */
    if (start > real_start) {
        if (real_end == real_start + qemu_host_page_size) {
            /* one single host page */
            ret = mmap_frag(real_start, start, end,
                            prot, flags, fd, offset);
            if (ret == -1) return ret;
            goto the_end1;
        }
        ret = mmap_frag(real_start, start, real_start + qemu_host_page_size,
                        prot, flags, fd, offset);
        if (ret == -1) return ret;
        real_start += qemu_host_page_size;
    }
    /* handle the end of the mapping */
    if (end < real_end) {
        ret = mmap_frag(real_end - qemu_host_page_size,
                        real_end - qemu_host_page_size, real_end,
                        prot, flags, fd,
                        offset + real_end - qemu_host_page_size - start);
        if (ret == -1) return ret;
        real_end -= qemu_host_page_size;
    }

    /* map the middle (easier) */
    if (real_start < real_end) {
        unsigned long offset1;
        if (flags & MAP_ANONYMOUS)
            offset1 = 0;
        else
            offset1 = offset + real_start - start;
        ret = (long)mmap(g2h(real_start), real_end - real_start,
                         prot, flags, fd, offset1);
        if (ret == -1) return ret;
    }

 the_end1:
    page_set_flags(start, start + len, prot | PAGE_VALID);
 the_end:
#ifdef DEBUG_MMAP
    printf("ret=0x" TARGET_FMT_lx "\n", start);
#endif
    return start;
}
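
/* target_munmap() mirrors target_mmap(): a host page at either edge of the
   range is only released if none of the target pages it contains remain
   mapped; otherwise it is kept and only the target page flags are cleared. */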
int target_munmap(target_ulong start, target_ulong len)
{
    target_ulong end, real_start, real_end, addr;
    int prot, ret;
#ifdef DEBUG_MMAP
    printf("munmap: start=0x" TARGET_FMT_lx " len=0x" TARGET_FMT_lx "\n",
           start, len);
#endif
    if (start & ~TARGET_PAGE_MASK)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(end);
    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for(addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for(addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0)
            real_start += qemu_host_page_size;
    }
    if (end < real_end) {
        prot = 0;
        for(addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0)
            real_end -= qemu_host_page_size;
    }
    /* unmap what we can */
    if (real_start < real_end) {
        ret = munmap(g2h(real_start), real_end - real_start);
        if (ret != 0) return ret;
    }
    page_set_flags(start, start + len, 0);
    return 0;
}

/* XXX: currently, we only handle MAP_ANONYMOUS and not MAP_FIXED
   blocks which have been allocated starting on a host page */
target_long target_mremap(target_ulong old_addr, target_ulong old_size,
                          target_ulong new_size, unsigned long flags,
                          target_ulong new_addr)
{
    int prot;
    unsigned long host_addr;

    /* XXX: use 5 args syscall */
    host_addr = (long)mremap(g2h(old_addr), old_size, new_size, flags);
    if (host_addr == -1)
        return -1;
    new_addr = h2g(host_addr);
    prot = page_get_flags(old_addr);
    page_set_flags(old_addr, old_addr + old_size, 0);
    page_set_flags(new_addr, new_addr + new_size, prot | PAGE_VALID);
    return new_addr;
}
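
/* The host msync() wants a host-page-aligned address, hence the rounding of
   'start' down to a host page boundary below; the length is extended
   accordingly so the whole target range is still covered. */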
int target_msync(target_ulong start, target_ulong len, int flags)
{
    target_ulong end;
    if (start & ~TARGET_PAGE_MASK)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    start &= qemu_host_page_mask;
    return msync(g2h(start), end - start, flags);
}