/*
 *  mmap support for qemu
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "qemu.h"
#include "qemu-common.h"
#include "translate-all.h"

//#define DEBUG_MMAP

/* Protects the guest page tables and host mappings against concurrent
   modification.  Taken recursively within one thread: the per-thread
   counter below tracks the nesting depth so only the outermost
   mmap_lock()/mmap_unlock() pair touches the mutex.  */
static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
/* Per-thread recursion depth for mmap_lock().  */
static __thread int mmap_lock_count;
30void mmap_lock(void)
31{
32 if (mmap_lock_count++ == 0) {
33 pthread_mutex_lock(&mmap_mutex);
34 }
35}
36
37void mmap_unlock(void)
38{
39 if (--mmap_lock_count == 0) {
40 pthread_mutex_unlock(&mmap_mutex);
41 }
42}
d5975363 43
301e40ed
AB
44bool have_mmap_lock(void)
45{
46 return mmap_lock_count > 0 ? true : false;
47}
48
d5975363
PB
49/* Grab lock to make sure things are in a consistent state after fork(). */
50void mmap_fork_start(void)
51{
52 if (mmap_lock_count)
53 abort();
54 pthread_mutex_lock(&mmap_mutex);
55}
56
57void mmap_fork_end(int child)
58{
59 if (child)
60 pthread_mutex_init(&mmap_mutex, NULL);
61 else
62 pthread_mutex_unlock(&mmap_mutex);
63}
c8a706fe 64
/* NOTE: all the constants are the HOST ones, but addresses are target. */
/*
 * Change protection of the guest range [start, start+len).
 *
 * Guest (target) pages can be smaller than host pages, so a host page
 * at either edge of the range may also contain guest pages outside the
 * range.  Such edge host pages get the union of the requested
 * protection and the flags of every other guest page they hold, so
 * those neighbours stay accessible.
 *
 * Returns 0 on success, -TARGET_EINVAL for an unaligned start,
 * -TARGET_ENOMEM for a range outside the guest address space, or the
 * host mprotect() return value on failure.
 */
int target_mprotect(abi_ulong start, abi_ulong len, int prot)
{
    abi_ulong end, host_start, host_end, addr;
    int prot1, ret;

#ifdef DEBUG_MMAP
    printf("mprotect: start=0x" TARGET_ABI_FMT_lx
           "len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c\n", start, len,
           prot & PROT_READ ? 'r' : '-',
           prot & PROT_WRITE ? 'w' : '-',
           prot & PROT_EXEC ? 'x' : '-');
#endif

    if ((start & ~TARGET_PAGE_MASK) != 0)
        return -TARGET_EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (!guest_range_valid(start, len)) {
        return -TARGET_ENOMEM;
    }
    prot &= PROT_READ | PROT_WRITE | PROT_EXEC;
    if (len == 0)
        return 0;

    mmap_lock();
    host_start = start & qemu_host_page_mask;
    host_end = HOST_PAGE_ALIGN(end);
    if (start > host_start) {
        /* handle host page containing start */
        prot1 = prot;
        /* OR in the flags of the guest pages before 'start' that share
           this host page.  */
        for(addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        if (host_end == host_start + qemu_host_page_size) {
            /* The whole range lives in one host page; fold in the guest
               pages after 'end' as well and finish with this page.  */
            for(addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(addr);
            }
            end = host_end;
        }
        ret = mprotect(g2h(host_start), qemu_host_page_size, prot1 & PAGE_BITS);
        if (ret != 0)
            goto error;
        host_start += qemu_host_page_size;
    }
    if (end < host_end) {
        /* handle host page containing 'end' (range starts mid-page) */
        prot1 = prot;
        for(addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        ret = mprotect(g2h(host_end - qemu_host_page_size), qemu_host_page_size,
                       prot1 & PAGE_BITS);
        if (ret != 0)
            goto error;
        host_end -= qemu_host_page_size;
    }

    /* handle the pages in the middle */
    if (host_start < host_end) {
        ret = mprotect(g2h(host_start), host_end - host_start, prot);
        if (ret != 0)
            goto error;
    }
    /* Record the new protection in the guest page table.  */
    page_set_flags(start, start + len, prot | PAGE_VALID);
    mmap_unlock();
    return 0;
error:
    mmap_unlock();
    return ret;
}
135
136/* map an incomplete host page */
992f48a0
BS
137static int mmap_frag(abi_ulong real_start,
138 abi_ulong start, abi_ulong end,
139 int prot, int flags, int fd, abi_ulong offset)
54936004 140{
80210bcd 141 abi_ulong real_end, addr;
53a5960a 142 void *host_start;
54936004
FB
143 int prot1, prot_new;
144
53a5960a
PB
145 real_end = real_start + qemu_host_page_size;
146 host_start = g2h(real_start);
54936004
FB
147
148 /* get the protection of the target pages outside the mapping */
149 prot1 = 0;
53a5960a 150 for(addr = real_start; addr < real_end; addr++) {
54936004
FB
151 if (addr < start || addr >= end)
152 prot1 |= page_get_flags(addr);
153 }
3b46e624 154
54936004
FB
155 if (prot1 == 0) {
156 /* no page was there, so we allocate one */
80210bcd
TS
157 void *p = mmap(host_start, qemu_host_page_size, prot,
158 flags | MAP_ANONYMOUS, -1, 0);
159 if (p == MAP_FAILED)
160 return -1;
53a5960a 161 prot1 = prot;
54936004
FB
162 }
163 prot1 &= PAGE_BITS;
164
165 prot_new = prot | prot1;
166 if (!(flags & MAP_ANONYMOUS)) {
167 /* msync() won't work here, so we return an error if write is
168 possible while it is a shared mapping */
169 if ((flags & MAP_TYPE) == MAP_SHARED &&
170 (prot & PROT_WRITE))
ee636500 171 return -1;
54936004
FB
172
173 /* adjust protection to be able to read */
174 if (!(prot1 & PROT_WRITE))
53a5960a 175 mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);
3b46e624 176
54936004 177 /* read the corresponding file data */
fb7e378c
KS
178 if (pread(fd, g2h(start), end - start, offset) == -1)
179 return -1;
3b46e624 180
54936004
FB
181 /* put final protection */
182 if (prot_new != (prot1 | PROT_WRITE))
53a5960a 183 mprotect(host_start, qemu_host_page_size, prot_new);
54936004 184 } else {
54936004 185 if (prot_new != prot1) {
53a5960a 186 mprotect(host_start, qemu_host_page_size, prot_new);
54936004 187 }
e6deac9c
CG
188 if (prot_new & PROT_WRITE) {
189 memset(g2h(start), 0, end - start);
190 }
54936004
FB
191 }
192 return 0;
193}
/* Default lowest address at which mmap_find_vma() starts searching when
   the guest does not request a specific address.  */
#if HOST_LONG_BITS == 64 && TARGET_ABI_BITS == 64
# define TASK_UNMAPPED_BASE (1ul << 38)
#else
# define TASK_UNMAPPED_BASE 0x40000000
#endif
/* Next candidate address for an anonymous search; advanced on success.  */
abi_ulong mmap_next_start = TASK_UNMAPPED_BASE;

/* NOTE(review): last_brk is not referenced in this file; presumably
   maintained by the brk() emulation elsewhere — confirm before use.  */
unsigned long last_brk;
/* Subroutine of mmap_find_vma, used when we have pre-allocated a chunk
   of guest address space.  */
/*
 * Scan downward from the top of the reserved area for 'size' contiguous
 * bytes of unused guest pages.  'end_addr' tracks the upper bound of the
 * current candidate window and is pulled down whenever a used page is
 * found.  The scan wraps around to the top of the reserved area once;
 * a second wrap means the space is exhausted.
 * Returns the found address, or (abi_ulong)-1 on failure.
 */
static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size)
{
    abi_ulong addr;
    abi_ulong end_addr;
    int prot;
    int looped = 0;

    if (size > reserved_va) {
        return (abi_ulong)-1;
    }

    size = HOST_PAGE_ALIGN(size);
    end_addr = start + size;
    if (end_addr > reserved_va) {
        end_addr = reserved_va;
    }
    addr = end_addr - qemu_host_page_size;

    while (1) {
        /* 'addr' is unsigned, so decrementing below zero wraps to a huge
           value and trips this test — i.e. we ran off the bottom.  */
        if (addr > end_addr) {
            if (looped) {
                /* Second wrap: no hole of the required size exists.  */
                return (abi_ulong)-1;
            }
            end_addr = reserved_va;
            addr = end_addr - qemu_host_page_size;
            looped = 1;
            continue;
        }
        prot = page_get_flags(addr);
        if (prot) {
            /* Page in use: restart the window just below it.  */
            end_addr = addr;
        }
        if (addr && addr + size == end_addr) {
            /* Window [addr, end_addr) is exactly 'size' free bytes.  */
            break;
        }
        addr -= qemu_host_page_size;
    }

    if (start == mmap_next_start) {
        mmap_next_start = addr;
    }

    return addr;
}
/*
 * Find and reserve a free memory area of size 'size'. The search
 * starts at 'start'.
 * It must be called with mmap_lock() held.
 * Return -1 if error.
 *
 * The area is "reserved" by actually mmap()ing it PROT_NONE so no other
 * thread can grab it; the caller is expected to replace the reservation
 * with MAP_FIXED / MREMAP_FIXED / SHM_REMAP.
 */
abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size)
{
    void *ptr, *prev;
    abi_ulong addr;
    int wrapped, repeat;

    /* If 'start' == 0, then a default start address is used. */
    if (start == 0) {
        start = mmap_next_start;
    } else {
        start &= qemu_host_page_mask;
    }

    size = HOST_PAGE_ALIGN(size);

    if (reserved_va) {
        return mmap_find_vma_reserved(start, size);
    }

    addr = start;
    wrapped = repeat = 0;
    prev = 0;

    for (;; prev = ptr) {
        /*
         * Reserve needed memory area to avoid a race.
         * It should be discarded using:
         *  - mmap() with MAP_FIXED flag
         *  - mremap() with MREMAP_FIXED flag
         *  - shmat() with SHM_REMAP flag
         */
        ptr = mmap(g2h(addr), size, PROT_NONE,
                   MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);

        /* ENOMEM, if host address space has no memory */
        if (ptr == MAP_FAILED) {
            return (abi_ulong)-1;
        }

        /* Count the number of sequential returns of the same address.
           This is used to modify the search algorithm below.  */
        repeat = (ptr == prev ? repeat + 1 : 0);

        if (h2g_valid(ptr + size - 1)) {
            addr = h2g(ptr);

            if ((addr & ~TARGET_PAGE_MASK) == 0) {
                /* Success.  */
                if (start == mmap_next_start && addr >= TASK_UNMAPPED_BASE) {
                    mmap_next_start = addr + size;
                }
                return addr;
            }

            /* The address is not properly aligned for the target.  */
            switch (repeat) {
            case 0:
                /* Assume the result that the kernel gave us is the
                   first with enough free space, so start again at the
                   next higher target page.  */
                addr = TARGET_PAGE_ALIGN(addr);
                break;
            case 1:
                /* Sometimes the kernel decides to perform the allocation
                   at the top end of memory instead.  */
                addr &= TARGET_PAGE_MASK;
                break;
            case 2:
                /* Start over at low memory.  */
                addr = 0;
                break;
            default:
                /* Fail.  This unaligned block must be the last.  */
                addr = -1;
                break;
            }
        } else {
            /* Since the result the kernel gave didn't fit, start
               again at low memory.  If any repetition, fail.  */
            addr = (repeat ? -1 : 0);
        }

        /* Unmap and try again.  */
        munmap(ptr, size);

        /* ENOMEM if we checked the whole of the target address space.  */
        if (addr == (abi_ulong)-1) {
            return (abi_ulong)-1;
        } else if (addr == 0) {
            if (wrapped) {
                return (abi_ulong)-1;
            }
            wrapped = 1;
            /* Don't actually use 0 when wrapping, instead indicate
               that we'd truly like an allocation in low memory.  */
            addr = (mmap_min_addr > TARGET_PAGE_SIZE
                    ? TARGET_PAGE_ALIGN(mmap_min_addr)
                    : TARGET_PAGE_SIZE);
        } else if (wrapped && addr >= start) {
            return (abi_ulong)-1;
        }
    }
}
/* NOTE: all the constants are the HOST ones */
/*
 * Emulate mmap() for the guest.
 *
 * The hard cases come from guest pages differing in size from host
 * pages: host pages that only partially belong to the new mapping are
 * handled by mmap_frag(), an unaligned file offset is emulated by an
 * anonymous mapping filled with pread(), and only the fully-covered
 * middle host pages are mapped directly.
 *
 * Returns the guest address on success, or -1 with errno set.
 */
abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
                     int flags, int fd, abi_ulong offset)
{
    abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len;

    mmap_lock();
#ifdef DEBUG_MMAP
    {
        printf("mmap: start=0x" TARGET_ABI_FMT_lx
               " len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c flags=",
               start, len,
               prot & PROT_READ ? 'r' : '-',
               prot & PROT_WRITE ? 'w' : '-',
               prot & PROT_EXEC ? 'x' : '-');
        if (flags & MAP_FIXED)
            printf("MAP_FIXED ");
        if (flags & MAP_ANONYMOUS)
            printf("MAP_ANON ");
        switch(flags & MAP_TYPE) {
        case MAP_PRIVATE:
            printf("MAP_PRIVATE ");
            break;
        case MAP_SHARED:
            printf("MAP_SHARED ");
            break;
        default:
            printf("[MAP_TYPE=0x%x] ", flags & MAP_TYPE);
            break;
        }
        printf("fd=%d offset=" TARGET_ABI_FMT_lx "\n", fd, offset);
    }
#endif

    if (offset & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        goto fail;
    }

    len = TARGET_PAGE_ALIGN(len);
    if (len == 0)
        goto the_end;
    real_start = start & qemu_host_page_mask;
    host_offset = offset & qemu_host_page_mask;

    /* If the user is asking for the kernel to find a location, do that
       before we truncate the length for mapping files below.  */
    if (!(flags & MAP_FIXED)) {
        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);
        start = mmap_find_vma(real_start, host_len);
        if (start == (abi_ulong)-1) {
            errno = ENOMEM;
            goto fail;
        }
    }

    /* When mapping files into a memory area larger than the file, accesses
       to pages beyond the file size will cause a SIGBUS.

       For example, if mmaping a file of 100 bytes on a host with 4K pages
       emulating a target with 8K pages, the target expects to be able to
       access the first 8K. But the host will trap us on any access beyond
       4K.

       When emulating a target with a larger page-size than the hosts, we
       may need to truncate file maps at EOF and add extra anonymous pages
       up to the targets page boundary.  */

    if ((qemu_real_host_page_size < qemu_host_page_size) &&
        !(flags & MAP_ANONYMOUS)) {
        struct stat sb;

        if (fstat (fd, &sb) == -1)
            goto fail;

        /* Are we trying to create a map beyond EOF?.  */
        if (offset + len > sb.st_size) {
            /* If so, truncate the file map at eof aligned with
               the hosts real pagesize. Additional anonymous maps
               will be created beyond EOF.  */
            len = REAL_HOST_PAGE_ALIGN(sb.st_size - offset);
        }
    }

    if (!(flags & MAP_FIXED)) {
        unsigned long host_start;
        void *p;

        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);

        /* Note: we prefer to control the mapping address. It is
           especially important if qemu_host_page_size >
           qemu_real_host_page_size */
        p = mmap(g2h(start), host_len, prot,
                 flags | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            goto fail;
        /* update start so that it points to the file position at 'offset' */
        host_start = (unsigned long)p;
        if (!(flags & MAP_ANONYMOUS)) {
            /* Overlay the real file mapping on the reservation.  */
            p = mmap(g2h(start), len, prot,
                     flags | MAP_FIXED, fd, host_offset);
            if (p == MAP_FAILED) {
                munmap(g2h(start), host_len);
                goto fail;
            }
            host_start += offset - host_offset;
        }
        start = h2g(host_start);
    } else {
        if (start & ~TARGET_PAGE_MASK) {
            errno = EINVAL;
            goto fail;
        }
        end = start + len;
        real_end = HOST_PAGE_ALIGN(end);

        /*
         * Test if requested memory area fits target address space
         * It can fail only on 64-bit host with 32-bit target.
         * On any other target/host host mmap() handles this error correctly.
         */
        if (!guest_range_valid(start, len)) {
            errno = ENOMEM;
            goto fail;
        }

        /* worst case: we cannot map the file because the offset is not
           aligned, so we read it */
        if (!(flags & MAP_ANONYMOUS) &&
            (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
            /* msync() won't work here, so we return an error if write is
               possible while it is a shared mapping */
            if ((flags & MAP_TYPE) == MAP_SHARED &&
                (prot & PROT_WRITE)) {
                errno = EINVAL;
                goto fail;
            }
            /* Anonymous writable mapping, then copy the file contents.  */
            retaddr = target_mmap(start, len, prot | PROT_WRITE,
                                  MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
                                  -1, 0);
            if (retaddr == -1)
                goto fail;
            if (pread(fd, g2h(start), len, offset) == -1)
                goto fail;
            if (!(prot & PROT_WRITE)) {
                ret = target_mprotect(start, len, prot);
                assert(ret == 0);
            }
            goto the_end;
        }

        /* handle the start of the mapping */
        if (start > real_start) {
            if (real_end == real_start + qemu_host_page_size) {
                /* one single host page */
                ret = mmap_frag(real_start, start, end,
                                prot, flags, fd, offset);
                if (ret == -1)
                    goto fail;
                goto the_end1;
            }
            ret = mmap_frag(real_start, start, real_start + qemu_host_page_size,
                            prot, flags, fd, offset);
            if (ret == -1)
                goto fail;
            real_start += qemu_host_page_size;
        }
        /* handle the end of the mapping */
        if (end < real_end) {
            ret = mmap_frag(real_end - qemu_host_page_size,
                            real_end - qemu_host_page_size, end,
                            prot, flags, fd,
                            offset + real_end - qemu_host_page_size - start);
            if (ret == -1)
                goto fail;
            real_end -= qemu_host_page_size;
        }

        /* map the middle (easier) */
        if (real_start < real_end) {
            void *p;
            unsigned long offset1;
            if (flags & MAP_ANONYMOUS)
                offset1 = 0;
            else
                offset1 = offset + real_start - start;
            p = mmap(g2h(real_start), real_end - real_start,
                     prot, flags, fd, offset1);
            if (p == MAP_FAILED)
                goto fail;
        }
    }
 the_end1:
    page_set_flags(start, start + len, prot | PAGE_VALID);
 the_end:
#ifdef DEBUG_MMAP
    printf("ret=0x" TARGET_ABI_FMT_lx "\n", start);
    page_dump(stdout);
    printf("\n");
#endif
    /* New code may now live in these pages; drop stale translations.  */
    tb_invalidate_phys_range(start, start + len);
    mmap_unlock();
    return start;
fail:
    mmap_unlock();
    return -1;
}
/*
 * Re-establish the PROT_NONE reservation over the guest range
 * [start, start+size) when a pre-reserved guest address space
 * (reserved_va) is in use: instead of munmap()ing, the range is
 * remapped anonymous/PROT_NONE so no foreign mapping can slip in.
 * Edge host pages that still contain live guest pages outside the
 * range are left untouched.  Called with the mmap lock held by
 * target_munmap()/target_mremap().
 */
static void mmap_reserve(abi_ulong start, abi_ulong size)
{
    abi_ulong real_start;
    abi_ulong real_end;
    abi_ulong addr;
    abi_ulong end;
    int prot;

    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(start + size);
    end = start + size;
    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        /* Neighbouring guest pages still live: skip this host page.  */
        if (prot != 0)
            real_start += qemu_host_page_size;
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0)
            real_end -= qemu_host_page_size;
    }
    if (real_start != real_end) {
        mmap(g2h(real_start), real_end - real_start, PROT_NONE,
             MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE,
             -1, 0);
    }
}
/*
 * Emulate munmap() for the guest range [start, start+len).
 *
 * Host pages at the edges that still contain live guest pages outside
 * the range are preserved; only fully-freed host pages are unmapped
 * (or re-reserved when reserved_va is in use).
 *
 * Returns 0 on success, -TARGET_EINVAL on bad arguments, or the host
 * munmap() return value.
 */
int target_munmap(abi_ulong start, abi_ulong len)
{
    abi_ulong end, real_start, real_end, addr;
    int prot, ret;

#ifdef DEBUG_MMAP
    printf("munmap: start=0x" TARGET_ABI_FMT_lx " len=0x"
           TARGET_ABI_FMT_lx "\n",
           start, len);
#endif
    if (start & ~TARGET_PAGE_MASK)
        return -TARGET_EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0 || !guest_range_valid(start, len)) {
        return -TARGET_EINVAL;
    }

    mmap_lock();
    end = start + len;
    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(end);

    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for(addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for(addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        /* Other guest pages still live in this host page: keep it.  */
        if (prot != 0)
            real_start += qemu_host_page_size;
    }
    if (end < real_end) {
        prot = 0;
        for(addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0)
            real_end -= qemu_host_page_size;
    }

    ret = 0;
    /* unmap what we can */
    if (real_start < real_end) {
        if (reserved_va) {
            /* Keep the reservation; just neutralize the pages.  */
            mmap_reserve(real_start, real_end - real_start);
        } else {
            ret = munmap(g2h(real_start), real_end - real_start);
        }
    }

    if (ret == 0) {
        page_set_flags(start, start + len, 0);
        /* Drop any translated code cached for the unmapped range.  */
        tb_invalidate_phys_range(start, start + len);
    }
    mmap_unlock();
    return ret;
}
676
992f48a0
BS
677abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
678 abi_ulong new_size, unsigned long flags,
679 abi_ulong new_addr)
54936004
FB
680{
681 int prot;
f19412a2 682 void *host_addr;
54936004 683
ebf9a363
MF
684 if (!guest_range_valid(old_addr, old_size) ||
685 ((flags & MREMAP_FIXED) &&
686 !guest_range_valid(new_addr, new_size))) {
687 errno = ENOMEM;
688 return -1;
689 }
690
c8a706fe 691 mmap_lock();
f19412a2 692
68a1c816 693 if (flags & MREMAP_FIXED) {
52956a9b
FJ
694 host_addr = mremap(g2h(old_addr), old_size, new_size,
695 flags, g2h(new_addr));
68a1c816 696
b76f21a7 697 if (reserved_va && host_addr != MAP_FAILED) {
68a1c816
PB
698 /* If new and old addresses overlap then the above mremap will
699 already have failed with EINVAL. */
700 mmap_reserve(old_addr, old_size);
701 }
702 } else if (flags & MREMAP_MAYMOVE) {
f19412a2
AJ
703 abi_ulong mmap_start;
704
705 mmap_start = mmap_find_vma(0, new_size);
706
707 if (mmap_start == -1) {
708 errno = ENOMEM;
709 host_addr = MAP_FAILED;
68a1c816 710 } else {
52956a9b
FJ
711 host_addr = mremap(g2h(old_addr), old_size, new_size,
712 flags | MREMAP_FIXED, g2h(mmap_start));
b76f21a7 713 if (reserved_va) {
c65ffe6d 714 mmap_reserve(old_addr, old_size);
715 }
68a1c816 716 }
3af72a4d 717 } else {
68a1c816 718 int prot = 0;
b76f21a7 719 if (reserved_va && old_size < new_size) {
68a1c816
PB
720 abi_ulong addr;
721 for (addr = old_addr + old_size;
722 addr < old_addr + new_size;
723 addr++) {
724 prot |= page_get_flags(addr);
725 }
726 }
727 if (prot == 0) {
728 host_addr = mremap(g2h(old_addr), old_size, new_size, flags);
b76f21a7 729 if (host_addr != MAP_FAILED && reserved_va && old_size > new_size) {
68a1c816
PB
730 mmap_reserve(old_addr + old_size, new_size - old_size);
731 }
732 } else {
733 errno = ENOMEM;
734 host_addr = MAP_FAILED;
735 }
f19412a2
AJ
736 /* Check if address fits target address space */
737 if ((unsigned long)host_addr + new_size > (abi_ulong)-1) {
738 /* Revert mremap() changes */
739 host_addr = mremap(g2h(old_addr), new_size, old_size, flags);
740 errno = ENOMEM;
741 host_addr = MAP_FAILED;
742 }
743 }
744
745 if (host_addr == MAP_FAILED) {
c8a706fe
PB
746 new_addr = -1;
747 } else {
748 new_addr = h2g(host_addr);
749 prot = page_get_flags(old_addr);
750 page_set_flags(old_addr, old_addr + old_size, 0);
751 page_set_flags(new_addr, new_addr + new_size, prot | PAGE_VALID);
752 }
35865339 753 tb_invalidate_phys_range(new_addr, new_addr + new_size);
c8a706fe 754 mmap_unlock();
54936004
FB
755 return new_addr;
756}