4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
32 #include <sys/types.h>
38 #include <sys/mount.h>
40 #include <sys/fsuid.h>
41 #include <sys/personality.h>
42 #include <sys/prctl.h>
43 #include <sys/resource.h>
46 #include <linux/capability.h>
50 int __clone2(int (*fn)(void *), void *child_stack_base,
51 size_t stack_size, int flags, void *arg, ...);
53 #include <sys/socket.h>
57 #include <sys/times.h>
60 #include <sys/statfs.h>
62 #include <sys/sysinfo.h>
63 //#include <sys/user.h>
64 #include <netinet/ip.h>
65 #include <netinet/tcp.h>
66 #include <linux/wireless.h>
67 #include <linux/icmp.h>
68 #include "qemu-common.h"
73 #include <sys/eventfd.h>
76 #include <sys/epoll.h>
79 #include "qemu/xattr.h"
81 #ifdef CONFIG_SENDFILE
82 #include <sys/sendfile.h>
85 #define termios host_termios
86 #define winsize host_winsize
87 #define termio host_termio
88 #define sgttyb host_sgttyb /* same as target */
89 #define tchars host_tchars /* same as target */
90 #define ltchars host_ltchars /* same as target */
92 #include <linux/termios.h>
93 #include <linux/unistd.h>
94 #include <linux/cdrom.h>
95 #include <linux/hdreg.h>
96 #include <linux/soundcard.h>
98 #include <linux/mtio.h>
100 #if defined(CONFIG_FIEMAP)
101 #include <linux/fiemap.h>
103 #include <linux/fb.h>
104 #include <linux/vt.h>
105 #include <linux/dm-ioctl.h>
106 #include <linux/reboot.h>
107 #include <linux/route.h>
108 #include <linux/filter.h>
109 #include <linux/blkpg.h>
110 #include "linux_loop.h"
115 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
116 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
120 //#include <linux/msdos_fs.h>
121 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
122 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
133 #define _syscall0(type,name) \
134 static type name (void) \
136 return syscall(__NR_##name); \
139 #define _syscall1(type,name,type1,arg1) \
140 static type name (type1 arg1) \
142 return syscall(__NR_##name, arg1); \
145 #define _syscall2(type,name,type1,arg1,type2,arg2) \
146 static type name (type1 arg1,type2 arg2) \
148 return syscall(__NR_##name, arg1, arg2); \
151 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
152 static type name (type1 arg1,type2 arg2,type3 arg3) \
154 return syscall(__NR_##name, arg1, arg2, arg3); \
157 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
158 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
160 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
163 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
165 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
167 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
171 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
172 type5,arg5,type6,arg6) \
173 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
176 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
180 #define __NR_sys_uname __NR_uname
181 #define __NR_sys_getcwd1 __NR_getcwd
182 #define __NR_sys_getdents __NR_getdents
183 #define __NR_sys_getdents64 __NR_getdents64
184 #define __NR_sys_getpriority __NR_getpriority
185 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
186 #define __NR_sys_syslog __NR_syslog
187 #define __NR_sys_tgkill __NR_tgkill
188 #define __NR_sys_tkill __NR_tkill
189 #define __NR_sys_futex __NR_futex
190 #define __NR_sys_inotify_init __NR_inotify_init
191 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
192 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
194 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
196 #define __NR__llseek __NR_lseek
199 /* Newer kernel ports have llseek() instead of _llseek() */
200 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
201 #define TARGET_NR__llseek TARGET_NR_llseek
205 _syscall0(int, gettid)
207 /* This is a replacement for the host gettid() and must return a host
209 static int gettid(void) {
214 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
216 #if !defined(__NR_getdents) || \
217 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
218 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
220 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
221 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
222 loff_t *, res, uint, wh);
224 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
225 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
226 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
227 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
229 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
230 _syscall2(int,sys_tkill,int,tid,int,sig)
232 #ifdef __NR_exit_group
233 _syscall1(int,exit_group,int,error_code)
235 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
236 _syscall1(int,set_tid_address,int *,tidptr)
238 #if defined(TARGET_NR_futex) && defined(__NR_futex)
239 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
240 const struct timespec *,timeout,int *,uaddr2,int,val3)
242 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
243 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
244 unsigned long *, user_mask_ptr);
245 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
246 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
247 unsigned long *, user_mask_ptr);
248 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
250 _syscall2(int, capget, struct __user_cap_header_struct *, header,
251 struct __user_cap_data_struct *, data);
252 _syscall2(int, capset, struct __user_cap_header_struct *, header,
253 struct __user_cap_data_struct *, data);
255 static bitmask_transtbl fcntl_flags_tbl[] = {
256 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
257 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
258 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
259 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
260 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
261 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
262 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
263 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
264 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
265 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
266 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
267 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
268 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
269 #if defined(O_DIRECT)
270 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
272 #if defined(O_NOATIME)
273 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
275 #if defined(O_CLOEXEC)
276 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
279 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
281 /* Don't terminate the list prematurely on 64-bit host+guest. */
282 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
283 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
/* Wrapper for getcwd(3) that reports length instead of a pointer.
 *
 * Returns strlen(buf) + 1 (i.e. the byte count including the NUL
 * terminator, which is what the getcwd syscall returns) on success,
 * or -1 with errno set by getcwd() on failure.
 */
static int sys_getcwd1(char *buf, size_t size)
{
  if (getcwd(buf, size) == NULL) {
      /* getcwd() sets errno */
      return (-1);
  }
  return strlen(buf)+1;
}
297 static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode)
300 * open(2) has extra parameter 'mode' when called with
303 if ((flags & O_CREAT) != 0) {
304 return (openat(dirfd, pathname, flags, mode));
306 return (openat(dirfd, pathname, flags));
#ifdef TARGET_NR_utimensat
#ifdef CONFIG_UTIMENSAT
/* Host libc provides utimensat()/futimens(); a NULL pathname means
 * "operate on the fd itself", matching the kernel's utimensat ABI. */
static int sys_utimensat(int dirfd, const char *pathname,
    const struct timespec times[2], int flags)
{
    if (pathname == NULL)
        return futimens(dirfd, times);
    else
        return utimensat(dirfd, pathname, times, flags);
}
#elif defined(__NR_utimensat)
/* No libc wrapper; invoke the raw syscall. */
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
/* Neither libc nor kernel support: fail with ENOSYS so the guest
 * sees the usual "not implemented" error. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */
333 #ifdef CONFIG_INOTIFY
334 #include <sys/inotify.h>
336 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
337 static int sys_inotify_init(void)
339 return (inotify_init());
342 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
343 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
345 return (inotify_add_watch(fd, pathname, mask));
348 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
349 static int sys_inotify_rm_watch(int fd, int32_t wd)
351 return (inotify_rm_watch(fd, wd));
354 #ifdef CONFIG_INOTIFY1
355 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
356 static int sys_inotify_init1(int flags)
358 return (inotify_init1(flags));
363 /* Userspace can usually survive runtime without inotify */
364 #undef TARGET_NR_inotify_init
365 #undef TARGET_NR_inotify_init1
366 #undef TARGET_NR_inotify_add_watch
367 #undef TARGET_NR_inotify_rm_watch
368 #endif /* CONFIG_INOTIFY */
370 #if defined(TARGET_NR_ppoll)
372 # define __NR_ppoll -1
374 #define __NR_sys_ppoll __NR_ppoll
375 _syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
376 struct timespec *, timeout, const sigset_t *, sigmask,
380 #if defined(TARGET_NR_pselect6)
381 #ifndef __NR_pselect6
382 # define __NR_pselect6 -1
384 #define __NR_sys_pselect6 __NR_pselect6
385 _syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
386 fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
389 #if defined(TARGET_NR_prlimit64)
390 #ifndef __NR_prlimit64
391 # define __NR_prlimit64 -1
393 #define __NR_sys_prlimit64 __NR_prlimit64
394 /* The glibc rlimit structure may not be that used by the underlying syscall */
395 struct host_rlimit64 {
399 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
400 const struct host_rlimit64 *, new_limit,
401 struct host_rlimit64 *, old_limit)
405 #if defined(TARGET_NR_timer_create)
406 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
407 static timer_t g_posix_timers[32] = { 0, } ;
409 static inline int next_free_host_timer(void)
412 /* FIXME: Does finding the next free slot require a lock? */
413 for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
414 if (g_posix_timers[k] == 0) {
415 g_posix_timers[k] = (timer_t) 1;
/* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env) {
    /* Only the EABI variant requires pair alignment; OABI does not. */
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS)
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env) { return 0; }
#endif
439 #define ERRNO_TABLE_SIZE 1200
441 /* target_to_host_errno_table[] is initialized from
442 * host_to_target_errno_table[] in syscall_init(). */
443 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
447 * This list is the union of errno values overridden in asm-<arch>/errno.h
448 * minus the errnos that are not actually generic to all archs.
450 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
451 [EIDRM] = TARGET_EIDRM,
452 [ECHRNG] = TARGET_ECHRNG,
453 [EL2NSYNC] = TARGET_EL2NSYNC,
454 [EL3HLT] = TARGET_EL3HLT,
455 [EL3RST] = TARGET_EL3RST,
456 [ELNRNG] = TARGET_ELNRNG,
457 [EUNATCH] = TARGET_EUNATCH,
458 [ENOCSI] = TARGET_ENOCSI,
459 [EL2HLT] = TARGET_EL2HLT,
460 [EDEADLK] = TARGET_EDEADLK,
461 [ENOLCK] = TARGET_ENOLCK,
462 [EBADE] = TARGET_EBADE,
463 [EBADR] = TARGET_EBADR,
464 [EXFULL] = TARGET_EXFULL,
465 [ENOANO] = TARGET_ENOANO,
466 [EBADRQC] = TARGET_EBADRQC,
467 [EBADSLT] = TARGET_EBADSLT,
468 [EBFONT] = TARGET_EBFONT,
469 [ENOSTR] = TARGET_ENOSTR,
470 [ENODATA] = TARGET_ENODATA,
471 [ETIME] = TARGET_ETIME,
472 [ENOSR] = TARGET_ENOSR,
473 [ENONET] = TARGET_ENONET,
474 [ENOPKG] = TARGET_ENOPKG,
475 [EREMOTE] = TARGET_EREMOTE,
476 [ENOLINK] = TARGET_ENOLINK,
477 [EADV] = TARGET_EADV,
478 [ESRMNT] = TARGET_ESRMNT,
479 [ECOMM] = TARGET_ECOMM,
480 [EPROTO] = TARGET_EPROTO,
481 [EDOTDOT] = TARGET_EDOTDOT,
482 [EMULTIHOP] = TARGET_EMULTIHOP,
483 [EBADMSG] = TARGET_EBADMSG,
484 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
485 [EOVERFLOW] = TARGET_EOVERFLOW,
486 [ENOTUNIQ] = TARGET_ENOTUNIQ,
487 [EBADFD] = TARGET_EBADFD,
488 [EREMCHG] = TARGET_EREMCHG,
489 [ELIBACC] = TARGET_ELIBACC,
490 [ELIBBAD] = TARGET_ELIBBAD,
491 [ELIBSCN] = TARGET_ELIBSCN,
492 [ELIBMAX] = TARGET_ELIBMAX,
493 [ELIBEXEC] = TARGET_ELIBEXEC,
494 [EILSEQ] = TARGET_EILSEQ,
495 [ENOSYS] = TARGET_ENOSYS,
496 [ELOOP] = TARGET_ELOOP,
497 [ERESTART] = TARGET_ERESTART,
498 [ESTRPIPE] = TARGET_ESTRPIPE,
499 [ENOTEMPTY] = TARGET_ENOTEMPTY,
500 [EUSERS] = TARGET_EUSERS,
501 [ENOTSOCK] = TARGET_ENOTSOCK,
502 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
503 [EMSGSIZE] = TARGET_EMSGSIZE,
504 [EPROTOTYPE] = TARGET_EPROTOTYPE,
505 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
506 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
507 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
508 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
509 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
510 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
511 [EADDRINUSE] = TARGET_EADDRINUSE,
512 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
513 [ENETDOWN] = TARGET_ENETDOWN,
514 [ENETUNREACH] = TARGET_ENETUNREACH,
515 [ENETRESET] = TARGET_ENETRESET,
516 [ECONNABORTED] = TARGET_ECONNABORTED,
517 [ECONNRESET] = TARGET_ECONNRESET,
518 [ENOBUFS] = TARGET_ENOBUFS,
519 [EISCONN] = TARGET_EISCONN,
520 [ENOTCONN] = TARGET_ENOTCONN,
521 [EUCLEAN] = TARGET_EUCLEAN,
522 [ENOTNAM] = TARGET_ENOTNAM,
523 [ENAVAIL] = TARGET_ENAVAIL,
524 [EISNAM] = TARGET_EISNAM,
525 [EREMOTEIO] = TARGET_EREMOTEIO,
526 [ESHUTDOWN] = TARGET_ESHUTDOWN,
527 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
528 [ETIMEDOUT] = TARGET_ETIMEDOUT,
529 [ECONNREFUSED] = TARGET_ECONNREFUSED,
530 [EHOSTDOWN] = TARGET_EHOSTDOWN,
531 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
532 [EALREADY] = TARGET_EALREADY,
533 [EINPROGRESS] = TARGET_EINPROGRESS,
534 [ESTALE] = TARGET_ESTALE,
535 [ECANCELED] = TARGET_ECANCELED,
536 [ENOMEDIUM] = TARGET_ENOMEDIUM,
537 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
539 [ENOKEY] = TARGET_ENOKEY,
542 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
545 [EKEYREVOKED] = TARGET_EKEYREVOKED,
548 [EKEYREJECTED] = TARGET_EKEYREJECTED,
551 [EOWNERDEAD] = TARGET_EOWNERDEAD,
553 #ifdef ENOTRECOVERABLE
554 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
558 static inline int host_to_target_errno(int err)
560 if(host_to_target_errno_table[err])
561 return host_to_target_errno_table[err];
565 static inline int target_to_host_errno(int err)
567 if (target_to_host_errno_table[err])
568 return target_to_host_errno_table[err];
572 static inline abi_long get_errno(abi_long ret)
575 return -host_to_target_errno(errno);
580 static inline int is_error(abi_long ret)
582 return (abi_ulong)ret >= (abi_ulong)(-4096);
585 char *target_strerror(int err)
587 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
590 return strerror(target_to_host_errno(err));
593 static inline int host_to_target_sock_type(int host_type)
597 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
599 target_type = TARGET_SOCK_DGRAM;
602 target_type = TARGET_SOCK_STREAM;
605 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
609 #if defined(SOCK_CLOEXEC)
610 if (host_type & SOCK_CLOEXEC) {
611 target_type |= TARGET_SOCK_CLOEXEC;
615 #if defined(SOCK_NONBLOCK)
616 if (host_type & SOCK_NONBLOCK) {
617 target_type |= TARGET_SOCK_NONBLOCK;
624 static abi_ulong target_brk;
625 static abi_ulong target_original_brk;
626 static abi_ulong brk_page;
628 void target_set_brk(abi_ulong new_brk)
630 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
631 brk_page = HOST_PAGE_ALIGN(target_brk);
634 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
635 #define DEBUGF_BRK(message, args...)
637 /* do_brk() must return target values and target errnos. */
638 abi_long do_brk(abi_ulong new_brk)
640 abi_long mapped_addr;
643 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
646 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
649 if (new_brk < target_original_brk) {
650 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
655 /* If the new brk is less than the highest page reserved to the
656 * target heap allocation, set it and we're almost done... */
657 if (new_brk <= brk_page) {
658 /* Heap contents are initialized to zero, as for anonymous
660 if (new_brk > target_brk) {
661 memset(g2h(target_brk), 0, new_brk - target_brk);
663 target_brk = new_brk;
664 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
668 /* We need to allocate more memory after the brk... Note that
669 * we don't use MAP_FIXED because that will map over the top of
670 * any existing mapping (like the one with the host libc or qemu
671 * itself); instead we treat "mapped but at wrong address" as
672 * a failure and unmap again.
674 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
675 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
676 PROT_READ|PROT_WRITE,
677 MAP_ANON|MAP_PRIVATE, 0, 0));
679 if (mapped_addr == brk_page) {
680 /* Heap contents are initialized to zero, as for anonymous
681 * mapped pages. Technically the new pages are already
682 * initialized to zero since they *are* anonymous mapped
683 * pages, however we have to take care with the contents that
684 * come from the remaining part of the previous page: it may
685 * contains garbage data due to a previous heap usage (grown
687 memset(g2h(target_brk), 0, brk_page - target_brk);
689 target_brk = new_brk;
690 brk_page = HOST_PAGE_ALIGN(target_brk);
691 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
694 } else if (mapped_addr != -1) {
695 /* Mapped but at wrong address, meaning there wasn't actually
696 * enough space for this brk.
698 target_munmap(mapped_addr, new_alloc_size);
700 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
703 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
706 #if defined(TARGET_ALPHA)
707 /* We (partially) emulate OSF/1 on Alpha, which requires we
708 return a proper errno, not an unchanged brk value. */
709 return -TARGET_ENOMEM;
711 /* For everything else, return the previous break. */
715 static inline abi_long copy_from_user_fdset(fd_set *fds,
716 abi_ulong target_fds_addr,
720 abi_ulong b, *target_fds;
722 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
723 if (!(target_fds = lock_user(VERIFY_READ,
725 sizeof(abi_ulong) * nw,
727 return -TARGET_EFAULT;
731 for (i = 0; i < nw; i++) {
732 /* grab the abi_ulong */
733 __get_user(b, &target_fds[i]);
734 for (j = 0; j < TARGET_ABI_BITS; j++) {
735 /* check the bit inside the abi_ulong */
742 unlock_user(target_fds, target_fds_addr, 0);
747 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
748 abi_ulong target_fds_addr,
751 if (target_fds_addr) {
752 if (copy_from_user_fdset(fds, target_fds_addr, n))
753 return -TARGET_EFAULT;
761 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
767 abi_ulong *target_fds;
769 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
770 if (!(target_fds = lock_user(VERIFY_WRITE,
772 sizeof(abi_ulong) * nw,
774 return -TARGET_EFAULT;
777 for (i = 0; i < nw; i++) {
779 for (j = 0; j < TARGET_ABI_BITS; j++) {
780 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
783 __put_user(v, &target_fds[i]);
786 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
791 #if defined(__alpha__)
797 static inline abi_long host_to_target_clock_t(long ticks)
799 #if HOST_HZ == TARGET_HZ
802 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
806 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
807 const struct rusage *rusage)
809 struct target_rusage *target_rusage;
811 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
812 return -TARGET_EFAULT;
813 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
814 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
815 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
816 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
817 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
818 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
819 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
820 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
821 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
822 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
823 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
824 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
825 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
826 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
827 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
828 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
829 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
830 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
831 unlock_user_struct(target_rusage, target_addr, 1);
836 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
838 abi_ulong target_rlim_swap;
841 target_rlim_swap = tswapal(target_rlim);
842 if (target_rlim_swap == TARGET_RLIM_INFINITY)
843 return RLIM_INFINITY;
845 result = target_rlim_swap;
846 if (target_rlim_swap != (rlim_t)result)
847 return RLIM_INFINITY;
852 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
854 abi_ulong target_rlim_swap;
857 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
858 target_rlim_swap = TARGET_RLIM_INFINITY;
860 target_rlim_swap = rlim;
861 result = tswapal(target_rlim_swap);
866 static inline int target_to_host_resource(int code)
869 case TARGET_RLIMIT_AS:
871 case TARGET_RLIMIT_CORE:
873 case TARGET_RLIMIT_CPU:
875 case TARGET_RLIMIT_DATA:
877 case TARGET_RLIMIT_FSIZE:
879 case TARGET_RLIMIT_LOCKS:
881 case TARGET_RLIMIT_MEMLOCK:
882 return RLIMIT_MEMLOCK;
883 case TARGET_RLIMIT_MSGQUEUE:
884 return RLIMIT_MSGQUEUE;
885 case TARGET_RLIMIT_NICE:
887 case TARGET_RLIMIT_NOFILE:
888 return RLIMIT_NOFILE;
889 case TARGET_RLIMIT_NPROC:
891 case TARGET_RLIMIT_RSS:
893 case TARGET_RLIMIT_RTPRIO:
894 return RLIMIT_RTPRIO;
895 case TARGET_RLIMIT_SIGPENDING:
896 return RLIMIT_SIGPENDING;
897 case TARGET_RLIMIT_STACK:
904 static inline abi_long copy_from_user_timeval(struct timeval *tv,
905 abi_ulong target_tv_addr)
907 struct target_timeval *target_tv;
909 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
910 return -TARGET_EFAULT;
912 __get_user(tv->tv_sec, &target_tv->tv_sec);
913 __get_user(tv->tv_usec, &target_tv->tv_usec);
915 unlock_user_struct(target_tv, target_tv_addr, 0);
920 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
921 const struct timeval *tv)
923 struct target_timeval *target_tv;
925 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
926 return -TARGET_EFAULT;
928 __put_user(tv->tv_sec, &target_tv->tv_sec);
929 __put_user(tv->tv_usec, &target_tv->tv_usec);
931 unlock_user_struct(target_tv, target_tv_addr, 1);
936 static inline abi_long copy_from_user_timezone(struct timezone *tz,
937 abi_ulong target_tz_addr)
939 struct target_timezone *target_tz;
941 if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
942 return -TARGET_EFAULT;
945 __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
946 __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
948 unlock_user_struct(target_tz, target_tz_addr, 0);
953 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
956 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
957 abi_ulong target_mq_attr_addr)
959 struct target_mq_attr *target_mq_attr;
961 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
962 target_mq_attr_addr, 1))
963 return -TARGET_EFAULT;
965 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
966 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
967 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
968 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
970 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
975 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
976 const struct mq_attr *attr)
978 struct target_mq_attr *target_mq_attr;
980 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
981 target_mq_attr_addr, 0))
982 return -TARGET_EFAULT;
984 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
985 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
986 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
987 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
989 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
995 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
996 /* do_select() must return target values and target errnos. */
997 static abi_long do_select(int n,
998 abi_ulong rfd_addr, abi_ulong wfd_addr,
999 abi_ulong efd_addr, abi_ulong target_tv_addr)
1001 fd_set rfds, wfds, efds;
1002 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1003 struct timeval tv, *tv_ptr;
1006 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1010 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1014 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1019 if (target_tv_addr) {
1020 if (copy_from_user_timeval(&tv, target_tv_addr))
1021 return -TARGET_EFAULT;
1027 ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));
1029 if (!is_error(ret)) {
1030 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1031 return -TARGET_EFAULT;
1032 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1033 return -TARGET_EFAULT;
1034 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1035 return -TARGET_EFAULT;
1037 if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
1038 return -TARGET_EFAULT;
1045 static abi_long do_pipe2(int host_pipe[], int flags)
1048 return pipe2(host_pipe, flags);
1054 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1055 int flags, int is_pipe2)
1059 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1062 return get_errno(ret);
1064 /* Several targets have special calling conventions for the original
1065 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1067 #if defined(TARGET_ALPHA)
1068 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1069 return host_pipe[0];
1070 #elif defined(TARGET_MIPS)
1071 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1072 return host_pipe[0];
1073 #elif defined(TARGET_SH4)
1074 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1075 return host_pipe[0];
1076 #elif defined(TARGET_SPARC)
1077 ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1078 return host_pipe[0];
1082 if (put_user_s32(host_pipe[0], pipedes)
1083 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1084 return -TARGET_EFAULT;
1085 return get_errno(ret);
1088 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1089 abi_ulong target_addr,
1092 struct target_ip_mreqn *target_smreqn;
1094 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1096 return -TARGET_EFAULT;
1097 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1098 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1099 if (len == sizeof(struct target_ip_mreqn))
1100 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1101 unlock_user(target_smreqn, target_addr, 0);
1106 static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
1107 abi_ulong target_addr,
1110 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1111 sa_family_t sa_family;
1112 struct target_sockaddr *target_saddr;
1114 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1116 return -TARGET_EFAULT;
1118 sa_family = tswap16(target_saddr->sa_family);
1120 /* Oops. The caller might send a incomplete sun_path; sun_path
1121 * must be terminated by \0 (see the manual page), but
1122 * unfortunately it is quite common to specify sockaddr_un
1123 * length as "strlen(x->sun_path)" while it should be
1124 * "strlen(...) + 1". We'll fix that here if needed.
1125 * Linux kernel has a similar feature.
1128 if (sa_family == AF_UNIX) {
1129 if (len < unix_maxlen && len > 0) {
1130 char *cp = (char*)target_saddr;
1132 if ( cp[len-1] && !cp[len] )
1135 if (len > unix_maxlen)
1139 memcpy(addr, target_saddr, len);
1140 addr->sa_family = sa_family;
1141 if (sa_family == AF_PACKET) {
1142 struct target_sockaddr_ll *lladdr;
1144 lladdr = (struct target_sockaddr_ll *)addr;
1145 lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1146 lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1148 unlock_user(target_saddr, target_addr, 0);
1153 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1154 struct sockaddr *addr,
1157 struct target_sockaddr *target_saddr;
1159 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1161 return -TARGET_EFAULT;
1162 memcpy(target_saddr, addr, len);
1163 target_saddr->sa_family = tswap16(addr->sa_family);
1164 unlock_user(target_saddr, target_addr, len);
1169 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1170 struct target_msghdr *target_msgh)
1172 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1173 abi_long msg_controllen;
1174 abi_ulong target_cmsg_addr;
1175 struct target_cmsghdr *target_cmsg;
1176 socklen_t space = 0;
1178 msg_controllen = tswapal(target_msgh->msg_controllen);
1179 if (msg_controllen < sizeof (struct target_cmsghdr))
1181 target_cmsg_addr = tswapal(target_msgh->msg_control);
1182 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1184 return -TARGET_EFAULT;
1186 while (cmsg && target_cmsg) {
1187 void *data = CMSG_DATA(cmsg);
1188 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1190 int len = tswapal(target_cmsg->cmsg_len)
1191 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
1193 space += CMSG_SPACE(len);
1194 if (space > msgh->msg_controllen) {
1195 space -= CMSG_SPACE(len);
1196 gemu_log("Host cmsg overflow\n");
1200 if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1201 cmsg->cmsg_level = SOL_SOCKET;
1203 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1205 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1206 cmsg->cmsg_len = CMSG_LEN(len);
1208 if (cmsg->cmsg_level != SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1209 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1210 memcpy(data, target_data, len);
1212 int *fd = (int *)data;
1213 int *target_fd = (int *)target_data;
1214 int i, numfds = len / sizeof(int);
1216 for (i = 0; i < numfds; i++)
1217 fd[i] = tswap32(target_fd[i]);
1220 cmsg = CMSG_NXTHDR(msgh, cmsg);
1221 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1223 unlock_user(target_cmsg, target_cmsg_addr, 0);
1225 msgh->msg_controllen = space;
/* Convert host ancillary data back into the target's control buffer.
 * Handles SCM_RIGHTS (fd swap), SCM_TIMESTAMP (struct timeval conversion)
 * and SCM_CREDENTIALS (struct ucred conversion); anything else is logged
 * and copied verbatim.  Updates target msg_controllen to the space used.
 * NOTE(review): interior lines (braces, breaks, some case labels) are not
 * visible in this chunk. */
1229 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1230 struct msghdr *msgh)
1232 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1233 abi_long msg_controllen;
1234 abi_ulong target_cmsg_addr;
1235 struct target_cmsghdr *target_cmsg;
1236 socklen_t space = 0;
1238 msg_controllen = tswapal(target_msgh->msg_controllen);
1239 if (msg_controllen < sizeof (struct target_cmsghdr))
1241 target_cmsg_addr = tswapal(target_msgh->msg_control);
1242 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1244 return -TARGET_EFAULT;
1246 while (cmsg && target_cmsg) {
1247 void *data = CMSG_DATA(cmsg);
1248 void *target_data = TARGET_CMSG_DATA(target_cmsg);
/* Host payload length excludes the aligned host header. */
1250 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
/* Guard against overrunning the target control buffer. */
1252 space += TARGET_CMSG_SPACE(len);
1253 if (space > msg_controllen) {
1254 space -= TARGET_CMSG_SPACE(len);
1255 gemu_log("Target cmsg overflow\n");
1259 if (cmsg->cmsg_level == SOL_SOCKET) {
1260 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1262 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1264 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1265 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(len));
1267 switch (cmsg->cmsg_level) {
1269 switch (cmsg->cmsg_type) {
/* SCM_RIGHTS: swap each file descriptor individually. */
1272 int *fd = (int *)data;
1273 int *target_fd = (int *)target_data;
1274 int i, numfds = len / sizeof(int);
1276 for (i = 0; i < numfds; i++)
1277 target_fd[i] = tswap32(fd[i]);
/* SCM_TIMESTAMP: convert host struct timeval to target layout. */
1282 struct timeval *tv = (struct timeval *)data;
1283 struct target_timeval *target_tv =
1284 (struct target_timeval *)target_data;
1286 if (len != sizeof(struct timeval))
1289 /* copy struct timeval to target */
1290 target_tv->tv_sec = tswapal(tv->tv_sec);
1291 target_tv->tv_usec = tswapal(tv->tv_usec);
1294 case SCM_CREDENTIALS:
1296 struct ucred *cred = (struct ucred *)data;
1297 struct target_ucred *target_cred =
1298 (struct target_ucred *)target_data;
1300 __put_user(cred->pid, &target_cred->pid);
1301 __put_user(cred->uid, &target_cred->uid);
1302 __put_user(cred->gid, &target_cred->gid);
1312 gemu_log("Unsupported ancillary data: %d/%d\n",
1313 cmsg->cmsg_level, cmsg->cmsg_type);
1314 memcpy(target_data, data, len);
1317 cmsg = CMSG_NXTHDR(msgh, cmsg);
1318 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1320 unlock_user(target_cmsg, target_cmsg_addr, space);
1322 target_msgh->msg_controllen = tswapal(space);
1326 /* do_setsockopt() Must return target values and target errnos. */
/* Emulate setsockopt(2): translate the target level/optname/optval into
 * host equivalents and issue the host call.  Handles int-valued TCP/IP
 * options, multicast membership structs, timeouts, socket filters and
 * SO_BINDTODEVICE; unsupported combinations return -TARGET_ENOPROTOOPT.
 * NOTE(review): many interior lines (switch/case labels, breaks, braces)
 * are not visible in this chunk. */
1327 static abi_long do_setsockopt(int sockfd, int level, int optname,
1328 abi_ulong optval_addr, socklen_t optlen)
1332 struct ip_mreqn *ip_mreq;
1333 struct ip_mreq_source *ip_mreq_source;
1337 /* TCP options all take an 'int' value. */
1338 if (optlen < sizeof(uint32_t))
1339 return -TARGET_EINVAL;
1341 if (get_user_u32(val, optval_addr))
1342 return -TARGET_EFAULT;
1343 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1350 case IP_ROUTER_ALERT:
1354 case IP_MTU_DISCOVER:
1360 case IP_MULTICAST_TTL:
1361 case IP_MULTICAST_LOOP:
/* These IP options accept either a full int or a single byte. */
1363 if (optlen >= sizeof(uint32_t)) {
1364 if (get_user_u32(val, optval_addr))
1365 return -TARGET_EFAULT;
1366 } else if (optlen >= 1) {
1367 if (get_user_u8(val, optval_addr))
1368 return -TARGET_EFAULT;
1370 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1372 case IP_ADD_MEMBERSHIP:
1373 case IP_DROP_MEMBERSHIP:
/* Accept either struct ip_mreq or the larger struct ip_mreqn. */
1374 if (optlen < sizeof (struct target_ip_mreq) ||
1375 optlen > sizeof (struct target_ip_mreqn))
1376 return -TARGET_EINVAL;
1378 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1379 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1380 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1383 case IP_BLOCK_SOURCE:
1384 case IP_UNBLOCK_SOURCE:
1385 case IP_ADD_SOURCE_MEMBERSHIP:
1386 case IP_DROP_SOURCE_MEMBERSHIP:
1387 if (optlen != sizeof (struct target_ip_mreq_source))
1388 return -TARGET_EINVAL;
/* ip_mreq_source contains only in_addr fields (network byte order),
 * so the guest buffer can be passed through without swapping. */
1390 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1391 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1392 unlock_user (ip_mreq_source, optval_addr, 0);
1401 case IPV6_MTU_DISCOVER:
1404 case IPV6_RECVPKTINFO:
1406 if (optlen < sizeof(uint32_t)) {
1407 return -TARGET_EINVAL;
1409 if (get_user_u32(val, optval_addr)) {
1410 return -TARGET_EFAULT;
1412 ret = get_errno(setsockopt(sockfd, level, optname,
1413 &val, sizeof(val)));
1422 /* struct icmp_filter takes an u32 value */
1423 if (optlen < sizeof(uint32_t)) {
1424 return -TARGET_EINVAL;
1427 if (get_user_u32(val, optval_addr)) {
1428 return -TARGET_EFAULT;
1430 ret = get_errno(setsockopt(sockfd, level, optname,
1431 &val, sizeof(val)));
1438 case TARGET_SOL_SOCKET:
1440 case TARGET_SO_RCVTIMEO:
1444 optname = SO_RCVTIMEO;
/* Timeouts are passed as a struct timeval; convert the layout. */
1447 if (optlen != sizeof(struct target_timeval)) {
1448 return -TARGET_EINVAL;
1451 if (copy_from_user_timeval(&tv, optval_addr)) {
1452 return -TARGET_EFAULT;
1455 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
1459 case TARGET_SO_SNDTIMEO:
1460 optname = SO_SNDTIMEO;
1462 case TARGET_SO_ATTACH_FILTER:
/* Convert a target sock_fprog/sock_filter array to host layout. */
1464 struct target_sock_fprog *tfprog;
1465 struct target_sock_filter *tfilter;
1466 struct sock_fprog fprog;
1467 struct sock_filter *filter;
1470 if (optlen != sizeof(*tfprog)) {
1471 return -TARGET_EINVAL;
1473 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
1474 return -TARGET_EFAULT;
1476 if (!lock_user_struct(VERIFY_READ, tfilter,
1477 tswapal(tfprog->filter), 0)) {
1478 unlock_user_struct(tfprog, optval_addr, 1);
1479 return -TARGET_EFAULT;
1482 fprog.len = tswap16(tfprog->len);
1483 filter = malloc(fprog.len * sizeof(*filter));
1484 if (filter == NULL) {
1485 unlock_user_struct(tfilter, tfprog->filter, 1);
1486 unlock_user_struct(tfprog, optval_addr, 1);
1487 return -TARGET_ENOMEM;
1489 for (i = 0; i < fprog.len; i++) {
1490 filter[i].code = tswap16(tfilter[i].code);
1491 filter[i].jt = tfilter[i].jt;
1492 filter[i].jf = tfilter[i].jf;
1493 filter[i].k = tswap32(tfilter[i].k);
1495 fprog.filter = filter;
1497 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
1498 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
1501 unlock_user_struct(tfilter, tfprog->filter, 1);
1502 unlock_user_struct(tfprog, optval_addr, 1);
1505 case TARGET_SO_BINDTODEVICE:
1507 char *dev_ifname, *addr_ifname;
/* Clamp the interface name to IFNAMSIZ-1 and NUL-terminate a copy. */
1509 if (optlen > IFNAMSIZ - 1) {
1510 optlen = IFNAMSIZ - 1;
1512 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1514 return -TARGET_EFAULT;
1516 optname = SO_BINDTODEVICE;
1517 addr_ifname = alloca(IFNAMSIZ);
1518 memcpy(addr_ifname, dev_ifname, optlen);
1519 addr_ifname[optlen] = 0;
1520 ret = get_errno(setsockopt(sockfd, level, optname, addr_ifname, optlen));
1521 unlock_user (dev_ifname, optval_addr, 0);
1524 /* Options with 'int' argument. */
1525 case TARGET_SO_DEBUG:
1528 case TARGET_SO_REUSEADDR:
1529 optname = SO_REUSEADDR;
1531 case TARGET_SO_TYPE:
1534 case TARGET_SO_ERROR:
1537 case TARGET_SO_DONTROUTE:
1538 optname = SO_DONTROUTE;
1540 case TARGET_SO_BROADCAST:
1541 optname = SO_BROADCAST;
1543 case TARGET_SO_SNDBUF:
1544 optname = SO_SNDBUF;
1546 case TARGET_SO_SNDBUFFORCE:
1547 optname = SO_SNDBUFFORCE;
1549 case TARGET_SO_RCVBUF:
1550 optname = SO_RCVBUF;
1552 case TARGET_SO_RCVBUFFORCE:
1553 optname = SO_RCVBUFFORCE;
1555 case TARGET_SO_KEEPALIVE:
1556 optname = SO_KEEPALIVE;
1558 case TARGET_SO_OOBINLINE:
1559 optname = SO_OOBINLINE;
1561 case TARGET_SO_NO_CHECK:
1562 optname = SO_NO_CHECK;
1564 case TARGET_SO_PRIORITY:
1565 optname = SO_PRIORITY;
1568 case TARGET_SO_BSDCOMPAT:
1569 optname = SO_BSDCOMPAT;
1572 case TARGET_SO_PASSCRED:
1573 optname = SO_PASSCRED;
1575 case TARGET_SO_PASSSEC:
1576 optname = SO_PASSSEC;
1578 case TARGET_SO_TIMESTAMP:
1579 optname = SO_TIMESTAMP;
1581 case TARGET_SO_RCVLOWAT:
1582 optname = SO_RCVLOWAT;
/* Common tail for all int-valued SOL_SOCKET options. */
1588 if (optlen < sizeof(uint32_t))
1589 return -TARGET_EINVAL;
1591 if (get_user_u32(val, optval_addr))
1592 return -TARGET_EFAULT;
1593 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
1597 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
1598 ret = -TARGET_ENOPROTOOPT;
1603 /* do_getsockopt() Must return target values and target errnos. */
/* Emulate getsockopt(2): translate level/optname to host values, call the
 * host, then write the result back to guest memory honouring the guest's
 * requested length (byte-sized reads are supported for some IP options).
 * NOTE(review): interior lines (braces, breaks, some length checks) are
 * not visible in this chunk. */
1604 static abi_long do_getsockopt(int sockfd, int level, int optname,
1605 abi_ulong optval_addr, abi_ulong optlen)
1612 case TARGET_SOL_SOCKET:
1615 /* These don't just return a single integer */
1616 case TARGET_SO_LINGER:
1617 case TARGET_SO_RCVTIMEO:
1618 case TARGET_SO_SNDTIMEO:
1619 case TARGET_SO_PEERNAME:
1621 case TARGET_SO_PEERCRED: {
/* SO_PEERCRED: fetch host struct ucred and convert to target layout. */
1624 struct target_ucred *tcr;
1626 if (get_user_u32(len, optlen)) {
1627 return -TARGET_EFAULT;
1630 return -TARGET_EINVAL;
1634 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
1642 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
1643 return -TARGET_EFAULT;
1645 __put_user(cr.pid, &tcr->pid);
1646 __put_user(cr.uid, &tcr->uid);
1647 __put_user(cr.gid, &tcr->gid);
1648 unlock_user_struct(tcr, optval_addr, 1);
1649 if (put_user_u32(len, optlen)) {
1650 return -TARGET_EFAULT;
1654 /* Options with 'int' argument. */
1655 case TARGET_SO_DEBUG:
1658 case TARGET_SO_REUSEADDR:
1659 optname = SO_REUSEADDR;
1661 case TARGET_SO_TYPE:
1664 case TARGET_SO_ERROR:
1667 case TARGET_SO_DONTROUTE:
1668 optname = SO_DONTROUTE;
1670 case TARGET_SO_BROADCAST:
1671 optname = SO_BROADCAST;
1673 case TARGET_SO_SNDBUF:
1674 optname = SO_SNDBUF;
1676 case TARGET_SO_RCVBUF:
1677 optname = SO_RCVBUF;
1679 case TARGET_SO_KEEPALIVE:
1680 optname = SO_KEEPALIVE;
1682 case TARGET_SO_OOBINLINE:
1683 optname = SO_OOBINLINE;
1685 case TARGET_SO_NO_CHECK:
1686 optname = SO_NO_CHECK;
1688 case TARGET_SO_PRIORITY:
1689 optname = SO_PRIORITY;
1692 case TARGET_SO_BSDCOMPAT:
1693 optname = SO_BSDCOMPAT;
1696 case TARGET_SO_PASSCRED:
1697 optname = SO_PASSCRED;
1699 case TARGET_SO_TIMESTAMP:
1700 optname = SO_TIMESTAMP;
1702 case TARGET_SO_RCVLOWAT:
1703 optname = SO_RCVLOWAT;
1705 case TARGET_SO_ACCEPTCONN:
1706 optname = SO_ACCEPTCONN;
1713 /* TCP options all take an 'int' value. */
1715 if (get_user_u32(len, optlen))
1716 return -TARGET_EFAULT;
1718 return -TARGET_EINVAL;
1720 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv))
;
1723 if (optname == SO_TYPE) {
/* Map the host socket type constant back to the target's value. */
1724 val = host_to_target_sock_type(val);
1729 if (put_user_u32(val, optval_addr))
1730 return -TARGET_EFAULT;
1732 if (put_user_u8(val, optval_addr))
1733 return -TARGET_EFAULT;
1735 if (put_user_u32(len, optlen))
1736 return -TARGET_EFAULT;
1743 case IP_ROUTER_ALERT:
1747 case IP_MTU_DISCOVER:
1753 case IP_MULTICAST_TTL:
1754 case IP_MULTICAST_LOOP:
1755 if (get_user_u32(len, optlen))
1756 return -TARGET_EFAULT;
1758 return -TARGET_EINVAL;
1760 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
/* Guest asked for less than an int: return a single byte if the
 * value fits, mirroring kernel behaviour for these IP options. */
1763 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
1765 if (put_user_u32(len, optlen)
1766 || put_user_u8(val, optval_addr))
1767 return -TARGET_EFAULT;
1769 if (len > sizeof(int))
1771 if (put_user_u32(len, optlen)
1772 || put_user_u32(val, optval_addr))
1773 return -TARGET_EFAULT;
1777 ret = -TARGET_ENOPROTOOPT;
1783 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1785 ret = -TARGET_EOPNOTSUPP;
/* Build a host struct iovec array from a target iovec array, locking each
 * guest buffer into host memory.  Caller must release it with
 * unlock_iovec().  Returns NULL on failure (errno-style reporting is
 * handled by the elided error paths).
 * NOTE(review): the error-unwind lines of this function are not visible
 * in this chunk. */
1791 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
1792 int count, int copy)
1794 struct target_iovec *target_vec;
1796 abi_ulong total_len, max_len;
1804 if (count < 0 || count > IOV_MAX) {
1809 vec = calloc(count, sizeof(struct iovec));
1815 target_vec = lock_user(VERIFY_READ, target_addr,
1816 count * sizeof(struct target_iovec), 1);
1817 if (target_vec == NULL) {
1822 /* ??? If host page size > target page size, this will result in a
1823 value larger than what we can actually support. */
1824 max_len = 0x7fffffff & TARGET_PAGE_MASK;
1827 for (i = 0; i < count; i++) {
1828 abi_ulong base = tswapal(target_vec[i].iov_base);
1829 abi_long len = tswapal(target_vec[i].iov_len);
1834 } else if (len == 0) {
1835 /* Zero length pointer is ignored. */
1836 vec[i].iov_base = 0;
1838 vec[i].iov_base = lock_user(type, base, len, copy);
1839 if (!vec[i].iov_base) {
/* Clamp total readv/writev size, matching kernel semantics. */
1843 if (len > max_len - total_len) {
1844 len = max_len - total_len;
1847 vec[i].iov_len = len;
1851 unlock_user(target_vec, target_addr, 0);
1855 unlock_user(target_vec, target_addr, 0);
1862 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
1863 int count, int copy)
1865 struct target_iovec *target_vec;
1868 target_vec = lock_user(VERIFY_READ, target_addr,
1869 count * sizeof(struct target_iovec), 1);
1871 for (i = 0; i < count; i++) {
1872 abi_ulong base = tswapal(target_vec[i].iov_base);
1873 abi_long len = tswapal(target_vec[i].iov_base);
1877 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
1879 unlock_user(target_vec, target_addr, 0);
/* Translate a target socket type (base type plus SOCK_CLOEXEC /
 * SOCK_NONBLOCK flag bits) into the host's encoding, in place via *type.
 * Returns -TARGET_EINVAL when a requested flag cannot be represented on
 * this host.  NOTE(review): break statements and the final assignment of
 * *type are not visible in this chunk. */
1885 static inline int target_to_host_sock_type(int *type)
1888 int target_type = *type;
1890 switch (target_type & TARGET_SOCK_TYPE_MASK) {
1891 case TARGET_SOCK_DGRAM:
1892 host_type = SOCK_DGRAM;
1894 case TARGET_SOCK_STREAM:
1895 host_type = SOCK_STREAM;
1898 host_type = target_type & TARGET_SOCK_TYPE_MASK;
1901 if (target_type & TARGET_SOCK_CLOEXEC) {
1902 #if defined(SOCK_CLOEXEC)
1903 host_type |= SOCK_CLOEXEC;
1905 return -TARGET_EINVAL;
1908 if (target_type & TARGET_SOCK_NONBLOCK) {
1909 #if defined(SOCK_NONBLOCK)
1910 host_type |= SOCK_NONBLOCK;
1911 #elif !defined(O_NONBLOCK)
1912 return -TARGET_EINVAL;
1919 /* Try to emulate socket type flags after socket creation. */
/* On hosts without SOCK_NONBLOCK, approximate it post-hoc with
 * fcntl(F_SETFL, O_NONBLOCK).  Returns the fd on success. */
1920 static int sock_flags_fixup(int fd, int target_type)
1922 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
1923 if (target_type & TARGET_SOCK_NONBLOCK) {
1924 int flags = fcntl(fd, F_GETFL);
1925 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
1927 return -TARGET_EINVAL;
1934 /* do_socket() Must return target values and target errnos. */
/* Emulate socket(2): convert the target type/flags to host form, reject
 * PF_NETLINK (unsupported here), create the socket and fix up any flags
 * the host socket() call could not express. */
1935 static abi_long do_socket(int domain, int type, int protocol)
1937 int target_type = type;
1940 ret = target_to_host_sock_type(&type);
1945 if (domain == PF_NETLINK)
1946 return -TARGET_EAFNOSUPPORT;
1947 ret = get_errno(socket(domain, type, protocol));
1949 ret = sock_flags_fixup(ret, target_type);
1954 /* do_bind() Must return target values and target errnos. */
/* Emulate bind(2): copy and byte-swap the target sockaddr into a host
 * buffer, then issue the host bind. */
1955 static abi_long do_bind(int sockfd, abi_ulong target_addr,
1961 if ((int)addrlen < 0) {
1962 return -TARGET_EINVAL;
/* +1 so a trailing NUL can be forced for AF_UNIX path handling. */
1965 addr = alloca(addrlen+1);
1967 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1971 return get_errno(bind(sockfd, addr, addrlen));
1974 /* do_connect() Must return target values and target errnos. */
/* Emulate connect(2): same sockaddr conversion as do_bind(), then issue
 * the host connect. */
1975 static abi_long do_connect(int sockfd, abi_ulong target_addr,
1981 if ((int)addrlen < 0) {
1982 return -TARGET_EINVAL;
1985 addr = alloca(addrlen+1);
1987 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1991 return get_errno(connect(sockfd, addr, addrlen));
1994 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
/* Shared core of sendmsg/recvmsg emulation, operating on an already
 * locked target_msghdr.  Converts the name, iovec and control data to
 * host form, performs the host call, then (for recvmsg) converts results
 * back.  @send selects sendmsg (1) vs recvmsg (0). */
1995 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
1996 int flags, int send)
2002 abi_ulong target_vec;
2004 if (msgp->msg_name) {
2005 msg.msg_namelen = tswap32(msgp->msg_namelen);
2006 msg.msg_name = alloca(msg.msg_namelen+1);
2007 ret = target_to_host_sockaddr(msg.msg_name, tswapal(msgp->msg_name),
2013 msg.msg_name = NULL;
2014 msg.msg_namelen = 0;
/* Host cmsg headers can be larger than the target's, so reserve twice
 * the target control length for the converted data. */
2016 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
2017 msg.msg_control = alloca(msg.msg_controllen);
2018 msg.msg_flags = tswap32(msgp->msg_flags);
2020 count = tswapal(msgp->msg_iovlen);
2021 target_vec = tswapal(msgp->msg_iov);
2022 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
2023 target_vec, count, send);
2025 ret = -host_to_target_errno(errno);
2028 msg.msg_iovlen = count;
2032 ret = target_to_host_cmsg(&msg, msgp);
2034 ret = get_errno(sendmsg(fd, &msg, flags));
2036 ret = get_errno(recvmsg(fd, &msg, flags));
2037 if (!is_error(ret)) {
2039 ret = host_to_target_cmsg(msgp, &msg);
2040 if (!is_error(ret)) {
2041 msgp->msg_namelen = tswap32(msg.msg_namelen);
2042 if (msg.msg_name != NULL) {
2043 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
2044 msg.msg_name, msg.msg_namelen);
2056 unlock_iovec(vec, target_vec, count, !send);
/* Emulate sendmsg(2)/recvmsg(2): lock the target msghdr and delegate to
 * do_sendrecvmsg_locked().  The struct is written back only for recv. */
2061 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
2062 int flags, int send)
2065 struct target_msghdr *msgp;
2067 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
2071 return -TARGET_EFAULT;
2073 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
2074 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
2078 #ifdef TARGET_NR_sendmmsg
2079 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
2080 * so it might not have this *mmsg-specific flag either.
2082 #ifndef MSG_WAITFORONE
2083 #define MSG_WAITFORONE 0x10000
/* Emulate sendmmsg(2)/recvmmsg(2) as a loop of single sendmsg/recvmsg
 * calls, recording each datagram's byte count in msg_len.  Returns the
 * number of datagrams processed if any succeeded, else the error. */
2086 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
2087 unsigned int vlen, unsigned int flags,
2090 struct target_mmsghdr *mmsgp;
2094 if (vlen > UIO_MAXIOV) {
2098 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
2100 return -TARGET_EFAULT;
2103 for (i = 0; i < vlen; i++) {
2104 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
2105 if (is_error(ret)) {
2108 mmsgp[i].msg_len = tswap32(ret);
2109 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
2110 if (flags & MSG_WAITFORONE) {
2111 flags |= MSG_DONTWAIT;
2115 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
2117 /* Return number of datagrams sent if we sent any at all;
2118 * otherwise return the error.
2127 /* If we don't have a system accept4() then just call accept.
2128 * The callsites to do_accept4() will ensure that they don't
2129 * pass a non-zero flags argument in this config.
2131 #ifndef CONFIG_ACCEPT4
/* Compatibility shim: flags are ignored (guaranteed zero by callers). */
2132 static inline int accept4(int sockfd, struct sockaddr *addr,
2133 socklen_t *addrlen, int flags)
2136 return accept(sockfd, addr, addrlen);
2140 /* do_accept4() Must return target values and target errnos. */
/* Emulate accept4(2) (and accept(2) with flags==0): translate the flag
 * bits, accept the connection, and copy the peer address back to guest
 * memory when one was requested. */
2141 static abi_long do_accept4(int fd, abi_ulong target_addr,
2142 abi_ulong target_addrlen_addr, int flags)
2149 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
2151 if (target_addr == 0) {
2152 return get_errno(accept4(fd, NULL, NULL, host_flags));
2155 /* linux returns EINVAL if addrlen pointer is invalid */
2156 if (get_user_u32(addrlen, target_addrlen_addr))
2157 return -TARGET_EINVAL;
2159 if ((int)addrlen < 0) {
2160 return -TARGET_EINVAL;
2163 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2164 return -TARGET_EINVAL;
2166 addr = alloca(addrlen);
2168 ret = get_errno(accept4(fd, addr, &addrlen, host_flags));
2169 if (!is_error(ret)) {
2170 host_to_target_sockaddr(target_addr, addr, addrlen);
2171 if (put_user_u32(addrlen, target_addrlen_addr))
2172 ret = -TARGET_EFAULT;
2177 /* do_getpeername() Must return target values and target errnos. */
/* Emulate getpeername(2): fetch the peer address into a host buffer and
 * convert it back to the guest, updating the guest's addrlen. */
2178 static abi_long do_getpeername(int fd, abi_ulong target_addr,
2179 abi_ulong target_addrlen_addr)
2185 if (get_user_u32(addrlen, target_addrlen_addr))
2186 return -TARGET_EFAULT;
2188 if ((int)addrlen < 0) {
2189 return -TARGET_EINVAL;
2192 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2193 return -TARGET_EFAULT;
2195 addr = alloca(addrlen);
2197 ret = get_errno(getpeername(fd, addr, &addrlen));
2198 if (!is_error(ret)) {
2199 host_to_target_sockaddr(target_addr, addr, addrlen);
2200 if (put_user_u32(addrlen, target_addrlen_addr))
2201 ret = -TARGET_EFAULT;
2206 /* do_getsockname() Must return target values and target errnos. */
/* Emulate getsockname(2): same conversion pattern as do_getpeername(). */
2207 static abi_long do_getsockname(int fd, abi_ulong target_addr,
2208 abi_ulong target_addrlen_addr)
2214 if (get_user_u32(addrlen, target_addrlen_addr))
2215 return -TARGET_EFAULT;
2217 if ((int)addrlen < 0) {
2218 return -TARGET_EINVAL;
2221 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2222 return -TARGET_EFAULT;
2224 addr = alloca(addrlen);
2226 ret = get_errno(getsockname(fd, addr, &addrlen));
2227 if (!is_error(ret)) {
2228 host_to_target_sockaddr(target_addr, addr, addrlen);
2229 if (put_user_u32(addrlen, target_addrlen_addr))
2230 ret = -TARGET_EFAULT;
2235 /* do_socketpair() Must return target values and target errnos. */
/* Emulate socketpair(2): convert the type, create the pair, and store
 * both descriptors into the guest's two-element array. */
2236 static abi_long do_socketpair(int domain, int type, int protocol,
2237 abi_ulong target_tab_addr)
2242 target_to_host_sock_type(&type);
2244 ret = get_errno(socketpair(domain, type, protocol, tab));
2245 if (!is_error(ret)) {
2246 if (put_user_s32(tab[0], target_tab_addr)
2247 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
2248 ret = -TARGET_EFAULT;
2253 /* do_sendto() Must return target values and target errnos. */
/* Emulate sendto(2) and send(2): lock the guest payload, optionally
 * convert a destination sockaddr (target_addr != 0), and transmit. */
2254 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
2255 abi_ulong target_addr, socklen_t addrlen)
2261 if ((int)addrlen < 0) {
2262 return -TARGET_EINVAL;
2265 host_msg = lock_user(VERIFY_READ, msg, len, 1);
2267 return -TARGET_EFAULT;
2269 addr = alloca(addrlen+1);
2270 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
2272 unlock_user(host_msg, msg, 0);
2275 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
2277 ret = get_errno(send(fd, host_msg, len, flags));
2279 unlock_user(host_msg, msg, 0);
2283 /* do_recvfrom() Must return target values and target errnos. */
/* Emulate recvfrom(2) and recv(2): lock a writable guest buffer, receive
 * into it, and (when a source-address buffer was supplied) copy the
 * sender's address back to the guest. */
2284 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
2285 abi_ulong target_addr,
2286 abi_ulong target_addrlen)
2293 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
2295 return -TARGET_EFAULT;
2297 if (get_user_u32(addrlen, target_addrlen)) {
2298 ret = -TARGET_EFAULT;
2301 if ((int)addrlen < 0) {
2302 ret = -TARGET_EINVAL;
2305 addr = alloca(addrlen);
2306 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
2308 addr = NULL; /* To keep compiler quiet. */
2309 ret = get_errno(qemu_recv(fd, host_msg, len, flags));
2311 if (!is_error(ret)) {
2313 host_to_target_sockaddr(target_addr, addr, addrlen);
2314 if (put_user_u32(addrlen, target_addrlen)) {
2315 ret = -TARGET_EFAULT;
/* Success path: write the received bytes back to guest memory. */
2319 unlock_user(host_msg, msg, len);
2322 unlock_user(host_msg, msg, 0);
2327 #ifdef TARGET_NR_socketcall
2328 /* do_socketcall() Must return target values and target errnos. */
/* Demultiplex the legacy socketcall(2) syscall: read the per-call number
 * of abi_long arguments from guest memory at vptr, then dispatch to the
 * matching do_*() helper. */
2329 static abi_long do_socketcall(int num, abi_ulong vptr)
2331 static const unsigned ac[] = { /* number of arguments per call */
2332 [SOCKOP_socket] = 3, /* domain, type, protocol */
2333 [SOCKOP_bind] = 3, /* sockfd, addr, addrlen */
2334 [SOCKOP_connect] = 3, /* sockfd, addr, addrlen */
2335 [SOCKOP_listen] = 2, /* sockfd, backlog */
2336 [SOCKOP_accept] = 3, /* sockfd, addr, addrlen */
2337 [SOCKOP_accept4] = 4, /* sockfd, addr, addrlen, flags */
2338 [SOCKOP_getsockname] = 3, /* sockfd, addr, addrlen */
2339 [SOCKOP_getpeername] = 3, /* sockfd, addr, addrlen */
2340 [SOCKOP_socketpair] = 4, /* domain, type, protocol, tab */
2341 [SOCKOP_send] = 4, /* sockfd, msg, len, flags */
2342 [SOCKOP_recv] = 4, /* sockfd, msg, len, flags */
2343 [SOCKOP_sendto] = 6, /* sockfd, msg, len, flags, addr, addrlen */
2344 [SOCKOP_recvfrom] = 6, /* sockfd, msg, len, flags, addr, addrlen */
2345 [SOCKOP_shutdown] = 2, /* sockfd, how */
2346 [SOCKOP_sendmsg] = 3, /* sockfd, msg, flags */
2347 [SOCKOP_recvmsg] = 3, /* sockfd, msg, flags */
2348 [SOCKOP_setsockopt] = 5, /* sockfd, level, optname, optval, optlen */
2349 [SOCKOP_getsockopt] = 5, /* sockfd, level, optname, optval, optlen */
2351 abi_long a[6]; /* max 6 args */
2353 /* first, collect the arguments in a[] according to ac[] */
2354 if (num >= 0 && num < ARRAY_SIZE(ac)) {
2356 assert(ARRAY_SIZE(a) >= ac[num]); /* ensure we have space for args */
2357 for (i = 0; i < ac[num]; ++i) {
2358 if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
2359 return -TARGET_EFAULT;
2364 /* now when we have the args, actually handle the call */
2366 case SOCKOP_socket: /* domain, type, protocol */
2367 return do_socket(a[0], a[1], a[2]);
2368 case SOCKOP_bind: /* sockfd, addr, addrlen */
2369 return do_bind(a[0], a[1], a[2]);
2370 case SOCKOP_connect: /* sockfd, addr, addrlen */
2371 return do_connect(a[0], a[1], a[2]);
2372 case SOCKOP_listen: /* sockfd, backlog */
2373 return get_errno(listen(a[0], a[1]));
2374 case SOCKOP_accept: /* sockfd, addr, addrlen */
2375 return do_accept4(a[0], a[1], a[2], 0);
2376 case SOCKOP_accept4: /* sockfd, addr, addrlen, flags */
2377 return do_accept4(a[0], a[1], a[2], a[3]);
2378 case SOCKOP_getsockname: /* sockfd, addr, addrlen */
2379 return do_getsockname(a[0], a[1], a[2]);
2380 case SOCKOP_getpeername: /* sockfd, addr, addrlen */
2381 return do_getpeername(a[0], a[1], a[2]);
2382 case SOCKOP_socketpair: /* domain, type, protocol, tab */
2383 return do_socketpair(a[0], a[1], a[2], a[3]);
2384 case SOCKOP_send: /* sockfd, msg, len, flags */
2385 return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
2386 case SOCKOP_recv: /* sockfd, msg, len, flags */
2387 return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
2388 case SOCKOP_sendto: /* sockfd, msg, len, flags, addr, addrlen */
2389 return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
2390 case SOCKOP_recvfrom: /* sockfd, msg, len, flags, addr, addrlen */
2391 return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
2392 case SOCKOP_shutdown: /* sockfd, how */
2393 return get_errno(shutdown(a[0], a[1]));
2394 case SOCKOP_sendmsg: /* sockfd, msg, flags */
2395 return do_sendrecvmsg(a[0], a[1], a[2], 1);
2396 case SOCKOP_recvmsg: /* sockfd, msg, flags */
2397 return do_sendrecvmsg(a[0], a[1], a[2], 0);
2398 case SOCKOP_setsockopt: /* sockfd, level, optname, optval, optlen */
2399 return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
2400 case SOCKOP_getsockopt: /* sockfd, level, optname, optval, optlen */
2401 return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
2403 gemu_log("Unsupported socketcall: %d\n", num);
2404 return -TARGET_ENOSYS;
/* Fixed-size table tracking guest shmat() mappings (fields elided in
 * this chunk). */
2409 #define N_SHM_REGIONS 32
2411 static struct shm_region {
2414 } shm_regions[N_SHM_REGIONS];
/* Guest-ABI layout of struct semid_ds; the __unusedN padding mirrors the
 * kernel's 32-bit layout. */
2416 struct target_semid_ds
2418 struct target_ipc_perm sem_perm;
2419 abi_ulong sem_otime;
2420 abi_ulong __unused1;
2421 abi_ulong sem_ctime;
2422 abi_ulong __unused2;
2423 abi_ulong sem_nsems;
2424 abi_ulong __unused3;
2425 abi_ulong __unused4;
/* Copy a guest struct ipc_perm (embedded at the head of a target
 * semid_ds at target_addr) into host form, byte-swapping each field.
 * mode/__seq widths differ per target architecture. */
2428 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
2429 abi_ulong target_addr)
2431 struct target_ipc_perm *target_ip;
2432 struct target_semid_ds *target_sd;
2434 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2435 return -TARGET_EFAULT;
2436 target_ip = &(target_sd->sem_perm);
2437 host_ip->__key = tswap32(target_ip->__key);
2438 host_ip->uid = tswap32(target_ip->uid);
2439 host_ip->gid = tswap32(target_ip->gid);
2440 host_ip->cuid = tswap32(target_ip->cuid);
2441 host_ip->cgid = tswap32(target_ip->cgid);
2442 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
2443 host_ip->mode = tswap32(target_ip->mode);
2445 host_ip->mode = tswap16(target_ip->mode);
2447 #if defined(TARGET_PPC)
2448 host_ip->__seq = tswap32(target_ip->__seq);
2450 host_ip->__seq = tswap16(target_ip->__seq);
2452 unlock_user_struct(target_sd, target_addr, 0);
/* Inverse of target_to_host_ipc_perm(): write a host struct ipc_perm
 * back into the guest semid_ds at target_addr. */
2456 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
2457 struct ipc_perm *host_ip)
2459 struct target_ipc_perm *target_ip;
2460 struct target_semid_ds *target_sd;
2462 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2463 return -TARGET_EFAULT;
2464 target_ip = &(target_sd->sem_perm);
2465 target_ip->__key = tswap32(host_ip->__key);
2466 target_ip->uid = tswap32(host_ip->uid);
2467 target_ip->gid = tswap32(host_ip->gid);
2468 target_ip->cuid = tswap32(host_ip->cuid);
2469 target_ip->cgid = tswap32(host_ip->cgid);
2470 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
2471 target_ip->mode = tswap32(host_ip->mode);
2473 target_ip->mode = tswap16(host_ip->mode);
2475 #if defined(TARGET_PPC)
2476 target_ip->__seq = tswap32(host_ip->__seq);
2478 target_ip->__seq = tswap16(host_ip->__seq);
2480 unlock_user_struct(target_sd, target_addr, 1);
/* Convert a guest struct semid_ds at target_addr into host form:
 * permissions via target_to_host_ipc_perm(), then the scalar fields. */
2484 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
2485 abi_ulong target_addr)
2487 struct target_semid_ds *target_sd;
2489 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2490 return -TARGET_EFAULT;
2491 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
2492 return -TARGET_EFAULT;
2493 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
2494 host_sd->sem_otime = tswapal(target_sd->sem_otime);
2495 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
2496 unlock_user_struct(target_sd, target_addr, 0);
/* Inverse of target_to_host_semid_ds(): write a host struct semid_ds
 * back to guest memory at target_addr. */
2500 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
2501 struct semid_ds *host_sd)
2503 struct target_semid_ds *target_sd;
2505 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2506 return -TARGET_EFAULT;
2507 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
2508 return -TARGET_EFAULT;
2509 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
2510 target_sd->sem_otime = tswapal(host_sd->sem_otime);
2511 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
2512 unlock_user_struct(target_sd, target_addr, 1);
/* Guest-ABI layout of struct seminfo (fields elided in this chunk). */
2516 struct target_seminfo {
/* Copy a host struct seminfo (as filled by semctl IPC_INFO/SEM_INFO)
 * into the guest's struct target_seminfo, field by field. */
2529 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
2530 struct seminfo *host_seminfo)
2532 struct target_seminfo *target_seminfo;
2533 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
2534 return -TARGET_EFAULT;
2535 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
2536 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
2537 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
2538 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
2539 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
2540 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
2541 __put_user(host_seminfo->semume, &target_seminfo->semume);
2542 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
2543 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
2544 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
2545 unlock_user_struct(target_seminfo, target_addr, 1);
/* NOTE(review): these three members belong to the host-side union semun
 * whose opening brace is not visible in this chunk; line 2556 then
 * begins the guest-ABI union target_semun (its body is also elided). */
2551 struct semid_ds *buf;
2552 unsigned short *array;
2553 struct seminfo *__buf;
2556 union target_semun {
/* For semctl SETALL: query the set's nsems via IPC_STAT, allocate a host
 * array (caller frees), and fill it from the guest array at target_addr
 * with per-element byte swapping. */
2563 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
2564 abi_ulong target_addr)
2567 unsigned short *array;
2569 struct semid_ds semid_ds;
2572 semun.buf = &semid_ds;
2574 ret = semctl(semid, 0, IPC_STAT, semun);
2576 return get_errno(ret);
2578 nsems = semid_ds.sem_nsems;
2580 *host_array = malloc(nsems*sizeof(unsigned short));
2582 return -TARGET_ENOMEM;
2584 array = lock_user(VERIFY_READ, target_addr,
2585 nsems*sizeof(unsigned short), 1);
2588 return -TARGET_EFAULT;
2591 for(i=0; i<nsems; i++) {
2592 __get_user((*host_array)[i], &array[i]);
2594 unlock_user(array, target_addr, 0);
/* For semctl GETALL: copy the host semaphore-value array back into the
 * guest array at target_addr, swapping each element. */
2599 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
2600 unsigned short **host_array)
2603 unsigned short *array;
2605 struct semid_ds semid_ds;
2608 semun.buf = &semid_ds;
2610 ret = semctl(semid, 0, IPC_STAT, semun);
2612 return get_errno(ret);
2614 nsems = semid_ds.sem_nsems;
2616 array = lock_user(VERIFY_WRITE, target_addr,
2617 nsems*sizeof(unsigned short), 0);
2619 return -TARGET_EFAULT;
2621 for(i=0; i<nsems; i++) {
2622 __put_user((*host_array)[i], &array[i]);
2625 unlock_user(array, target_addr, 1);
/* Emulate semctl(2): convert the union semun argument per command
 * (GETVAL/SETVAL, GETALL/SETALL, IPC_STAT/IPC_SET, IPC_INFO/SEM_INFO),
 * issue the host call, and convert results back to the guest.
 * NOTE(review): the case labels themselves are not visible here. */
2630 static inline abi_long do_semctl(int semid, int semnum, int cmd,
2631 union target_semun target_su)
2634 struct semid_ds dsarg;
2635 unsigned short *array = NULL;
2636 struct seminfo seminfo;
2637 abi_long ret = -TARGET_EINVAL;
2644 arg.val = tswap32(target_su.val);
2645 ret = get_errno(semctl(semid, semnum, cmd, arg));
2646 target_su.val = tswap32(arg.val);
2650 err = target_to_host_semarray(semid, &array, target_su.array);
2654 ret = get_errno(semctl(semid, semnum, cmd, arg));
2655 err = host_to_target_semarray(semid, target_su.array, &array);
2662 err = target_to_host_semid_ds(&dsarg, target_su.buf);
2666 ret = get_errno(semctl(semid, semnum, cmd, arg));
2667 err = host_to_target_semid_ds(target_su.buf, &dsarg);
2673 arg.__buf = &seminfo;
2674 ret = get_errno(semctl(semid, semnum, cmd, arg));
2675 err = host_to_target_seminfo(target_su.__buf, &seminfo);
/* Commands with no semun argument fall through to a bare call. */
2683 ret = get_errno(semctl(semid, semnum, cmd, NULL));
/* Guest-ABI layout of struct sembuf (sem_op/sem_flg fields elided). */
2690 struct target_sembuf {
2691 unsigned short sem_num;
/* Copy an array of nsops guest sembuf entries into host form, swapping
 * each field. */
2696 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
2697 abi_ulong target_addr,
2700 struct target_sembuf *target_sembuf;
2703 target_sembuf = lock_user(VERIFY_READ, target_addr,
2704 nsops*sizeof(struct target_sembuf), 1);
2706 return -TARGET_EFAULT;
2708 for(i=0; i<nsops; i++) {
2709 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
2710 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
2711 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
2714 unlock_user(target_sembuf, target_addr, 0);
2719 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
2721 struct sembuf sops[nsops];
2723 if (target_to_host_sembuf(sops, ptr, nsops))
2724 return -TARGET_EFAULT;
2726 return get_errno(semop(semid, sops, nsops));
/* Guest-layout msqid_ds.  On 32-bit ABIs each time field is followed by
 * a padding word (__unused1..3) to match the kernel's 32-bit layout. */
2729 struct target_msqid_ds
2731 struct target_ipc_perm msg_perm;
2732 abi_ulong msg_stime;
2733 #if TARGET_ABI_BITS == 32
2734 abi_ulong __unused1;
2736 abi_ulong msg_rtime;
2737 #if TARGET_ABI_BITS == 32
2738 abi_ulong __unused2;
2740 abi_ulong msg_ctime;
2741 #if TARGET_ABI_BITS == 32
2742 abi_ulong __unused3;
2744 abi_ulong __msg_cbytes;
2746 abi_ulong msg_qbytes;
2747 abi_ulong msg_lspid;
2748 abi_ulong msg_lrpid;
2749 abi_ulong __unused4;
2750 abi_ulong __unused5;
/* Convert a guest msqid_ds at target_addr into the host *host_md,
 * byte-swapping every field.  Returns -TARGET_EFAULT on access failure.
 * NOTE(review): the early return after target_to_host_ipc_perm leaves
 * target_md locked -- verify whether an unlock is needed on that path. */
2753 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
2754 abi_ulong target_addr)
2756 struct target_msqid_ds *target_md;
2758 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
2759 return -TARGET_EFAULT;
2760 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
2761 return -TARGET_EFAULT;
2762 host_md->msg_stime = tswapal(target_md->msg_stime);
2763 host_md->msg_rtime = tswapal(target_md->msg_rtime);
2764 host_md->msg_ctime = tswapal(target_md->msg_ctime);
2765 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
2766 host_md->msg_qnum = tswapal(target_md->msg_qnum);
2767 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
2768 host_md->msg_lspid = tswapal(target_md->msg_lspid);
2769 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
2770 unlock_user_struct(target_md, target_addr, 0);
/* Mirror of target_to_host_msqid_ds: write a host msqid_ds back into
 * guest memory at target_addr with byte-swapped fields. */
2774 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
2775 struct msqid_ds *host_md)
2777 struct target_msqid_ds *target_md;
2779 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
2780 return -TARGET_EFAULT;
2781 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
2782 return -TARGET_EFAULT;
2783 target_md->msg_stime = tswapal(host_md->msg_stime);
2784 target_md->msg_rtime = tswapal(host_md->msg_rtime);
2785 target_md->msg_ctime = tswapal(host_md->msg_ctime);
2786 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
2787 target_md->msg_qnum = tswapal(host_md->msg_qnum);
2788 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
2789 target_md->msg_lspid = tswapal(host_md->msg_lspid);
2790 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
2791 unlock_user_struct(target_md, target_addr, 1);
/* Guest-layout struct msginfo, returned for IPC_INFO/MSG_INFO. */
2795 struct target_msginfo {
2803 unsigned short int msgseg;
/* Write a host struct msginfo into guest memory at target_addr,
 * byte-swapping each field via __put_user. */
2806 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
2807 struct msginfo *host_msginfo)
2809 struct target_msginfo *target_msginfo;
2810 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
2811 return -TARGET_EFAULT;
2812 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
2813 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
2814 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
2815 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
2816 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
2817 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
2818 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
2819 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
2820 unlock_user_struct(target_msginfo, target_addr, 1);
/* Emulate msgctl(2): marshal the command-specific argument between guest
 * and host layouts around the host msgctl call.  Returns target errnos.
 * NOTE(review): the case labels are not visible in this excerpt; the
 * groups below appear to be IPC_STAT/IPC_SET, the no-argument commands,
 * and IPC_INFO/MSG_INFO respectively. */
2824 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
2826 struct msqid_ds dsarg;
2827 struct msginfo msginfo;
2828 abi_long ret = -TARGET_EINVAL;
/* msqid_ds commands: convert in, call, convert out. */
2836 if (target_to_host_msqid_ds(&dsarg,ptr))
2837 return -TARGET_EFAULT;
2838 ret = get_errno(msgctl(msgid, cmd, &dsarg));
2839 if (host_to_target_msqid_ds(ptr,&dsarg))
2840 return -TARGET_EFAULT;
/* Commands with no argument. */
2843 ret = get_errno(msgctl(msgid, cmd, NULL));
/* Info commands: kernel fills a struct msginfo (cast as msqid_ds). */
2847 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
2848 if (host_to_target_msginfo(ptr, &msginfo))
2849 return -TARGET_EFAULT;
/* Guest-layout msgbuf: an mtype word followed by the mtext[] payload
 * (fields referenced by do_msgsnd/do_msgrcv below). */
2856 struct target_msgbuf {
2861 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
2862 unsigned int msgsz, int msgflg)
2864 struct target_msgbuf *target_mb;
2865 struct msgbuf *host_mb;
2868 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
2869 return -TARGET_EFAULT;
2870 host_mb = malloc(msgsz+sizeof(long));
2871 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
2872 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
2873 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
2875 unlock_user_struct(target_mb, msgp, 0);
/* Emulate msgrcv(2): receive into a host bounce buffer, then copy the
 * payload (ret bytes on success) and the byte-swapped mtype back to the
 * guest msgbuf at msgp.  Returns target errnos. */
2880 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
2881 unsigned int msgsz, abi_long msgtyp,
2884 struct target_msgbuf *target_mb;
2886 struct msgbuf *host_mb;
2889 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
2890 return -TARGET_EFAULT;
/* g_malloc aborts on OOM, so no NULL check is needed here. */
2892 host_mb = g_malloc(msgsz+sizeof(long));
2893 ret = get_errno(msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
/* On success ret is the received length; copy the payload, which lives
 * just past the guest's mtype word. */
2896 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
2897 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
2898 if (!target_mtext) {
2899 ret = -TARGET_EFAULT;
2902 memcpy(target_mb->mtext, host_mb->mtext, ret);
2903 unlock_user(target_mtext, target_mtext_addr, ret);
/* mtype is always written back, byte-swapped for the guest. */
2906 target_mb->mtype = tswapal(host_mb->mtype);
2910 unlock_user_struct(target_mb, msgp, 1);
/* Convert a guest shmid_ds at target_addr into the host *host_sd,
 * byte-swapping every field.  Returns -TARGET_EFAULT on access failure.
 * NOTE(review): like the msqid_ds variant, the early return after
 * target_to_host_ipc_perm leaves target_sd locked. */
2915 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
2916 abi_ulong target_addr)
2918 struct target_shmid_ds *target_sd;
2920 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2921 return -TARGET_EFAULT;
2922 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
2923 return -TARGET_EFAULT;
2924 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2925 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
2926 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2927 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2928 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2929 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2930 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2931 unlock_user_struct(target_sd, target_addr, 0);
/* Mirror of target_to_host_shmid_ds: write a host shmid_ds back into
 * guest memory at target_addr with byte-swapped fields. */
2935 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
2936 struct shmid_ds *host_sd)
2938 struct target_shmid_ds *target_sd;
2940 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2941 return -TARGET_EFAULT;
2942 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
2943 return -TARGET_EFAULT;
2944 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2945 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
2946 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2947 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2948 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2949 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2950 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2951 unlock_user_struct(target_sd, target_addr, 1);
/* Guest-layout struct shminfo, returned for IPC_INFO. */
2955 struct target_shminfo {
/* Write a host struct shminfo into guest memory at target_addr,
 * byte-swapping each field via __put_user. */
2963 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
2964 struct shminfo *host_shminfo)
2966 struct target_shminfo *target_shminfo;
2967 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
2968 return -TARGET_EFAULT;
2969 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
2970 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
2971 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
2972 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
2973 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
2974 unlock_user_struct(target_shminfo, target_addr, 1);
/* Guest-layout struct shm_info, returned for SHM_INFO. */
2978 struct target_shm_info {
2983 abi_ulong swap_attempts;
2984 abi_ulong swap_successes;
/* Write a host struct shm_info into guest memory at target_addr,
 * byte-swapping each field via __put_user. */
2987 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
2988 struct shm_info *host_shm_info)
2990 struct target_shm_info *target_shm_info;
2991 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
2992 return -TARGET_EFAULT;
2993 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
2994 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
2995 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
2996 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
2997 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
2998 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
2999 unlock_user_struct(target_shm_info, target_addr, 1);
/* Emulate shmctl(2): marshal the command-specific argument between
 * guest and host layouts around the host shmctl call.  Returns target
 * errnos.  NOTE(review): case labels are missing from this excerpt; the
 * groups below appear to be IPC_STAT/IPC_SET, IPC_INFO, SHM_INFO, and
 * the no-argument commands respectively. */
3003 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
3005 struct shmid_ds dsarg;
3006 struct shminfo shminfo;
3007 struct shm_info shm_info;
3008 abi_long ret = -TARGET_EINVAL;
/* shmid_ds commands: convert in, call, convert out. */
3016 if (target_to_host_shmid_ds(&dsarg, buf))
3017 return -TARGET_EFAULT;
3018 ret = get_errno(shmctl(shmid, cmd, &dsarg));
3019 if (host_to_target_shmid_ds(buf, &dsarg))
3020 return -TARGET_EFAULT;
/* IPC_INFO: kernel fills a struct shminfo (cast as shmid_ds). */
3023 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
3024 if (host_to_target_shminfo(buf, &shminfo))
3025 return -TARGET_EFAULT;
/* SHM_INFO: kernel fills a struct shm_info (cast as shmid_ds). */
3028 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
3029 if (host_to_target_shm_info(buf, &shm_info))
3030 return -TARGET_EFAULT;
/* Commands with no argument. */
3035 ret = get_errno(shmctl(shmid, cmd, NULL));
/* Emulate shmat(2): attach the host segment, either at the guest's
 * requested address (translated with g2h) or at a free region found by
 * mmap_find_vma, then mark the guest pages valid/readable (and writable
 * unless SHM_RDONLY) and record the mapping in shm_regions[] so that
 * do_shmdt can later clear it.  Returns the guest attach address or a
 * target errno. */
3042 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
3046 struct shmid_ds shm_info;
3049 /* find out the length of the shared memory segment */
3050 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
3051 if (is_error(ret)) {
3052 /* can't get length, bail out */
/* Guest supplied an explicit attach address. */
3059 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
/* Otherwise pick a free guest VA range ourselves. */
3061 abi_ulong mmap_start;
3063 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
3065 if (mmap_start == -1) {
3067 host_raddr = (void *)-1;
3069 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
3072 if (host_raddr == (void *)-1) {
3074 return get_errno((long)host_raddr);
3076 raddr=h2g((unsigned long)host_raddr);
3078 page_set_flags(raddr, raddr + shm_info.shm_segsz,
3079 PAGE_VALID | PAGE_READ |
3080 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
/* Remember the attachment in the first free shm_regions slot. */
3082 for (i = 0; i < N_SHM_REGIONS; i++) {
3083 if (shm_regions[i].start == 0) {
3084 shm_regions[i].start = raddr;
3085 shm_regions[i].size = shm_info.shm_segsz;
/* Emulate shmdt(2): drop the shm_regions[] bookkeeping entry and the
 * guest page flags for the attachment at shmaddr, then detach the host
 * mapping. */
3095 static inline abi_long do_shmdt(abi_ulong shmaddr)
3099 for (i = 0; i < N_SHM_REGIONS; ++i) {
3100 if (shm_regions[i].start == shmaddr) {
3101 shm_regions[i].start = 0;
3102 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
3107 return get_errno(shmdt(g2h(shmaddr)));
3110 #ifdef TARGET_NR_ipc
3111 /* ??? This only works with linear mappings. */
3112 /* do_ipc() must return target values and target errnos. */
/* Demultiplex the legacy ipc(2) syscall onto the individual SysV IPC
 * helpers above.  The high 16 bits of 'call' carry a version number;
 * version 0 of MSGRCV packs msgp/msgtyp into a kludge struct read from
 * guest memory. */
3113 static abi_long do_ipc(unsigned int call, int first,
3114 int second, int third,
3115 abi_long ptr, abi_long fifth)
3120 version = call >> 16;
3125 ret = do_semop(first, ptr, second);
3129 ret = get_errno(semget(first, second, third));
3133 ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
3137 ret = get_errno(msgget(first, second));
3141 ret = do_msgsnd(first, ptr, second, third);
3145 ret = do_msgctl(first, second, ptr);
/* Old-style MSGRCV: arguments come via an indirection struct. */
3152 struct target_ipc_kludge {
3157 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
3158 ret = -TARGET_EFAULT;
3162 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
3164 unlock_user_struct(tmp, ptr, 0);
3168 ret = do_msgrcv(first, ptr, second, fifth, third);
/* SHMAT returns the attach address through *third. */
3177 raddr = do_shmat(first, ptr, second);
3178 if (is_error(raddr))
3179 return get_errno(raddr);
3180 if (put_user_ual(raddr, third))
3181 return -TARGET_EFAULT;
3185 ret = -TARGET_EINVAL;
3190 ret = do_shmdt(ptr);
3194 /* IPC_* flag values are the same on all linux platforms */
3195 ret = get_errno(shmget(first, second, third));
3198 /* IPC_* and SHM_* command values are the same on all linux platforms */
3200 ret = do_shmctl(first, second, ptr);
3203 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
3204 ret = -TARGET_ENOSYS;
3211 /* kernel structure types definitions */
/* First expansion of syscall_types.h: build the STRUCT_<name> enum of
 * all known kernel structure types. */
3213 #define STRUCT(name, ...) STRUCT_ ## name,
3214 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3216 #include "syscall_types.h"
3219 #undef STRUCT_SPECIAL
/* Second expansion: emit a struct_<name>_def[] argtype descriptor for
 * each ordinary structure; SPECIAL entries get hand-written converters. */
3221 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3222 #define STRUCT_SPECIAL(name)
3223 #include "syscall_types.h"
3225 #undef STRUCT_SPECIAL
3227 typedef struct IOCTLEntry IOCTLEntry;
/* Custom conversion hook for ioctls that the generic thunk machinery
 * cannot marshal automatically. */
3229 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
3230 int fd, abi_long cmd, abi_long arg);
/* One row of the ioctl translation table: guest cmd, host cmd, optional
 * custom handler, and the argtype description of the argument. */
3233 unsigned int target_cmd;
3234 unsigned int host_cmd;
3237 do_ioctl_fn *do_ioctl;
3238 const argtype arg_type[5];
/* Direction flags for the ioctl argument (read/write/both). */
3241 #define IOC_R 0x0001
3242 #define IOC_W 0x0002
3243 #define IOC_RW (IOC_R | IOC_W)
/* Size of the fixed bounce buffer used for ioctl argument structs. */
3245 #define MAX_STRUCT_SIZE 4096
3247 #ifdef CONFIG_FIEMAP
3248 /* So fiemap access checks don't overflow on 32 bit systems.
3249 * This is very slightly smaller than the limit imposed by
3250 * the underlying kernel.
3252 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3253 / sizeof(struct fiemap_extent))
/* Custom handler for FS_IOC_FIEMAP: the argument is a struct fiemap
 * followed by fm_extent_count fiemap_extent records filled in by the
 * kernel, so the generic fixed-size thunk cannot marshal it. */
3255 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
3256 int fd, abi_long cmd, abi_long arg)
3258 /* The parameter for this ioctl is a struct fiemap followed
3259 * by an array of struct fiemap_extent whose size is set
3260 * in fiemap->fm_extent_count. The array is filled in by the
3263 int target_size_in, target_size_out;
3265 const argtype *arg_type = ie->arg_type;
3266 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
3269 int i, extent_size = thunk_type_size(extent_arg_type, 0);
3273 assert(arg_type[0] == TYPE_PTR);
3274 assert(ie->access == IOC_RW);
/* Convert the fixed struct fiemap header from guest to host. */
3276 target_size_in = thunk_type_size(arg_type, 0);
3277 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
3279 return -TARGET_EFAULT;
3281 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3282 unlock_user(argptr, arg, 0);
3283 fm = (struct fiemap *)buf_temp;
/* Bound the extent count so the size computation below can't overflow. */
3284 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
3285 return -TARGET_EINVAL;
3288 outbufsz = sizeof (*fm) +
3289 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
3291 if (outbufsz > MAX_STRUCT_SIZE) {
3292 /* We can't fit all the extents into the fixed size buffer.
3293 * Allocate one that is large enough and use it instead.
3295 fm = malloc(outbufsz);
3297 return -TARGET_ENOMEM;
3299 memcpy(fm, buf_temp, sizeof(struct fiemap));
3302 ret = get_errno(ioctl(fd, ie->host_cmd, fm));
3303 if (!is_error(ret)) {
3304 target_size_out = target_size_in;
3305 /* An extent_count of 0 means we were only counting the extents
3306 * so there are no structs to copy
3308 if (fm->fm_extent_count != 0) {
3309 target_size_out += fm->fm_mapped_extents * extent_size;
3311 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
3313 ret = -TARGET_EFAULT;
3315 /* Convert the struct fiemap */
3316 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
3317 if (fm->fm_extent_count != 0) {
3318 p = argptr + target_size_in;
3319 /* ...and then all the struct fiemap_extents */
3320 for (i = 0; i < fm->fm_mapped_extents; i++) {
3321 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
3326 unlock_user(argptr, arg, target_size_out);
/* Custom handler for SIOCGIFCONF: the struct ifconf carries a pointer
 * to a variable-length array of struct ifreq, so the pointer and both
 * lengths must be rewritten between guest and host layouts. */
3336 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
3337 int fd, abi_long cmd, abi_long arg)
3339 const argtype *arg_type = ie->arg_type;
3343 struct ifconf *host_ifconf;
3345 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
3346 int target_ifreq_size;
3351 abi_long target_ifc_buf;
3355 assert(arg_type[0] == TYPE_PTR);
3356 assert(ie->access == IOC_RW);
/* Convert the fixed struct ifconf header from guest to host. */
3359 target_size = thunk_type_size(arg_type, 0);
3361 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3363 return -TARGET_EFAULT;
3364 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3365 unlock_user(argptr, arg, 0);
/* Remember the guest's buffer pointer and length before we overwrite
 * them with host-side values. */
3367 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
3368 target_ifc_len = host_ifconf->ifc_len;
3369 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
/* Scale the guest length (in target ifreq units) to host ifreq units. */
3371 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
3372 nb_ifreq = target_ifc_len / target_ifreq_size;
3373 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
3375 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
3376 if (outbufsz > MAX_STRUCT_SIZE) {
3377 /* We can't fit all the extents into the fixed size buffer.
3378 * Allocate one that is large enough and use it instead.
3380 host_ifconf = malloc(outbufsz);
3382 return -TARGET_ENOMEM;
3384 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
/* Host ifreq array lives immediately after the ifconf header. */
3387 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
3389 host_ifconf->ifc_len = host_ifc_len;
3390 host_ifconf->ifc_buf = host_ifc_buf;
3392 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
3393 if (!is_error(ret)) {
3394 /* convert host ifc_len to target ifc_len */
3396 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
3397 target_ifc_len = nb_ifreq * target_ifreq_size;
3398 host_ifconf->ifc_len = target_ifc_len;
3400 /* restore target ifc_buf */
3402 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
3404 /* copy struct ifconf to target user */
3406 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3408 return -TARGET_EFAULT;
3409 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
3410 unlock_user(argptr, arg, target_size);
3412 /* copy ifreq[] to target user */
/* NOTE(review): this lock_user result is not NULL-checked before use --
 * verify whether a guard is needed here. */
3414 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
3415 for (i = 0; i < nb_ifreq ; i++) {
3416 thunk_convert(argptr + i * target_ifreq_size,
3417 host_ifc_buf + i * sizeof(struct ifreq),
3418 ifreq_arg_type, THUNK_TARGET);
3420 unlock_user(argptr, target_ifc_buf, target_ifc_len);
/* Custom handler for device-mapper ioctls (DM_*): struct dm_ioctl is
 * followed by a variable-size, command-specific data area described by
 * data_start/data_size, so each command's payload needs hand-written
 * conversion in both directions. */
3430 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
3431 abi_long cmd, abi_long arg)
3434 struct dm_ioctl *host_dm;
3435 abi_long guest_data;
3436 uint32_t guest_data_size;
3438 const argtype *arg_type = ie->arg_type;
3440 void *big_buf = NULL;
/* Convert the fixed dm_ioctl header from guest to host. */
3444 target_size = thunk_type_size(arg_type, 0);
3445 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3447 ret = -TARGET_EFAULT;
3450 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3451 unlock_user(argptr, arg, 0);
3453 /* buf_temp is too small, so fetch things into a bigger buffer */
3454 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
3455 memcpy(big_buf, buf_temp, target_size);
/* Locate the guest's trailing data area relative to the header. */
3459 guest_data = arg + host_dm->data_start;
3460 if ((guest_data - arg) < 0) {
3464 guest_data_size = host_dm->data_size - host_dm->data_start;
3465 host_data = (char*)host_dm + host_dm->data_start;
/* Convert command-specific input payload from guest to host. */
3467 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
3468 switch (ie->host_cmd) {
3470 case DM_LIST_DEVICES:
3473 case DM_DEV_SUSPEND:
3476 case DM_TABLE_STATUS:
3477 case DM_TABLE_CLEAR:
3479 case DM_LIST_VERSIONS:
3483 case DM_DEV_SET_GEOMETRY:
3484 /* data contains only strings */
3485 memcpy(host_data, argptr, guest_data_size);
/* Leading u64 needs a byte swap; the rest is copied verbatim. */
3488 memcpy(host_data, argptr, guest_data_size);
3489 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
/* Table load: convert each dm_target_spec plus its parameter string. */
3493 void *gspec = argptr;
3494 void *cur_data = host_data;
3495 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3496 int spec_size = thunk_type_size(arg_type, 0);
3499 for (i = 0; i < host_dm->target_count; i++) {
3500 struct dm_target_spec *spec = cur_data;
3504 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
3505 slen = strlen((char*)gspec + spec_size) + 1;
3507 spec->next = sizeof(*spec) + slen;
3508 strcpy((char*)&spec[1], gspec + spec_size);
3510 cur_data += spec->next;
3515 ret = -TARGET_EINVAL;
3518 unlock_user(argptr, guest_data, 0);
3520 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
/* Convert command-specific output payload from host back to guest. */
3521 if (!is_error(ret)) {
3522 guest_data = arg + host_dm->data_start;
3523 guest_data_size = host_dm->data_size - host_dm->data_start;
3524 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
3525 switch (ie->host_cmd) {
3530 case DM_DEV_SUSPEND:
3533 case DM_TABLE_CLEAR:
3535 case DM_DEV_SET_GEOMETRY:
3536 /* no return data */
3538 case DM_LIST_DEVICES:
/* Walk the linked dm_name_list, converting each record + name. */
3540 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
3541 uint32_t remaining_data = guest_data_size;
3542 void *cur_data = argptr;
3543 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
3544 int nl_size = 12; /* can't use thunk_size due to alignment */
3547 uint32_t next = nl->next;
3549 nl->next = nl_size + (strlen(nl->name) + 1);
3551 if (remaining_data < nl->next) {
3552 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3555 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
3556 strcpy(cur_data + nl_size, nl->name);
3557 cur_data += nl->next;
3558 remaining_data -= nl->next;
3562 nl = (void*)nl + next;
3567 case DM_TABLE_STATUS:
/* Convert target_count dm_target_spec records + trailing strings. */
3569 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
3570 void *cur_data = argptr;
3571 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3572 int spec_size = thunk_type_size(arg_type, 0);
3575 for (i = 0; i < host_dm->target_count; i++) {
3576 uint32_t next = spec->next;
3577 int slen = strlen((char*)&spec[1]) + 1;
3578 spec->next = (cur_data - argptr) + spec_size + slen;
3579 if (guest_data_size < spec->next) {
3580 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3583 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
3584 strcpy(cur_data + spec_size, (char*)&spec[1]);
3585 cur_data = argptr + spec->next;
3586 spec = (void*)host_dm + host_dm->data_start + next;
/* Device-dependency list: a count followed by u64 device numbers. */
3592 void *hdata = (void*)host_dm + host_dm->data_start;
3593 int count = *(uint32_t*)hdata;
3594 uint64_t *hdev = hdata + 8;
3595 uint64_t *gdev = argptr + 8;
3598 *(uint32_t*)argptr = tswap32(count);
3599 for (i = 0; i < count; i++) {
3600 *gdev = tswap64(*hdev);
3606 case DM_LIST_VERSIONS:
/* Walk the linked dm_target_versions list, converting each entry. */
3608 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
3609 uint32_t remaining_data = guest_data_size;
3610 void *cur_data = argptr;
3611 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
3612 int vers_size = thunk_type_size(arg_type, 0);
3615 uint32_t next = vers->next;
3617 vers->next = vers_size + (strlen(vers->name) + 1);
3619 if (remaining_data < vers->next) {
3620 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3623 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
3624 strcpy(cur_data + vers_size, vers->name);
3625 cur_data += vers->next;
3626 remaining_data -= vers->next;
3630 vers = (void*)vers + next;
3635 ret = -TARGET_EINVAL;
3638 unlock_user(argptr, guest_data, guest_data_size);
/* Finally write the (possibly updated) dm_ioctl header back out. */
3640 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3642 ret = -TARGET_EFAULT;
3645 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3646 unlock_user(argptr, arg, target_size);
/* Custom handler for routing-table ioctls on struct rtentry: the rt_dev
 * field is a pointer to a device-name string in guest memory, which the
 * generic thunk cannot follow, so the struct is converted field by field
 * and rt_dev is locked as a host string for the call. */
3653 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
3654 int fd, abi_long cmd, abi_long arg)
3656 const argtype *arg_type = ie->arg_type;
3657 const StructEntry *se;
3658 const argtype *field_types;
3659 const int *dst_offsets, *src_offsets;
3662 abi_ulong *target_rt_dev_ptr;
3663 unsigned long *host_rt_dev_ptr;
3667 assert(ie->access == IOC_W);
3668 assert(*arg_type == TYPE_PTR);
3670 assert(*arg_type == TYPE_STRUCT);
3671 target_size = thunk_type_size(arg_type, 0);
3672 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3674 return -TARGET_EFAULT;
3677 assert(*arg_type == (int)STRUCT_rtentry);
3678 se = struct_entries + *arg_type++;
3679 assert(se->convert[0] == NULL);
3680 /* convert struct here to be able to catch rt_dev string */
3681 field_types = se->field_types;
3682 dst_offsets = se->field_offsets[THUNK_HOST];
3683 src_offsets = se->field_offsets[THUNK_TARGET];
3684 for (i = 0; i < se->nb_fields; i++) {
/* rt_dev gets special handling: lock the guest string and store the
 * host pointer in the converted struct. */
3685 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
3686 assert(*field_types == TYPE_PTRVOID);
3687 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
3688 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
3689 if (*target_rt_dev_ptr != 0) {
3690 *host_rt_dev_ptr = (unsigned long)lock_user_string(
3691 tswapal(*target_rt_dev_ptr));
3692 if (!*host_rt_dev_ptr) {
3693 unlock_user(argptr, arg, 0);
3694 return -TARGET_EFAULT;
3697 *host_rt_dev_ptr = 0;
/* All other fields go through the generic thunk conversion. */
3702 field_types = thunk_convert(buf_temp + dst_offsets[i],
3703 argptr + src_offsets[i],
3704 field_types, THUNK_HOST);
3706 unlock_user(argptr, arg, 0);
3708 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
/* Release the locked device-name string, if any. */
3709 if (*host_rt_dev_ptr != 0) {
3710 unlock_user((void *)*host_rt_dev_ptr,
3711 *target_rt_dev_ptr, 0);
3716 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
3717 int fd, abi_long cmd, abi_long arg)
3719 int sig = target_to_host_signal(arg);
3720 return get_errno(ioctl(fd, ie->host_cmd, sig));
/* The ioctl translation table itself, populated from ioctls.h via the
 * IOCTL/IOCTL_SPECIAL macros (guest cmd, host cmd, name, direction,
 * optional custom handler, argtype description). */
3723 static IOCTLEntry ioctl_entries[] = {
3724 #define IOCTL(cmd, access, ...) \
3725 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
3726 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
3727 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
3732 /* ??? Implement proper locking for ioctls. */
3733 /* do_ioctl() Must return target values and target errnos. */
/* Generic ioctl dispatcher: look the guest command up in ioctl_entries,
 * delegate to a custom do_ioctl handler if one is registered, otherwise
 * marshal the argument through buf_temp using the thunk machinery
 * according to the entry's argtype and IOC_R/IOC_W direction flags. */
3734 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
3736 const IOCTLEntry *ie;
3737 const argtype *arg_type;
3739 uint8_t buf_temp[MAX_STRUCT_SIZE];
/* Linear search; the table is terminated by a zero target_cmd. */
3745 if (ie->target_cmd == 0) {
3746 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
3747 return -TARGET_ENOSYS;
3749 if (ie->target_cmd == cmd)
3753 arg_type = ie->arg_type;
3755 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
/* Entries with a custom handler bypass the generic marshalling. */
3758 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
3761 switch(arg_type[0]) {
/* No argument. */
3764 ret = get_errno(ioctl(fd, ie->host_cmd));
/* Scalar argument passed through unchanged. */
3769 ret = get_errno(ioctl(fd, ie->host_cmd, arg));
/* Pointer-to-struct argument: convert per direction flags. */
3773 target_size = thunk_type_size(arg_type, 0);
3774 switch(ie->access) {
/* Read-only ioctl: kernel fills buf_temp, convert out to guest. */
3776 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3777 if (!is_error(ret)) {
3778 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3780 return -TARGET_EFAULT;
3781 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3782 unlock_user(argptr, arg, target_size);
/* Write-only ioctl: convert guest struct in, then call. */
3786 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3788 return -TARGET_EFAULT;
3789 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3790 unlock_user(argptr, arg, 0);
3791 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
/* Read-write ioctl: convert in, call, convert back out. */
3795 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3797 return -TARGET_EFAULT;
3798 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3799 unlock_user(argptr, arg, 0);
3800 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3801 if (!is_error(ret)) {
3802 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3804 return -TARGET_EFAULT;
3805 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3806 unlock_user(argptr, arg, target_size);
3812 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
3813 (long)cmd, arg_type[0]);
3814 ret = -TARGET_ENOSYS;
/* Bit-for-bit translation table for termios input flags (c_iflag)
 * between target and host encodings; each row is
 * { target mask, target bits, host mask, host bits }. */
3820 static const bitmask_transtbl iflag_tbl[] = {
3821 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
3822 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
3823 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
3824 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
3825 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
3826 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
3827 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
3828 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
3829 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
3830 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
3831 { TARGET_IXON, TARGET_IXON, IXON, IXON },
3832 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
3833 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
3834 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
/* Translation table for termios output flags (c_oflag).  Multi-bit
 * delay fields (NLDLY, CRDLY, TABDLY, ...) get one row per value. */
3838 static const bitmask_transtbl oflag_tbl[] = {
3839 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
3840 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
3841 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
3842 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
3843 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
3844 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
3845 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
3846 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
3847 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
3848 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
3849 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
3850 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
3851 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
3852 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
3853 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
3854 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
3855 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
3856 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
3857 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
3858 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
3859 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
3860 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
3861 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
3862 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
/* Translation table for termios control flags (c_cflag).  The CBAUD and
 * CSIZE fields are multi-bit, so each baud rate / character size gets
 * its own row. */
3866 static const bitmask_transtbl cflag_tbl[] = {
3867 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
3868 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
3869 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
3870 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
3871 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
3872 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
3873 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
3874 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
3875 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
3876 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
3877 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
3878 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
3879 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
3880 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
3881 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
3882 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
3883 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
3884 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
3885 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
3886 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
3887 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
3888 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
3889 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
3890 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
3891 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
3892 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
3893 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
3894 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
3895 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
3896 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
3897 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
/* Translation table for termios local flags (c_lflag). */
3901 static const bitmask_transtbl lflag_tbl[] = {
3902 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
3903 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
3904 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
3905 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
3906 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
3907 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
3908 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
3909 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
3910 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
3911 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
3912 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
3913 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
3914 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
3915 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
3916 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
/* Convert a guest struct termios into the host layout: translate each
 * flag word through the bitmask tables above and remap the control
 * character array index by index. */
3920 static void target_to_host_termios (void *dst, const void *src)
3922 struct host_termios *host = dst;
3923 const struct target_termios *target = src;
3926 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
3928 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
3930 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
3932 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
3933 host->c_line = target->c_line;
/* Control characters: clear first, then copy each slot since the
 * TARGET_V* indices may differ from the host V* indices. */
3935 memset(host->c_cc, 0, sizeof(host->c_cc));
3936 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
3937 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
3938 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
3939 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
3940 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
3941 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
3942 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
3943 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
3944 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
3945 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
3946 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
3947 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
3948 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
3949 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
3950 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
3951 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
3952 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
/* Inverse of target_to_host_termios(): convert a host termios ('src')
 * into a guest struct target_termios ('dst'), bit-translating the flag
 * words and remapping each control character to the guest's index.
 * NOTE(review): the assignment left-hand sides of the four flag-word
 * conversions (target->c_iflag = ... etc.) are elided in this excerpt. */
3955 static void host_to_target_termios (void *dst, const void *src)
3957 struct target_termios *target = dst;
3958 const struct host_termios *host = src;
3961 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
3963 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
3965 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
3967 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
3968 target->c_line = host->c_line;
/* Zero first so unmapped guest c_cc slots read back as 0. */
3970 memset(target->c_cc, 0, sizeof(target->c_cc));
3971 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
3972 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
3973 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
3974 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
3975 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
3976 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
3977 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
3978 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
3979 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
3980 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
3981 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
3982 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
3983 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
3984 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
3985 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
3986 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
3987 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
/* Thunk descriptor for termios: registers the two converters above plus
 * the sizes/alignments of the guest and host structs, so the generic
 * ioctl marshalling code can translate termios arguments in both
 * directions. */
3990 static const StructEntry struct_termios_def = {
3991 .convert = { host_to_target_termios, target_to_host_termios },
3992 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
3993 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
/* mmap(2) flags translation table: guest TARGET_MAP_* bits to host MAP_*
 * bits.  NOTE(review): the MAP_NORESERVE entry's final field and the
 * closing "};" are elided in this excerpt. */
3996 static bitmask_transtbl mmap_flags_tbl[] = {
3997 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
3998 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
3999 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
4000 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
4001 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
4002 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
4003 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
4004 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
4005 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE, MAP_NORESERVE,
4010 #if defined(TARGET_I386)
4012 /* NOTE: there is really one LDT for all the threads */
4013 static uint8_t *ldt_table;
/* Emulate modify_ldt(func=0): copy the emulated LDT into the guest buffer
 * at 'ptr', clamped to 'bytecount'.  Returns -TARGET_EFAULT if the guest
 * buffer cannot be locked.
 * NOTE(review): local declarations, braces and the final return are
 * elided in this excerpt. */
4015 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
4022 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
4023 if (size > bytecount)
4025 p = lock_user(VERIFY_WRITE, ptr, size, 0);
4027 return -TARGET_EFAULT;
4028 /* ??? Should this by byteswapped? */
4029 memcpy(p, ldt_table, size);
4030 unlock_user(p, ptr, size);
4034 /* XXX: add locking support */
/* Emulate modify_ldt(func=1/0x11): install one LDT descriptor from the
 * guest struct at 'ptr'.  The flags word is unpacked exactly like the
 * Linux kernel does (seg_32bit, contents, read_exec_only, ...), the LDT
 * backing store is lazily allocated via target_mmap(), and the two
 * 32-bit descriptor words are built with kernel-identical bit layout
 * before being stored (target-endian) into ldt_table.
 * 'oldmode' selects the legacy modify_ldt semantics.
 * NOTE(review): several branches/braces are elided in this excerpt. */
4035 static abi_long write_ldt(CPUX86State *env,
4036 abi_ulong ptr, unsigned long bytecount, int oldmode)
4038 struct target_modify_ldt_ldt_s ldt_info;
4039 struct target_modify_ldt_ldt_s *target_ldt_info;
4040 int seg_32bit, contents, read_exec_only, limit_in_pages;
4041 int seg_not_present, useable, lm;
4042 uint32_t *lp, entry_1, entry_2;
4044 if (bytecount != sizeof(ldt_info))
4045 return -TARGET_EINVAL;
4046 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
4047 return -TARGET_EFAULT;
/* Copy in the guest descriptor request, byte-swapping each field. */
4048 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
4049 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
4050 ldt_info.limit = tswap32(target_ldt_info->limit);
4051 ldt_info.flags = tswap32(target_ldt_info->flags);
4052 unlock_user_struct(target_ldt_info, ptr, 0);
4054 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
4055 return -TARGET_EINVAL;
/* Unpack the packed flags word (same layout as the kernel's user_desc). */
4056 seg_32bit = ldt_info.flags & 1;
4057 contents = (ldt_info.flags >> 1) & 3;
4058 read_exec_only = (ldt_info.flags >> 3) & 1;
4059 limit_in_pages = (ldt_info.flags >> 4) & 1;
4060 seg_not_present = (ldt_info.flags >> 5) & 1;
4061 useable = (ldt_info.flags >> 6) & 1;
4065 lm = (ldt_info.flags >> 7) & 1;
4067 if (contents == 3) {
4069 return -TARGET_EINVAL;
4070 if (seg_not_present == 0)
4071 return -TARGET_EINVAL;
4073 /* allocate the LDT */
4075 env->ldt.base = target_mmap(0,
4076 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
4077 PROT_READ|PROT_WRITE,
4078 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
4079 if (env->ldt.base == -1)
4080 return -TARGET_ENOMEM;
4081 memset(g2h(env->ldt.base), 0,
4082 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
4083 env->ldt.limit = 0xffff;
4084 ldt_table = g2h(env->ldt.base);
4087 /* NOTE: same code as Linux kernel */
4088 /* Allow LDTs to be cleared by the user. */
4089 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
4092 read_exec_only == 1 &&
4094 limit_in_pages == 0 &&
4095 seg_not_present == 1 &&
/* Build the two descriptor halves: entry_1 = base[15:0]|limit[15:0],
 * entry_2 = base[31:24]|base[23:16] plus the access/flag bits. */
4103 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4104 (ldt_info.limit & 0x0ffff);
4105 entry_2 = (ldt_info.base_addr & 0xff000000) |
4106 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
4107 (ldt_info.limit & 0xf0000) |
4108 ((read_exec_only ^ 1) << 9) |
4110 ((seg_not_present ^ 1) << 15) |
4112 (limit_in_pages << 23) |
4116 entry_2 |= (useable << 20);
4118 /* Install the new entry ... */
4120 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
4121 lp[0] = tswap32(entry_1);
4122 lp[1] = tswap32(entry_2);
4126 /* specific and weird i386 syscalls */
/* Dispatcher for the modify_ldt(2) syscall: func selects read (0),
 * legacy write (1, oldmode) or new-style write (0x11); anything else
 * yields -TARGET_ENOSYS.  NOTE(review): the switch/case labels and the
 * return are elided in this excerpt. */
4127 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
4128 unsigned long bytecount)
4134 ret = read_ldt(ptr, bytecount);
4137 ret = write_ldt(env, ptr, bytecount, 1);
4140 ret = write_ldt(env, ptr, bytecount, 0);
4143 ret = -TARGET_ENOSYS;
4149 #if defined(TARGET_I386) && defined(TARGET_ABI32)
/* Emulate set_thread_area(2): install a TLS descriptor into the emulated
 * GDT.  If the guest passes entry_number == -1, the first free GDT TLS
 * slot is chosen and written back to the guest struct.  The descriptor
 * words are built with the same bit layout as write_ldt() above.
 * NOTE(review): several branches/braces are elided in this excerpt. */
4150 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
4152 uint64_t *gdt_table = g2h(env->gdt.base);
4153 struct target_modify_ldt_ldt_s ldt_info;
4154 struct target_modify_ldt_ldt_s *target_ldt_info;
4155 int seg_32bit, contents, read_exec_only, limit_in_pages;
4156 int seg_not_present, useable, lm;
4157 uint32_t *lp, entry_1, entry_2;
4160 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
4161 if (!target_ldt_info)
4162 return -TARGET_EFAULT;
4163 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
4164 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
4165 ldt_info.limit = tswap32(target_ldt_info->limit);
4166 ldt_info.flags = tswap32(target_ldt_info->flags);
/* -1 means "pick a free TLS slot"; scan the GDT TLS range for a zero
 * (unused) descriptor and report the chosen index back to the guest. */
4167 if (ldt_info.entry_number == -1) {
4168 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
4169 if (gdt_table[i] == 0) {
4170 ldt_info.entry_number = i;
4171 target_ldt_info->entry_number = tswap32(i);
4176 unlock_user_struct(target_ldt_info, ptr, 1);
4178 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
4179 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
4180 return -TARGET_EINVAL;
/* Unpack the flags word (same layout as the kernel's user_desc). */
4181 seg_32bit = ldt_info.flags & 1;
4182 contents = (ldt_info.flags >> 1) & 3;
4183 read_exec_only = (ldt_info.flags >> 3) & 1;
4184 limit_in_pages = (ldt_info.flags >> 4) & 1;
4185 seg_not_present = (ldt_info.flags >> 5) & 1;
4186 useable = (ldt_info.flags >> 6) & 1;
4190 lm = (ldt_info.flags >> 7) & 1;
4193 if (contents == 3) {
4194 if (seg_not_present == 0)
4195 return -TARGET_EINVAL;
4198 /* NOTE: same code as Linux kernel */
4199 /* Allow LDTs to be cleared by the user. */
4200 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
4201 if ((contents == 0 &&
4202 read_exec_only == 1 &&
4204 limit_in_pages == 0 &&
4205 seg_not_present == 1 &&
/* Build the two descriptor halves, kernel-identical bit layout. */
4213 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4214 (ldt_info.limit & 0x0ffff);
4215 entry_2 = (ldt_info.base_addr & 0xff000000) |
4216 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
4217 (ldt_info.limit & 0xf0000) |
4218 ((read_exec_only ^ 1) << 9) |
4220 ((seg_not_present ^ 1) << 15) |
4222 (limit_in_pages << 23) |
4227 /* Install the new entry ... */
4229 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
4230 lp[0] = tswap32(entry_1);
4231 lp[1] = tswap32(entry_2);
/* Emulate get_thread_area(2): read the TLS descriptor at the guest's
 * requested GDT index, decode the two descriptor words back into
 * base/limit/flags, and write them into the guest struct at 'ptr'.
 * Exact inverse of the packing done in do_set_thread_area(). */
4235 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
4237 struct target_modify_ldt_ldt_s *target_ldt_info;
4238 uint64_t *gdt_table = g2h(env->gdt.base);
4239 uint32_t base_addr, limit, flags;
4240 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
4241 int seg_not_present, useable, lm;
4242 uint32_t *lp, entry_1, entry_2;
4244 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
4245 if (!target_ldt_info)
4246 return -TARGET_EFAULT;
4247 idx = tswap32(target_ldt_info->entry_number);
4248 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
4249 idx > TARGET_GDT_ENTRY_TLS_MAX) {
4250 unlock_user_struct(target_ldt_info, ptr, 1);
4251 return -TARGET_EINVAL;
4253 lp = (uint32_t *)(gdt_table + idx);
4254 entry_1 = tswap32(lp[0]);
4255 entry_2 = tswap32(lp[1]);
/* Decode the access bits; XOR ^1 undoes the inverted encoding used
 * when the descriptor was packed. */
4257 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
4258 contents = (entry_2 >> 10) & 3;
4259 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
4260 seg_32bit = (entry_2 >> 22) & 1;
4261 limit_in_pages = (entry_2 >> 23) & 1;
4262 useable = (entry_2 >> 20) & 1;
4266 lm = (entry_2 >> 21) & 1;
/* Repack into the user_desc-style flags word. */
4268 flags = (seg_32bit << 0) | (contents << 1) |
4269 (read_exec_only << 3) | (limit_in_pages << 4) |
4270 (seg_not_present << 5) | (useable << 6) | (lm << 7);
4271 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
4272 base_addr = (entry_1 >> 16) |
4273 (entry_2 & 0xff000000) |
4274 ((entry_2 & 0xff) << 16);
4275 target_ldt_info->base_addr = tswapal(base_addr);
4276 target_ldt_info->limit = tswap32(limit);
4277 target_ldt_info->flags = tswap32(flags);
4278 unlock_user_struct(target_ldt_info, ptr, 1);
4281 #endif /* TARGET_I386 && TARGET_ABI32 */
4283 #ifndef TARGET_ABI32
/* Emulate x86-64 arch_prctl(2): SET_GS/SET_FS store 'addr' as the
 * segment base (loading a null selector first); GET_GS/GET_FS copy the
 * current base out to the guest pointer 'addr'.  Unknown codes yield
 * -TARGET_EINVAL.  NOTE(review): the idx selection and returns are
 * partially elided in this excerpt. */
4284 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
4291 case TARGET_ARCH_SET_GS:
4292 case TARGET_ARCH_SET_FS:
4293 if (code == TARGET_ARCH_SET_GS)
4297 cpu_x86_load_seg(env, idx, 0);
4298 env->segs[idx].base = addr;
4300 case TARGET_ARCH_GET_GS:
4301 case TARGET_ARCH_GET_FS:
4302 if (code == TARGET_ARCH_GET_GS)
4306 val = env->segs[idx].base;
4307 if (put_user(val, addr, abi_ulong))
4308 ret = -TARGET_EFAULT;
4311 ret = -TARGET_EINVAL;
4318 #endif /* defined(TARGET_I386) */
/* Per-thread clone() bookkeeping.  clone_lock serialises thread creation
 * so the child's setup appears atomic to the parent; the mutex/cond pair
 * below hand-shakes parent and child during startup, and the *_tidptr
 * fields record where the new TID must be stored.
 * NOTE(review): the struct header and several members of this typedef
 * (new_thread_info) are elided in this excerpt. */
4320 #define NEW_STACK_SIZE 0x40000
4323 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
4326 pthread_mutex_t mutex;
4327 pthread_cond_t cond;
4330 abi_ulong child_tidptr;
4331 abi_ulong parent_tidptr;
/* pthread start routine for an emulated clone(CLONE_VM) thread: record
 * the new host TID (writing it to the guest child/parent tid pointers if
 * requested), restore the signal mask the parent saved, signal readiness
 * via the info cond var, then block on clone_lock until the parent has
 * finished TLS/CPU setup. */
4335 static void *clone_func(void *arg)
4337 new_thread_info *info = arg;
4343 cpu = ENV_GET_CPU(env);
4345 ts = (TaskState *)cpu->opaque;
4346 info->tid = gettid();
4347 cpu->host_tid = info->tid;
4349 if (info->child_tidptr)
4350 put_user_u32(info->tid, info->child_tidptr);
4351 if (info->parent_tidptr)
4352 put_user_u32(info->tid, info->parent_tidptr);
4353 /* Enable signals. */
4354 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
4355 /* Signal to the parent that we're ready. */
4356 pthread_mutex_lock(&info->mutex);
4357 pthread_cond_broadcast(&info->cond);
4358 pthread_mutex_unlock(&info->mutex);
4359 /* Wait until the parent has finshed initializing the tls state. */
4360 pthread_mutex_lock(&clone_lock);
4361 pthread_mutex_unlock(&clone_lock);
4367 /* do_fork() Must return host values and target errnos (unlike most
4368 do_*() functions). */
/* Emulate clone(2)/fork(2)/vfork(2).  CLONE_VM requests become a host
 * pthread driving a copied CPU state (with a mutex/cond handshake so the
 * child's setup appears atomic); everything else falls back to a host
 * fork().  vfork is deliberately degraded to fork.
 * NOTE(review): the fork() call itself and the parent/child split are
 * partially elided in this excerpt. */
4369 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
4370 abi_ulong parent_tidptr, target_ulong newtls,
4371 abi_ulong child_tidptr)
4373 CPUState *cpu = ENV_GET_CPU(env);
4377 CPUArchState *new_env;
4378 unsigned int nptl_flags;
4381 /* Emulate vfork() with fork() */
4382 if (flags & CLONE_VFORK)
4383 flags &= ~(CLONE_VFORK | CLONE_VM);
4385 if (flags & CLONE_VM) {
4386 TaskState *parent_ts = (TaskState *)cpu->opaque;
4387 new_thread_info info;
4388 pthread_attr_t attr;
4390 ts = g_malloc0(sizeof(TaskState));
4391 init_task_state(ts);
4392 /* we create a new CPU instance. */
4393 new_env = cpu_copy(env);
4394 /* Init regs that differ from the parent. */
4395 cpu_clone_regs(new_env, newsp);
4396 new_cpu = ENV_GET_CPU(new_env);
4397 new_cpu->opaque = ts;
4398 ts->bprm = parent_ts->bprm;
4399 ts->info = parent_ts->info;
/* The NPTL-specific flags are handled here, not passed to the host. */
4401 flags &= ~CLONE_NPTL_FLAGS2;
4403 if (nptl_flags & CLONE_CHILD_CLEARTID) {
4404 ts->child_tidptr = child_tidptr;
4407 if (nptl_flags & CLONE_SETTLS)
4408 cpu_set_tls (new_env, newtls);
4410 /* Grab a mutex so that thread setup appears atomic. */
4411 pthread_mutex_lock(&clone_lock);
4413 memset(&info, 0, sizeof(info));
4414 pthread_mutex_init(&info.mutex, NULL);
4415 pthread_mutex_lock(&info.mutex);
4416 pthread_cond_init(&info.cond, NULL);
4418 if (nptl_flags & CLONE_CHILD_SETTID)
4419 info.child_tidptr = child_tidptr;
4420 if (nptl_flags & CLONE_PARENT_SETTID)
4421 info.parent_tidptr = parent_tidptr;
4423 ret = pthread_attr_init(&attr);
4424 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
4425 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
4426 /* It is not safe to deliver signals until the child has finished
4427 initializing, so temporarily block all signals. */
4428 sigfillset(&sigmask);
4429 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
4431 ret = pthread_create(&info.thread, &attr, clone_func, &info);
4432 /* TODO: Free new CPU state if thread creation failed. */
4434 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
4435 pthread_attr_destroy(&attr);
4437 /* Wait for the child to initialize. */
4438 pthread_cond_wait(&info.cond, &info.mutex);
4440 if (flags & CLONE_PARENT_SETTID)
4441 put_user_u32(ret, parent_tidptr);
4445 pthread_mutex_unlock(&info.mutex);
4446 pthread_cond_destroy(&info.cond);
4447 pthread_mutex_destroy(&info.mutex);
4448 pthread_mutex_unlock(&clone_lock);
4450 /* if no CLONE_VM, we consider it is a fork */
4451 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
4456 /* Child Process. */
4457 cpu_clone_regs(env, newsp);
4459 /* There is a race condition here. The parent process could
4460 theoretically read the TID in the child process before the child
4461 tid is set. This would require using either ptrace
4462 (not implemented) or having *_tidptr to point at a shared memory
4463 mapping. We can't repeat the spinlock hack used above because
4464 the child process gets its own copy of the lock. */
4465 if (flags & CLONE_CHILD_SETTID)
4466 put_user_u32(gettid(), child_tidptr)
4467 if (flags & CLONE_PARENT_SETTID)
4468 put_user_u32(gettid(), parent_tidptr);
4469 ts = (TaskState *)cpu->opaque;
4470 if (flags & CLONE_SETTLS)
4471 cpu_set_tls (env, newtls);
4472 if (flags & CLONE_CHILD_CLEARTID)
4473 ts->child_tidptr = child_tidptr;
4481 /* warning : doesn't handle linux specific flags... */
/* Map a guest TARGET_F_* fcntl command to the host F_* value; returns
 * -TARGET_EINVAL for commands we do not translate.  On 32-bit ABIs the
 * *64 lock commands are mapped to the host's 64-bit lock commands.
 * NOTE(review): most of the per-case "return F_*;" lines are elided in
 * this excerpt. */
4482 static int target_to_host_fcntl_cmd(int cmd)
4485 case TARGET_F_DUPFD:
4486 case TARGET_F_GETFD:
4487 case TARGET_F_SETFD:
4488 case TARGET_F_GETFL:
4489 case TARGET_F_SETFL:
4491 case TARGET_F_GETLK:
4493 case TARGET_F_SETLK:
4495 case TARGET_F_SETLKW:
4497 case TARGET_F_GETOWN:
4499 case TARGET_F_SETOWN:
4501 case TARGET_F_GETSIG:
4503 case TARGET_F_SETSIG:
4505 #if TARGET_ABI_BITS == 32
4506 case TARGET_F_GETLK64:
4508 case TARGET_F_SETLK64:
4510 case TARGET_F_SETLKW64:
4513 case TARGET_F_SETLEASE:
4515 case TARGET_F_GETLEASE:
4517 #ifdef F_DUPFD_CLOEXEC
4518 case TARGET_F_DUPFD_CLOEXEC:
4519 return F_DUPFD_CLOEXEC;
4521 case TARGET_F_NOTIFY:
4524 case TARGET_F_GETOWN_EX:
4528 case TARGET_F_SETOWN_EX:
4532 return -TARGET_EINVAL;
4534 return -TARGET_EINVAL;
/* Translation table for struct flock l_type values (F_RDLCK etc.).
 * TRANSTBL_CONVERT uses -1 masks so each entry matches on exact value
 * rather than on bit overlap.  NOTE(review): the closing "};" is elided
 * in this excerpt. */
4537 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
4538 static const bitmask_transtbl flock_tbl[] = {
4539 TRANSTBL_CONVERT(F_RDLCK),
4540 TRANSTBL_CONVERT(F_WRLCK),
4541 TRANSTBL_CONVERT(F_UNLCK),
4542 TRANSTBL_CONVERT(F_EXLCK),
4543 TRANSTBL_CONVERT(F_SHLCK),
/* Emulate fcntl(2): translate the command via target_to_host_fcntl_cmd(),
 * marshal struct flock / flock64 / f_owner_ex arguments between guest and
 * host layouts (byte-swapping every field), translate flag bitmasks for
 * F_GETFL/F_SETFL, and pass simple integer commands straight through.
 * NOTE(review): some assignment left-hand sides (fl.l_type = ...),
 * 'break' statements, and the switch scaffolding are elided in this
 * excerpt. */
4547 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
4550 struct target_flock *target_fl;
4551 struct flock64 fl64;
4552 struct target_flock64 *target_fl64;
4554 struct f_owner_ex fox;
4555 struct target_f_owner_ex *target_fox;
4558 int host_cmd = target_to_host_fcntl_cmd(cmd);
4560 if (host_cmd == -TARGET_EINVAL)
4564 case TARGET_F_GETLK:
4565 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4566 return -TARGET_EFAULT;
4568 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
4569 fl.l_whence = tswap16(target_fl->l_whence);
4570 fl.l_start = tswapal(target_fl->l_start);
4571 fl.l_len = tswapal(target_fl->l_len);
4572 fl.l_pid = tswap32(target_fl->l_pid);
4573 unlock_user_struct(target_fl, arg, 0);
4574 ret = get_errno(fcntl(fd, host_cmd, &fl));
/* F_GETLK writes the conflicting lock description back to the guest. */
4576 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
4577 return -TARGET_EFAULT;
4579 host_to_target_bitmask(tswap16(fl.l_type), flock_tbl);
4580 target_fl->l_whence = tswap16(fl.l_whence);
4581 target_fl->l_start = tswapal(fl.l_start);
4582 target_fl->l_len = tswapal(fl.l_len);
4583 target_fl->l_pid = tswap32(fl.l_pid);
4584 unlock_user_struct(target_fl, arg, 1);
4588 case TARGET_F_SETLK:
4589 case TARGET_F_SETLKW:
4590 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4591 return -TARGET_EFAULT;
4593 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
4594 fl.l_whence = tswap16(target_fl->l_whence);
4595 fl.l_start = tswapal(target_fl->l_start);
4596 fl.l_len = tswapal(target_fl->l_len);
4597 fl.l_pid = tswap32(target_fl->l_pid);
4598 unlock_user_struct(target_fl, arg, 0);
4599 ret = get_errno(fcntl(fd, host_cmd, &fl));
4602 case TARGET_F_GETLK64:
4603 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4604 return -TARGET_EFAULT;
4606 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
4607 fl64.l_whence = tswap16(target_fl64->l_whence);
4608 fl64.l_start = tswap64(target_fl64->l_start);
4609 fl64.l_len = tswap64(target_fl64->l_len);
4610 fl64.l_pid = tswap32(target_fl64->l_pid);
4611 unlock_user_struct(target_fl64, arg, 0);
4612 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4614 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
4615 return -TARGET_EFAULT;
4616 target_fl64->l_type =
4617 host_to_target_bitmask(tswap16(fl64.l_type), flock_tbl) >> 1;
4618 target_fl64->l_whence = tswap16(fl64.l_whence);
4619 target_fl64->l_start = tswap64(fl64.l_start);
4620 target_fl64->l_len = tswap64(fl64.l_len);
4621 target_fl64->l_pid = tswap32(fl64.l_pid);
4622 unlock_user_struct(target_fl64, arg, 1);
4625 case TARGET_F_SETLK64:
4626 case TARGET_F_SETLKW64:
4627 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4628 return -TARGET_EFAULT;
4630 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
4631 fl64.l_whence = tswap16(target_fl64->l_whence);
4632 fl64.l_start = tswap64(target_fl64->l_start);
4633 fl64.l_len = tswap64(target_fl64->l_len);
4634 fl64.l_pid = tswap32(target_fl64->l_pid);
4635 unlock_user_struct(target_fl64, arg, 0);
4636 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4639 case TARGET_F_GETFL:
4640 ret = get_errno(fcntl(fd, host_cmd, arg));
/* Translate the returned O_* status flags back to guest encoding. */
4642 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
4646 case TARGET_F_SETFL:
4647 ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
4651 case TARGET_F_GETOWN_EX:
4652 ret = get_errno(fcntl(fd, host_cmd, &fox));
4654 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
4655 return -TARGET_EFAULT;
4656 target_fox->type = tswap32(fox.type);
4657 target_fox->pid = tswap32(fox.pid);
4658 unlock_user_struct(target_fox, arg, 1);
4664 case TARGET_F_SETOWN_EX:
4665 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
4666 return -TARGET_EFAULT;
4667 fox.type = tswap32(target_fox->type);
4668 fox.pid = tswap32(target_fox->pid);
4669 unlock_user_struct(target_fox, arg, 0);
4670 ret = get_errno(fcntl(fd, host_cmd, &fox));
/* Plain-integer commands: no argument marshalling needed. */
4674 case TARGET_F_SETOWN:
4675 case TARGET_F_GETOWN:
4676 case TARGET_F_SETSIG:
4677 case TARGET_F_GETSIG:
4678 case TARGET_F_SETLEASE:
4679 case TARGET_F_GETLEASE:
4680 ret = get_errno(fcntl(fd, host_cmd, arg));
4684 ret = get_errno(fcntl(fd, cmd, arg));
/* 16-bit vs 32-bit UID/GID conversion helpers.  On USE_UID16 targets the
 * guest ABI carries 16-bit IDs: high2low* clamp 32-bit host IDs into the
 * 16-bit range, low2high* widen guest IDs (preserving the -1 "no change"
 * sentinel), and tswapid/put_user_id handle the 16-bit byte swap/store.
 * On !USE_UID16 targets all of these are identity/32-bit operations.
 * NOTE(review): the function bodies are almost entirely elided in this
 * excerpt -- only the -1 sentinel checks are visible. */
4692 static inline int high2lowuid(int uid)
4700 static inline int high2lowgid(int gid)
4708 static inline int low2highuid(int uid)
4710 if ((int16_t)uid == -1)
4716 static inline int low2highgid(int gid)
4718 if ((int16_t)gid == -1)
4723 static inline int tswapid(int id)
4728 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
4730 #else /* !USE_UID16 */
4731 static inline int high2lowuid(int uid)
4735 static inline int high2lowgid(int gid)
4739 static inline int low2highuid(int uid)
4743 static inline int low2highgid(int gid)
4747 static inline int tswapid(int id)
4752 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
4754 #endif /* USE_UID16 */
/* One-time syscall-layer initialisation: register all thunk struct
 * descriptors from syscall_types.h, build the reverse errno table, and
 * patch each ioctl table entry whose size field is the TARGET_IOC_SIZEMASK
 * placeholder with the real thunk-computed argument size.  When host and
 * target are the same architecture, cross-check that target and host
 * ioctl numbers agree. */
4756 void syscall_init(void)
4759 const argtype *arg_type;
4763 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4764 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4765 #include "syscall_types.h"
4767 #undef STRUCT_SPECIAL
4769 /* Build target_to_host_errno_table[] table from
4770 * host_to_target_errno_table[]. */
4771 for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
4772 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
4775 /* we patch the ioctl size if necessary. We rely on the fact that
4776 no ioctl has all the bits at '1' in the size field */
4778 while (ie->target_cmd != 0) {
4779 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
4780 TARGET_IOC_SIZEMASK) {
4781 arg_type = ie->arg_type;
4782 if (arg_type[0] != TYPE_PTR) {
4783 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
4788 size = thunk_type_size(arg_type, 0);
4789 ie->target_cmd = (ie->target_cmd &
4790 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
4791 (size << TARGET_IOC_SIZESHIFT);
4794 /* automatic consistency check if same arch */
4795 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
4796 (defined(__x86_64__) && defined(TARGET_X86_64))
4797 if (unlikely(ie->target_cmd != ie->host_cmd)) {
4798 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
4799 ie->name, ie->target_cmd, ie->host_cmd);
4806 #if TARGET_ABI_BITS == 32
/* Combine the two 32-bit register halves of a 64-bit syscall argument
 * into one uint64_t, respecting the guest's endianness (big-endian
 * guests pass the high word first).  On 64-bit ABIs (below) the value
 * arrives whole, so word1 is ignored. */
4807 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
4809 #ifdef TARGET_WORDS_BIGENDIAN
4810 return ((uint64_t)word0 << 32) | word1;
4812 return ((uint64_t)word1 << 32) | word0;
4815 #else /* TARGET_ABI_BITS == 32 */
4816 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
4820 #endif /* TARGET_ABI_BITS != 32 */
4822 #ifdef TARGET_NR_truncate64
/* Emulate truncate64(2): reassemble the 64-bit length from the arg2/arg3
 * register pair (shifting the pair by one register first on ABIs that
 * require aligned register pairs) and call the host truncate64(). */
4823 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
4828 if (regpairs_aligned(cpu_env)) {
4832 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
4836 #ifdef TARGET_NR_ftruncate64
/* Emulate ftruncate64(2): same register-pair reassembly as
 * target_truncate64() above, but operating on an open fd. */
4837 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
4842 if (regpairs_aligned(cpu_env)) {
4846 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
/* Copy a guest struct timespec at 'target_addr' into 'host_ts',
 * byte-swapping both fields.  Returns 0 on success or -TARGET_EFAULT if
 * the guest address cannot be locked. */
4850 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
4851 abi_ulong target_addr)
4853 struct target_timespec *target_ts;
4855 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
4856 return -TARGET_EFAULT;
4857 host_ts->tv_sec = tswapal(target_ts->tv_sec);
4858 host_ts->tv_nsec = tswapal(target_ts->tv_nsec);
4859 unlock_user_struct(target_ts, target_addr, 0);
/* Copy 'host_ts' out to the guest struct timespec at 'target_addr',
 * byte-swapping both fields.  Returns 0 on success or -TARGET_EFAULT if
 * the guest address cannot be locked for writing. */
4863 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
4864 struct timespec *host_ts)
4866 struct target_timespec *target_ts;
4868 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
4869 return -TARGET_EFAULT;
4870 target_ts->tv_sec = tswapal(host_ts->tv_sec);
4871 target_ts->tv_nsec = tswapal(host_ts->tv_nsec);
4872 unlock_user_struct(target_ts, target_addr, 1);
/* Copy a guest struct itimerspec at 'target_addr' into 'host_itspec',
 * byte-swapping the interval and value timespecs.  Returns 0 on success
 * or -TARGET_EFAULT if the guest address cannot be locked. */
4876 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
4877 abi_ulong target_addr)
4879 struct target_itimerspec *target_itspec;
4881 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
4882 return -TARGET_EFAULT;
4885 host_itspec->it_interval.tv_sec =
4886 tswapal(target_itspec->it_interval.tv_sec);
4887 host_itspec->it_interval.tv_nsec =
4888 tswapal(target_itspec->it_interval.tv_nsec);
4889 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
4890 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
4892 unlock_user_struct(target_itspec, target_addr, 1);
/* Copy 'host_its' out to the guest struct itimerspec at 'target_addr',
 * byte-swapping the interval and value timespecs.  Returns 0 on success
 * or -TARGET_EFAULT if the guest address cannot be locked for writing. */
4896 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
4897 struct itimerspec *host_its)
4899 struct target_itimerspec *target_itspec;
4901 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
4902 return -TARGET_EFAULT;
4905 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
4906 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
4908 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
4909 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
4911 unlock_user_struct(target_itspec, target_addr, 0);
/* Copy a guest struct sigevent at 'target_addr' into 'host_sevp',
 * byte-swapping each field and translating the guest signal number to
 * the host one.  Returns 0 on success or -TARGET_EFAULT if the guest
 * address cannot be locked. */
4915 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
4916 abi_ulong target_addr)
4918 struct target_sigevent *target_sevp;
4920 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
4921 return -TARGET_EFAULT;
4924 /* This union is awkward on 64 bit systems because it has a 32 bit
4925 * integer and a pointer in it; we follow the conversion approach
4926 * used for handling sigval types in signal.c so the guest should get
4927 * the correct value back even if we did a 64 bit byteswap and it's
4928 * using the 32 bit integer.
4930 host_sevp->sigev_value.sival_ptr =
4931 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
4932 host_sevp->sigev_signo =
4933 target_to_host_signal(tswap32(target_sevp->sigev_signo));
4934 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
4935 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
4937 unlock_user_struct(target_sevp, target_addr, 1);
4941 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
/* Copy a host 'struct stat' into the guest's 64-bit stat layout at
 * 'target_addr'.  ARM EABI guests get the special target_eabi_stat64
 * layout; everyone else gets target_stat64 (or target_stat if the target
 * has no stat64).  __put_user handles per-field byte swapping; the struct
 * is zeroed first so padding/unset fields read as 0. */
4942 static inline abi_long host_to_target_stat64(void *cpu_env,
4943 abi_ulong target_addr,
4944 struct stat *host_st)
4946 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
4947 if (((CPUARMState *)cpu_env)->eabi) {
4948 struct target_eabi_stat64 *target_st;
4950 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4951 return -TARGET_EFAULT;
4952 memset(target_st, 0, sizeof(struct target_eabi_stat64));
4953 __put_user(host_st->st_dev, &target_st->st_dev);
4954 __put_user(host_st->st_ino, &target_st->st_ino);
4955 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4956 __put_user(host_st->st_ino, &target_st->__st_ino);
4958 __put_user(host_st->st_mode, &target_st->st_mode);
4959 __put_user(host_st->st_nlink, &target_st->st_nlink);
4960 __put_user(host_st->st_uid, &target_st->st_uid);
4961 __put_user(host_st->st_gid, &target_st->st_gid);
4962 __put_user(host_st->st_rdev, &target_st->st_rdev);
4963 __put_user(host_st->st_size, &target_st->st_size);
4964 __put_user(host_st->st_blksize, &target_st->st_blksize);
4965 __put_user(host_st->st_blocks, &target_st->st_blocks);
4966 __put_user(host_st->st_atime, &target_st->target_st_atime);
4967 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4968 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4969 unlock_user_struct(target_st, target_addr, 1);
4973 #if defined(TARGET_HAS_STRUCT_STAT64)
4974 struct target_stat64 *target_st;
4976 struct target_stat *target_st;
4979 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4980 return -TARGET_EFAULT;
4981 memset(target_st, 0, sizeof(*target_st));
4982 __put_user(host_st->st_dev, &target_st->st_dev);
4983 __put_user(host_st->st_ino, &target_st->st_ino);
4984 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4985 __put_user(host_st->st_ino, &target_st->__st_ino);
4987 __put_user(host_st->st_mode, &target_st->st_mode);
4988 __put_user(host_st->st_nlink, &target_st->st_nlink);
4989 __put_user(host_st->st_uid, &target_st->st_uid);
4990 __put_user(host_st->st_gid, &target_st->st_gid);
4991 __put_user(host_st->st_rdev, &target_st->st_rdev);
4992 /* XXX: better use of kernel struct */
4993 __put_user(host_st->st_size, &target_st->st_size);
4994 __put_user(host_st->st_blksize, &target_st->st_blksize);
4995 __put_user(host_st->st_blocks, &target_st->st_blocks);
4996 __put_user(host_st->st_atime, &target_st->target_st_atime);
4997 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4998 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4999 unlock_user_struct(target_st, target_addr, 1);
5006 /* ??? Using host futex calls even when target atomic operations
5007 are not really atomic probably breaks things. However implementing
5008 futexes locally would make futexes shared between multiple processes
5009 tricky. However they're probably useless because guest atomic
5010 operations won't work either. */
/* Emulate futex(2): mask off FUTEX_CMD_MASK to find the base operation,
 * convert the guest timeout to a host timespec for the WAIT variants,
 * and forward to the host sys_futex on the g2h-translated address.  For
 * REQUEUE/CMP_REQUEUE the "timeout" slot actually carries an integer
 * (val2) and is passed through uninterpreted.
 * NOTE(review): the switch scaffolding and several case labels are
 * elided in this excerpt. */
5011 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
5012 target_ulong uaddr2, int val3)
5014 struct timespec ts, *pts;
5017 /* ??? We assume FUTEX_* constants are the same on both host
5019 #ifdef FUTEX_CMD_MASK
5020 base_op = op & FUTEX_CMD_MASK;
5026 case FUTEX_WAIT_BITSET:
5029 target_to_host_timespec(pts, timeout);
5033 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
5036 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
5038 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
5040 case FUTEX_CMP_REQUEUE:
5042 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
5043 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
5044 But the prototype takes a `struct timespec *'; insert casts
5045 to satisfy the compiler. We do not need to tswap TIMEOUT
5046 since it's not compared to guest memory. */
5047 pts = (struct timespec *)(uintptr_t) timeout;
5048 return get_errno(sys_futex(g2h(uaddr), op, val, pts,
5050 (base_op == FUTEX_CMP_REQUEUE
5054 return -TARGET_ENOSYS;
5058 /* Map host to target signal numbers for the wait family of syscalls.
5059 Assume all other status bits are the same. */
/* Translate the signal number embedded in a wait(2)-family status word:
 * the low 7 bits for a killed process, or bits 8-15 for a stopped one.
 * All other bits pass through unchanged. */
5060 int host_to_target_waitstatus(int status)
5062 if (WIFSIGNALED(status)) {
5063 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
5065 if (WIFSTOPPED(status)) {
5066 return (host_to_target_signal(WSTOPSIG(status)) << 8)
/* Emulate reading /proc/self/cmdline: stream the host file into 'fd'
 * but skip the first NUL-terminated string (the qemu binary path) so the
 * guest sees its own argv[0] first.
 * NOTE(review): error-handling branches and the read loop scaffolding
 * are elided in this excerpt. */
5072 static int open_self_cmdline(void *cpu_env, int fd)
5075 bool word_skipped = false;
5077 fd_orig = open("/proc/self/cmdline", O_RDONLY);
5087 nb_read = read(fd_orig, buf, sizeof(buf));
5089 fd_orig = close(fd_orig);
5091 } else if (nb_read == 0) {
5095 if (!word_skipped) {
5096 /* Skip the first string, which is the path to qemu-*-static
5097 instead of the actual command. */
5098 cp_buf = memchr(buf, 0, sizeof(buf));
5100 /* Null byte found, skip one string */
5102 nb_read -= cp_buf - buf;
5103 word_skipped = true;
5108 if (write(fd, cp_buf, nb_read) != nb_read) {
5114 return close(fd_orig);
/* Emulate reading /proc/self/maps: parse the host maps file line by line
 * and re-emit only the regions that fall inside the guest address space,
 * with host addresses translated to guest addresses (h2g) and the guest
 * stack region labelled "[stack]". */
5117 static int open_self_maps(void *cpu_env, int fd)
5119 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
5120 TaskState *ts = cpu->opaque;
5126 fp = fopen("/proc/self/maps", "r");
5131 while ((read = getline(&line, &len, fp)) != -1) {
5132 int fields, dev_maj, dev_min, inode;
5133 uint64_t min, max, offset;
5134 char flag_r, flag_w, flag_x, flag_p;
5135 char path[512] = "";
5136 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
5137 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
5138 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
/* 10 fields = anonymous mapping, 11 = mapping with a path. */
5140 if ((fields < 10) || (fields > 11)) {
5143 if (h2g_valid(min)) {
5144 int flags = page_get_flags(h2g(min));
/* Clamp mappings that extend past the guest address space. */
5145 max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX);
5146 if (page_check_range(h2g(min), max - min, flags) == -1) {
5149 if (h2g(min) == ts->info->stack_limit) {
5150 pstrcpy(path, sizeof(path), " [stack]");
5152 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
5153 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
5154 h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
5155 flag_x, flag_p, offset, dev_maj, dev_min, inode,
5156 path[0] ? " " : "", path);
/* Emulated /proc/self/stat: write the 44 space-separated stat fields.
   Only field 0 (pid — presumably, by position; the value assignment is
   elided in this view), field 1 (comm, from the guest argv[0]) and
   field 27 (start of stack) carry real values; every other field is
   emitted as "0". */
5166 static int open_self_stat(void *cpu_env, int fd)
5168 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
5169 TaskState *ts = cpu->opaque;
5170 abi_ulong start_stack = ts->info->start_stack;
5173 for (i = 0; i < 44; i++) {
5181 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
5182 } else if (i == 1) {
/* Field 1: executable name, in the kernel's "(comm)" form. */
5184 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
5185 } else if (i == 27) {
/* Field 27: address of the start of the guest stack. */
5188 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
5190 /* for the rest, there is MasterCard */
/* Field 43 is the last one: terminate the line with '\n'. */
5191 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
5195 if (write(fd, buf, len) != len) {
/* Emulated /proc/self/auxv: copy the guest's ELF auxiliary vector,
   saved on the target stack at load time, out to FD, then rewind FD so
   the caller reads it from the start.
   NOTE(review): sampled view — lock_user/write error checks are
   elided here. */
5203 static int open_self_auxv(void *cpu_env, int fd)
5205 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
5206 TaskState *ts = cpu->opaque;
5207 abi_ulong auxv = ts->info->saved_auxv;
5208 abi_ulong len = ts->info->auxv_len;
5212 * Auxiliary vector is stored in target process stack.
5213 * read in whole auxv vector and copy it to file
/* Map the guest memory holding the auxv into host address space. */
5215 ptr = lock_user(VERIFY_READ, auxv, len, 0);
5216 r = write(fd, ptr, len);
5226 lseek(fd, 0, SEEK_SET);
5227 unlock_user(ptr, auxv, len);
/* Return non-zero when FILENAME names /proc/<self>/ENTRY, where <self>
   is either the literal "self" or this process's own numeric pid.
   ENTRY is the trailing component only (e.g. "maps", "exe").
   NOTE(review): sampled view — the early-return paths and closing
   braces are not visible here. */
5233 static int is_proc_myself(const char *filename, const char *entry)
5235 if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
5236 filename += strlen("/proc/");
5237 if (!strncmp(filename, "self/", strlen("self/"))) {
5238 filename += strlen("self/");
/* A pid component never starts with '0', hence '1'..'9'. */
5239 } else if (*filename >= '1' && *filename <= '9') {
/* Build "<ourpid>/" and require an exact prefix match. */
5241 snprintf(myself, sizeof(myself), "%d/", getpid());
5242 if (!strncmp(filename, myself, strlen(myself))) {
5243 filename += strlen(myself);
/* Whatever remains must be exactly the requested entry. */
5250 if (!strcmp(filename, entry)) {
5257 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Whole-path /proc matcher: unlike is_proc_myself(), ENTRY here is the
   full absolute path (e.g. "/proc/net/route") and must match FILENAME
   exactly.  Returns non-zero on a match. */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
/* Cross-endian /proc/net/route: copy the header line verbatim, then for
   each route byte-swap the 32-bit Destination/Gateway/Mask columns so
   the guest reads them in its own byte order (the kernel emits them in
   host order).  Only compiled when host and target endianness differ.
   NOTE(review): sampled view — fopen error handling and the
   getline/fclose cleanup are elided here. */
5263 static int open_net_route(void *cpu_env, int fd)
5270 fp = fopen("/proc/net/route", "r");
/* First line is the column header: pass it through unchanged. */
5277 read = getline(&line, &len, fp);
5278 dprintf(fd, "%s", line);
5282 while ((read = getline(&line, &len, fp)) != -1) {
5284 uint32_t dest, gw, mask;
5285 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
5286 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
5287 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
5288 &mask, &mtu, &window, &irtt);
/* Re-emit the row with dest/gw/mask swapped to guest endianness;
   the purely numeric columns are endian-neutral text. */
5289 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
5290 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
5291 metric, tswap32(mask), mtu, window, irtt);
/* openat(2) with /proc interception: opens of /proc files whose host
   contents would expose qemu's own state (maps, stat, auxv, cmdline,
   and /proc/net/route on cross-endian hosts) are redirected to an
   anonymous temp file filled by the matching fake_open handler.
   /proc/<self>/exe is redirected to the guest executable itself.
   Everything else falls through to the real sys_openat().
   NOTE(review): sampled view — unlink of the temp file, error paths
   and several closing braces are not visible here. */
5301 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
5304 const char *filename;
5305 int (*fill)(void *cpu_env, int fd);
5306 int (*cmp)(const char *s1, const char *s2);
5308 const struct fake_open *fake_open;
/* Table of intercepted paths; cmp decides how filename is matched
   (is_proc_myself for per-process entries, is_proc for exact paths). */
5309 static const struct fake_open fakes[] = {
5310 { "maps", open_self_maps, is_proc_myself },
5311 { "stat", open_self_stat, is_proc_myself },
5312 { "auxv", open_self_auxv, is_proc_myself },
5313 { "cmdline", open_self_cmdline, is_proc_myself },
5314 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
5315 { "/proc/net/route", open_net_route, is_proc },
5317 { NULL, NULL, NULL }
/* /proc/self/exe: prefer the fd the loader left us via AT_EXECFD,
   otherwise reopen the recorded executable path. */
5320 if (is_proc_myself(pathname, "exe")) {
5321 int execfd = qemu_getauxval(AT_EXECFD);
5322 return execfd ? execfd : get_errno(sys_openat(dirfd, exec_path, flags, mode));
5325 for (fake_open = fakes; fake_open->filename; fake_open++) {
5326 if (fake_open->cmp(pathname, fake_open->filename)) {
5331 if (fake_open->filename) {
5333 char filename[PATH_MAX];
5336 /* create temporary file to map stat to */
5337 tmpdir = getenv("TMPDIR");
5340 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
5341 fd = mkstemp(filename);
/* Populate the temp file with the faked contents, then rewind so the
   guest reads it from the beginning. */
5347 if ((r = fake_open->fill(cpu_env, fd))) {
5351 lseek(fd, 0, SEEK_SET);
/* Not an intercepted path: perform the real openat. */
5356 return get_errno(sys_openat(dirfd, path(pathname), flags, mode));
5359 /* do_syscall() should always have a single exit point at the end so
5360 that actions, such as logging of syscall results, can be performed.
5361 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
5362 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
5363 abi_long arg2, abi_long arg3, abi_long arg4,
5364 abi_long arg5, abi_long arg6, abi_long arg7,
5367 CPUState *cpu = ENV_GET_CPU(cpu_env);
5374 gemu_log("syscall %d", num);
5377 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
5380 case TARGET_NR_exit:
5381 /* In old applications this may be used to implement _exit(2).
5382 However in threaded applictions it is used for thread termination,
5383 and _exit_group is used for application termination.
5384 Do thread termination if we have more then one thread. */
5385 /* FIXME: This probably breaks if a signal arrives. We should probably
5386 be disabling signals. */
5387 if (CPU_NEXT(first_cpu)) {
5391 /* Remove the CPU from the list. */
5392 QTAILQ_REMOVE(&cpus, cpu, node);
5395 if (ts->child_tidptr) {
5396 put_user_u32(0, ts->child_tidptr);
5397 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
5401 object_unref(OBJECT(cpu));
5408 gdb_exit(cpu_env, arg1);
5410 ret = 0; /* avoid warning */
5412 case TARGET_NR_read:
5416 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
5418 ret = get_errno(read(arg1, p, arg3));
5419 unlock_user(p, arg2, ret);
5422 case TARGET_NR_write:
5423 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
5425 ret = get_errno(write(arg1, p, arg3));
5426 unlock_user(p, arg2, 0);
5428 case TARGET_NR_open:
5429 if (!(p = lock_user_string(arg1)))
5431 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
5432 target_to_host_bitmask(arg2, fcntl_flags_tbl),
5434 unlock_user(p, arg1, 0);
5436 case TARGET_NR_openat:
5437 if (!(p = lock_user_string(arg2)))
5439 ret = get_errno(do_openat(cpu_env, arg1, p,
5440 target_to_host_bitmask(arg3, fcntl_flags_tbl),
5442 unlock_user(p, arg2, 0);
5444 case TARGET_NR_close:
5445 ret = get_errno(close(arg1));
5450 case TARGET_NR_fork:
5451 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
5453 #ifdef TARGET_NR_waitpid
5454 case TARGET_NR_waitpid:
5457 ret = get_errno(waitpid(arg1, &status, arg3));
5458 if (!is_error(ret) && arg2 && ret
5459 && put_user_s32(host_to_target_waitstatus(status), arg2))
5464 #ifdef TARGET_NR_waitid
5465 case TARGET_NR_waitid:
5469 ret = get_errno(waitid(arg1, arg2, &info, arg4));
5470 if (!is_error(ret) && arg3 && info.si_pid != 0) {
5471 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
5473 host_to_target_siginfo(p, &info);
5474 unlock_user(p, arg3, sizeof(target_siginfo_t));
5479 #ifdef TARGET_NR_creat /* not on alpha */
5480 case TARGET_NR_creat:
5481 if (!(p = lock_user_string(arg1)))
5483 ret = get_errno(creat(p, arg2));
5484 unlock_user(p, arg1, 0);
5487 case TARGET_NR_link:
5490 p = lock_user_string(arg1);
5491 p2 = lock_user_string(arg2);
5493 ret = -TARGET_EFAULT;
5495 ret = get_errno(link(p, p2));
5496 unlock_user(p2, arg2, 0);
5497 unlock_user(p, arg1, 0);
5500 #if defined(TARGET_NR_linkat)
5501 case TARGET_NR_linkat:
5506 p = lock_user_string(arg2);
5507 p2 = lock_user_string(arg4);
5509 ret = -TARGET_EFAULT;
5511 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
5512 unlock_user(p, arg2, 0);
5513 unlock_user(p2, arg4, 0);
5517 case TARGET_NR_unlink:
5518 if (!(p = lock_user_string(arg1)))
5520 ret = get_errno(unlink(p));
5521 unlock_user(p, arg1, 0);
5523 #if defined(TARGET_NR_unlinkat)
5524 case TARGET_NR_unlinkat:
5525 if (!(p = lock_user_string(arg2)))
5527 ret = get_errno(unlinkat(arg1, p, arg3));
5528 unlock_user(p, arg2, 0);
5531 case TARGET_NR_execve:
5533 char **argp, **envp;
5536 abi_ulong guest_argp;
5537 abi_ulong guest_envp;
5544 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
5545 if (get_user_ual(addr, gp))
5553 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
5554 if (get_user_ual(addr, gp))
5561 argp = alloca((argc + 1) * sizeof(void *));
5562 envp = alloca((envc + 1) * sizeof(void *));
5564 for (gp = guest_argp, q = argp; gp;
5565 gp += sizeof(abi_ulong), q++) {
5566 if (get_user_ual(addr, gp))
5570 if (!(*q = lock_user_string(addr)))
5572 total_size += strlen(*q) + 1;
5576 for (gp = guest_envp, q = envp; gp;
5577 gp += sizeof(abi_ulong), q++) {
5578 if (get_user_ual(addr, gp))
5582 if (!(*q = lock_user_string(addr)))
5584 total_size += strlen(*q) + 1;
5588 /* This case will not be caught by the host's execve() if its
5589 page size is bigger than the target's. */
5590 if (total_size > MAX_ARG_PAGES * TARGET_PAGE_SIZE) {
5591 ret = -TARGET_E2BIG;
5594 if (!(p = lock_user_string(arg1)))
5596 ret = get_errno(execve(p, argp, envp));
5597 unlock_user(p, arg1, 0);
5602 ret = -TARGET_EFAULT;
5605 for (gp = guest_argp, q = argp; *q;
5606 gp += sizeof(abi_ulong), q++) {
5607 if (get_user_ual(addr, gp)
5610 unlock_user(*q, addr, 0);
5612 for (gp = guest_envp, q = envp; *q;
5613 gp += sizeof(abi_ulong), q++) {
5614 if (get_user_ual(addr, gp)
5617 unlock_user(*q, addr, 0);
5621 case TARGET_NR_chdir:
5622 if (!(p = lock_user_string(arg1)))
5624 ret = get_errno(chdir(p));
5625 unlock_user(p, arg1, 0);
5627 #ifdef TARGET_NR_time
5628 case TARGET_NR_time:
5631 ret = get_errno(time(&host_time));
5634 && put_user_sal(host_time, arg1))
5639 case TARGET_NR_mknod:
5640 if (!(p = lock_user_string(arg1)))
5642 ret = get_errno(mknod(p, arg2, arg3));
5643 unlock_user(p, arg1, 0);
5645 #if defined(TARGET_NR_mknodat)
5646 case TARGET_NR_mknodat:
5647 if (!(p = lock_user_string(arg2)))
5649 ret = get_errno(mknodat(arg1, p, arg3, arg4));
5650 unlock_user(p, arg2, 0);
5653 case TARGET_NR_chmod:
5654 if (!(p = lock_user_string(arg1)))
5656 ret = get_errno(chmod(p, arg2));
5657 unlock_user(p, arg1, 0);
5659 #ifdef TARGET_NR_break
5660 case TARGET_NR_break:
5663 #ifdef TARGET_NR_oldstat
5664 case TARGET_NR_oldstat:
5667 case TARGET_NR_lseek:
5668 ret = get_errno(lseek(arg1, arg2, arg3));
5670 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
5671 /* Alpha specific */
5672 case TARGET_NR_getxpid:
5673 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
5674 ret = get_errno(getpid());
5677 #ifdef TARGET_NR_getpid
5678 case TARGET_NR_getpid:
5679 ret = get_errno(getpid());
5682 case TARGET_NR_mount:
5684 /* need to look at the data field */
5688 p = lock_user_string(arg1);
5696 p2 = lock_user_string(arg2);
5699 unlock_user(p, arg1, 0);
5705 p3 = lock_user_string(arg3);
5708 unlock_user(p, arg1, 0);
5710 unlock_user(p2, arg2, 0);
5717 /* FIXME - arg5 should be locked, but it isn't clear how to
5718 * do that since it's not guaranteed to be a NULL-terminated
5722 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
5724 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
5726 ret = get_errno(ret);
5729 unlock_user(p, arg1, 0);
5731 unlock_user(p2, arg2, 0);
5733 unlock_user(p3, arg3, 0);
5737 #ifdef TARGET_NR_umount
5738 case TARGET_NR_umount:
5739 if (!(p = lock_user_string(arg1)))
5741 ret = get_errno(umount(p));
5742 unlock_user(p, arg1, 0);
5745 #ifdef TARGET_NR_stime /* not on alpha */
5746 case TARGET_NR_stime:
5749 if (get_user_sal(host_time, arg1))
5751 ret = get_errno(stime(&host_time));
5755 case TARGET_NR_ptrace:
5757 #ifdef TARGET_NR_alarm /* not on alpha */
5758 case TARGET_NR_alarm:
5762 #ifdef TARGET_NR_oldfstat
5763 case TARGET_NR_oldfstat:
5766 #ifdef TARGET_NR_pause /* not on alpha */
5767 case TARGET_NR_pause:
5768 ret = get_errno(pause());
5771 #ifdef TARGET_NR_utime
5772 case TARGET_NR_utime:
5774 struct utimbuf tbuf, *host_tbuf;
5775 struct target_utimbuf *target_tbuf;
5777 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
5779 tbuf.actime = tswapal(target_tbuf->actime);
5780 tbuf.modtime = tswapal(target_tbuf->modtime);
5781 unlock_user_struct(target_tbuf, arg2, 0);
5786 if (!(p = lock_user_string(arg1)))
5788 ret = get_errno(utime(p, host_tbuf));
5789 unlock_user(p, arg1, 0);
5793 case TARGET_NR_utimes:
5795 struct timeval *tvp, tv[2];
5797 if (copy_from_user_timeval(&tv[0], arg2)
5798 || copy_from_user_timeval(&tv[1],
5799 arg2 + sizeof(struct target_timeval)))
5805 if (!(p = lock_user_string(arg1)))
5807 ret = get_errno(utimes(p, tvp));
5808 unlock_user(p, arg1, 0);
5811 #if defined(TARGET_NR_futimesat)
5812 case TARGET_NR_futimesat:
5814 struct timeval *tvp, tv[2];
5816 if (copy_from_user_timeval(&tv[0], arg3)
5817 || copy_from_user_timeval(&tv[1],
5818 arg3 + sizeof(struct target_timeval)))
5824 if (!(p = lock_user_string(arg2)))
5826 ret = get_errno(futimesat(arg1, path(p), tvp));
5827 unlock_user(p, arg2, 0);
5831 #ifdef TARGET_NR_stty
5832 case TARGET_NR_stty:
5835 #ifdef TARGET_NR_gtty
5836 case TARGET_NR_gtty:
5839 case TARGET_NR_access:
5840 if (!(p = lock_user_string(arg1)))
5842 ret = get_errno(access(path(p), arg2));
5843 unlock_user(p, arg1, 0);
5845 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
5846 case TARGET_NR_faccessat:
5847 if (!(p = lock_user_string(arg2)))
5849 ret = get_errno(faccessat(arg1, p, arg3, 0));
5850 unlock_user(p, arg2, 0);
5853 #ifdef TARGET_NR_nice /* not on alpha */
5854 case TARGET_NR_nice:
5855 ret = get_errno(nice(arg1));
5858 #ifdef TARGET_NR_ftime
5859 case TARGET_NR_ftime:
5862 case TARGET_NR_sync:
5866 case TARGET_NR_kill:
5867 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
5869 case TARGET_NR_rename:
5872 p = lock_user_string(arg1);
5873 p2 = lock_user_string(arg2);
5875 ret = -TARGET_EFAULT;
5877 ret = get_errno(rename(p, p2));
5878 unlock_user(p2, arg2, 0);
5879 unlock_user(p, arg1, 0);
5882 #if defined(TARGET_NR_renameat)
5883 case TARGET_NR_renameat:
5886 p = lock_user_string(arg2);
5887 p2 = lock_user_string(arg4);
5889 ret = -TARGET_EFAULT;
5891 ret = get_errno(renameat(arg1, p, arg3, p2));
5892 unlock_user(p2, arg4, 0);
5893 unlock_user(p, arg2, 0);
5897 case TARGET_NR_mkdir:
5898 if (!(p = lock_user_string(arg1)))
5900 ret = get_errno(mkdir(p, arg2));
5901 unlock_user(p, arg1, 0);
5903 #if defined(TARGET_NR_mkdirat)
5904 case TARGET_NR_mkdirat:
5905 if (!(p = lock_user_string(arg2)))
5907 ret = get_errno(mkdirat(arg1, p, arg3));
5908 unlock_user(p, arg2, 0);
5911 case TARGET_NR_rmdir:
5912 if (!(p = lock_user_string(arg1)))
5914 ret = get_errno(rmdir(p));
5915 unlock_user(p, arg1, 0);
5918 ret = get_errno(dup(arg1));
5920 case TARGET_NR_pipe:
5921 ret = do_pipe(cpu_env, arg1, 0, 0);
5923 #ifdef TARGET_NR_pipe2
5924 case TARGET_NR_pipe2:
5925 ret = do_pipe(cpu_env, arg1,
5926 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
5929 case TARGET_NR_times:
5931 struct target_tms *tmsp;
5933 ret = get_errno(times(&tms));
5935 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
5938 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
5939 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
5940 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
5941 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
5944 ret = host_to_target_clock_t(ret);
5947 #ifdef TARGET_NR_prof
5948 case TARGET_NR_prof:
5951 #ifdef TARGET_NR_signal
5952 case TARGET_NR_signal:
5955 case TARGET_NR_acct:
5957 ret = get_errno(acct(NULL));
5959 if (!(p = lock_user_string(arg1)))
5961 ret = get_errno(acct(path(p)));
5962 unlock_user(p, arg1, 0);
5965 #ifdef TARGET_NR_umount2
5966 case TARGET_NR_umount2:
5967 if (!(p = lock_user_string(arg1)))
5969 ret = get_errno(umount2(p, arg2));
5970 unlock_user(p, arg1, 0);
5973 #ifdef TARGET_NR_lock
5974 case TARGET_NR_lock:
5977 case TARGET_NR_ioctl:
5978 ret = do_ioctl(arg1, arg2, arg3);
5980 case TARGET_NR_fcntl:
5981 ret = do_fcntl(arg1, arg2, arg3);
5983 #ifdef TARGET_NR_mpx
5987 case TARGET_NR_setpgid:
5988 ret = get_errno(setpgid(arg1, arg2));
5990 #ifdef TARGET_NR_ulimit
5991 case TARGET_NR_ulimit:
5994 #ifdef TARGET_NR_oldolduname
5995 case TARGET_NR_oldolduname:
5998 case TARGET_NR_umask:
5999 ret = get_errno(umask(arg1));
6001 case TARGET_NR_chroot:
6002 if (!(p = lock_user_string(arg1)))
6004 ret = get_errno(chroot(p));
6005 unlock_user(p, arg1, 0);
6007 case TARGET_NR_ustat:
6009 case TARGET_NR_dup2:
6010 ret = get_errno(dup2(arg1, arg2));
6012 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
6013 case TARGET_NR_dup3:
6014 ret = get_errno(dup3(arg1, arg2, arg3));
6017 #ifdef TARGET_NR_getppid /* not on alpha */
6018 case TARGET_NR_getppid:
6019 ret = get_errno(getppid());
6022 case TARGET_NR_getpgrp:
6023 ret = get_errno(getpgrp());
6025 case TARGET_NR_setsid:
6026 ret = get_errno(setsid());
6028 #ifdef TARGET_NR_sigaction
6029 case TARGET_NR_sigaction:
6031 #if defined(TARGET_ALPHA)
6032 struct target_sigaction act, oact, *pact = 0;
6033 struct target_old_sigaction *old_act;
6035 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
6037 act._sa_handler = old_act->_sa_handler;
6038 target_siginitset(&act.sa_mask, old_act->sa_mask);
6039 act.sa_flags = old_act->sa_flags;
6040 act.sa_restorer = 0;
6041 unlock_user_struct(old_act, arg2, 0);
6044 ret = get_errno(do_sigaction(arg1, pact, &oact));
6045 if (!is_error(ret) && arg3) {
6046 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
6048 old_act->_sa_handler = oact._sa_handler;
6049 old_act->sa_mask = oact.sa_mask.sig[0];
6050 old_act->sa_flags = oact.sa_flags;
6051 unlock_user_struct(old_act, arg3, 1);
6053 #elif defined(TARGET_MIPS)
6054 struct target_sigaction act, oact, *pact, *old_act;
6057 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
6059 act._sa_handler = old_act->_sa_handler;
6060 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
6061 act.sa_flags = old_act->sa_flags;
6062 unlock_user_struct(old_act, arg2, 0);
6068 ret = get_errno(do_sigaction(arg1, pact, &oact));
6070 if (!is_error(ret) && arg3) {
6071 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
6073 old_act->_sa_handler = oact._sa_handler;
6074 old_act->sa_flags = oact.sa_flags;
6075 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
6076 old_act->sa_mask.sig[1] = 0;
6077 old_act->sa_mask.sig[2] = 0;
6078 old_act->sa_mask.sig[3] = 0;
6079 unlock_user_struct(old_act, arg3, 1);
6082 struct target_old_sigaction *old_act;
6083 struct target_sigaction act, oact, *pact;
6085 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
6087 act._sa_handler = old_act->_sa_handler;
6088 target_siginitset(&act.sa_mask, old_act->sa_mask);
6089 act.sa_flags = old_act->sa_flags;
6090 act.sa_restorer = old_act->sa_restorer;
6091 unlock_user_struct(old_act, arg2, 0);
6096 ret = get_errno(do_sigaction(arg1, pact, &oact));
6097 if (!is_error(ret) && arg3) {
6098 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
6100 old_act->_sa_handler = oact._sa_handler;
6101 old_act->sa_mask = oact.sa_mask.sig[0];
6102 old_act->sa_flags = oact.sa_flags;
6103 old_act->sa_restorer = oact.sa_restorer;
6104 unlock_user_struct(old_act, arg3, 1);
6110 case TARGET_NR_rt_sigaction:
6112 #if defined(TARGET_ALPHA)
6113 struct target_sigaction act, oact, *pact = 0;
6114 struct target_rt_sigaction *rt_act;
6115 /* ??? arg4 == sizeof(sigset_t). */
6117 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
6119 act._sa_handler = rt_act->_sa_handler;
6120 act.sa_mask = rt_act->sa_mask;
6121 act.sa_flags = rt_act->sa_flags;
6122 act.sa_restorer = arg5;
6123 unlock_user_struct(rt_act, arg2, 0);
6126 ret = get_errno(do_sigaction(arg1, pact, &oact));
6127 if (!is_error(ret) && arg3) {
6128 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
6130 rt_act->_sa_handler = oact._sa_handler;
6131 rt_act->sa_mask = oact.sa_mask;
6132 rt_act->sa_flags = oact.sa_flags;
6133 unlock_user_struct(rt_act, arg3, 1);
6136 struct target_sigaction *act;
6137 struct target_sigaction *oact;
6140 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
6145 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
6146 ret = -TARGET_EFAULT;
6147 goto rt_sigaction_fail;
6151 ret = get_errno(do_sigaction(arg1, act, oact));
6154 unlock_user_struct(act, arg2, 0);
6156 unlock_user_struct(oact, arg3, 1);
6160 #ifdef TARGET_NR_sgetmask /* not on alpha */
6161 case TARGET_NR_sgetmask:
6164 abi_ulong target_set;
6165 do_sigprocmask(0, NULL, &cur_set);
6166 host_to_target_old_sigset(&target_set, &cur_set);
6171 #ifdef TARGET_NR_ssetmask /* not on alpha */
6172 case TARGET_NR_ssetmask:
6174 sigset_t set, oset, cur_set;
6175 abi_ulong target_set = arg1;
6176 do_sigprocmask(0, NULL, &cur_set);
6177 target_to_host_old_sigset(&set, &target_set);
6178 sigorset(&set, &set, &cur_set);
6179 do_sigprocmask(SIG_SETMASK, &set, &oset);
6180 host_to_target_old_sigset(&target_set, &oset);
6185 #ifdef TARGET_NR_sigprocmask
6186 case TARGET_NR_sigprocmask:
6188 #if defined(TARGET_ALPHA)
6189 sigset_t set, oldset;
6194 case TARGET_SIG_BLOCK:
6197 case TARGET_SIG_UNBLOCK:
6200 case TARGET_SIG_SETMASK:
6204 ret = -TARGET_EINVAL;
6208 target_to_host_old_sigset(&set, &mask);
6210 ret = get_errno(do_sigprocmask(how, &set, &oldset));
6211 if (!is_error(ret)) {
6212 host_to_target_old_sigset(&mask, &oldset);
6214 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
6217 sigset_t set, oldset, *set_ptr;
6222 case TARGET_SIG_BLOCK:
6225 case TARGET_SIG_UNBLOCK:
6228 case TARGET_SIG_SETMASK:
6232 ret = -TARGET_EINVAL;
6235 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
6237 target_to_host_old_sigset(&set, p);
6238 unlock_user(p, arg2, 0);
6244 ret = get_errno(do_sigprocmask(how, set_ptr, &oldset));
6245 if (!is_error(ret) && arg3) {
6246 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
6248 host_to_target_old_sigset(p, &oldset);
6249 unlock_user(p, arg3, sizeof(target_sigset_t));
6255 case TARGET_NR_rt_sigprocmask:
6258 sigset_t set, oldset, *set_ptr;
6262 case TARGET_SIG_BLOCK:
6265 case TARGET_SIG_UNBLOCK:
6268 case TARGET_SIG_SETMASK:
6272 ret = -TARGET_EINVAL;
6275 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
6277 target_to_host_sigset(&set, p);
6278 unlock_user(p, arg2, 0);
6284 ret = get_errno(do_sigprocmask(how, set_ptr, &oldset));
6285 if (!is_error(ret) && arg3) {
6286 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
6288 host_to_target_sigset(p, &oldset);
6289 unlock_user(p, arg3, sizeof(target_sigset_t));
6293 #ifdef TARGET_NR_sigpending
6294 case TARGET_NR_sigpending:
6297 ret = get_errno(sigpending(&set));
6298 if (!is_error(ret)) {
6299 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6301 host_to_target_old_sigset(p, &set);
6302 unlock_user(p, arg1, sizeof(target_sigset_t));
6307 case TARGET_NR_rt_sigpending:
6310 ret = get_errno(sigpending(&set));
6311 if (!is_error(ret)) {
6312 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6314 host_to_target_sigset(p, &set);
6315 unlock_user(p, arg1, sizeof(target_sigset_t));
6319 #ifdef TARGET_NR_sigsuspend
6320 case TARGET_NR_sigsuspend:
6323 #if defined(TARGET_ALPHA)
6324 abi_ulong mask = arg1;
6325 target_to_host_old_sigset(&set, &mask);
6327 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6329 target_to_host_old_sigset(&set, p);
6330 unlock_user(p, arg1, 0);
6332 ret = get_errno(sigsuspend(&set));
6336 case TARGET_NR_rt_sigsuspend:
6339 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6341 target_to_host_sigset(&set, p);
6342 unlock_user(p, arg1, 0);
6343 ret = get_errno(sigsuspend(&set));
6346 case TARGET_NR_rt_sigtimedwait:
6349 struct timespec uts, *puts;
6352 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6354 target_to_host_sigset(&set, p);
6355 unlock_user(p, arg1, 0);
6358 target_to_host_timespec(puts, arg3);
6362 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
6363 if (!is_error(ret)) {
6365 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
6370 host_to_target_siginfo(p, &uinfo);
6371 unlock_user(p, arg2, sizeof(target_siginfo_t));
6373 ret = host_to_target_signal(ret);
6377 case TARGET_NR_rt_sigqueueinfo:
6380 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1)))
6382 target_to_host_siginfo(&uinfo, p);
6383 unlock_user(p, arg1, 0);
6384 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
6387 #ifdef TARGET_NR_sigreturn
6388 case TARGET_NR_sigreturn:
6389 /* NOTE: ret is eax, so not transcoding must be done */
6390 ret = do_sigreturn(cpu_env);
6393 case TARGET_NR_rt_sigreturn:
6394 /* NOTE: ret is eax, so not transcoding must be done */
6395 ret = do_rt_sigreturn(cpu_env);
6397 case TARGET_NR_sethostname:
6398 if (!(p = lock_user_string(arg1)))
6400 ret = get_errno(sethostname(p, arg2));
6401 unlock_user(p, arg1, 0);
6403 case TARGET_NR_setrlimit:
6405 int resource = target_to_host_resource(arg1);
6406 struct target_rlimit *target_rlim;
6408 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
6410 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
6411 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
6412 unlock_user_struct(target_rlim, arg2, 0);
6413 ret = get_errno(setrlimit(resource, &rlim));
6416 case TARGET_NR_getrlimit:
6418 int resource = target_to_host_resource(arg1);
6419 struct target_rlimit *target_rlim;
6422 ret = get_errno(getrlimit(resource, &rlim));
6423 if (!is_error(ret)) {
6424 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
6426 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
6427 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
6428 unlock_user_struct(target_rlim, arg2, 1);
6432 case TARGET_NR_getrusage:
6434 struct rusage rusage;
6435 ret = get_errno(getrusage(arg1, &rusage));
6436 if (!is_error(ret)) {
6437 ret = host_to_target_rusage(arg2, &rusage);
6441 case TARGET_NR_gettimeofday:
6444 ret = get_errno(gettimeofday(&tv, NULL));
6445 if (!is_error(ret)) {
6446 if (copy_to_user_timeval(arg1, &tv))
6451 case TARGET_NR_settimeofday:
6453 struct timeval tv, *ptv = NULL;
6454 struct timezone tz, *ptz = NULL;
6457 if (copy_from_user_timeval(&tv, arg1)) {
6464 if (copy_from_user_timezone(&tz, arg2)) {
6470 ret = get_errno(settimeofday(ptv, ptz));
6473 #if defined(TARGET_NR_select)
6474 case TARGET_NR_select:
6475 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
6476 ret = do_select(arg1, arg2, arg3, arg4, arg5);
6479 struct target_sel_arg_struct *sel;
6480 abi_ulong inp, outp, exp, tvp;
6483 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
6485 nsel = tswapal(sel->n);
6486 inp = tswapal(sel->inp);
6487 outp = tswapal(sel->outp);
6488 exp = tswapal(sel->exp);
6489 tvp = tswapal(sel->tvp);
6490 unlock_user_struct(sel, arg1, 0);
6491 ret = do_select(nsel, inp, outp, exp, tvp);
6496 #ifdef TARGET_NR_pselect6
6497 case TARGET_NR_pselect6:
6499 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
6500 fd_set rfds, wfds, efds;
6501 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
6502 struct timespec ts, *ts_ptr;
6505 * The 6th arg is actually two args smashed together,
6506 * so we cannot use the C library.
6514 abi_ulong arg_sigset, arg_sigsize, *arg7;
6515 target_sigset_t *target_sigset;
6523 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
6527 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
6531 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
6537 * This takes a timespec, and not a timeval, so we cannot
6538 * use the do_select() helper ...
6541 if (target_to_host_timespec(&ts, ts_addr)) {
6549 /* Extract the two packed args for the sigset */
6552 sig.size = _NSIG / 8;
6554 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
6558 arg_sigset = tswapal(arg7[0]);
6559 arg_sigsize = tswapal(arg7[1]);
6560 unlock_user(arg7, arg6, 0);
6564 if (arg_sigsize != sizeof(*target_sigset)) {
6565 /* Like the kernel, we enforce correct size sigsets */
6566 ret = -TARGET_EINVAL;
6569 target_sigset = lock_user(VERIFY_READ, arg_sigset,
6570 sizeof(*target_sigset), 1);
6571 if (!target_sigset) {
6574 target_to_host_sigset(&set, target_sigset);
6575 unlock_user(target_sigset, arg_sigset, 0);
6583 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
6586 if (!is_error(ret)) {
6587 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
6589 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
6591 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
6594 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
6600 case TARGET_NR_symlink:
6603 p = lock_user_string(arg1);
6604 p2 = lock_user_string(arg2);
6606 ret = -TARGET_EFAULT;
6608 ret = get_errno(symlink(p, p2));
6609 unlock_user(p2, arg2, 0);
6610 unlock_user(p, arg1, 0);
6613 #if defined(TARGET_NR_symlinkat)
6614 case TARGET_NR_symlinkat:
6617 p = lock_user_string(arg1);
6618 p2 = lock_user_string(arg3);
6620 ret = -TARGET_EFAULT;
6622 ret = get_errno(symlinkat(p, arg2, p2));
6623 unlock_user(p2, arg3, 0);
6624 unlock_user(p, arg1, 0);
6628 #ifdef TARGET_NR_oldlstat
6629 case TARGET_NR_oldlstat:
6632 case TARGET_NR_readlink:
6635 p = lock_user_string(arg1);
6636 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
6638 ret = -TARGET_EFAULT;
6639 } else if (is_proc_myself((const char *)p, "exe")) {
6640 char real[PATH_MAX], *temp;
6641 temp = realpath(exec_path, real);
6642 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
6643 snprintf((char *)p2, arg3, "%s", real);
6645 ret = get_errno(readlink(path(p), p2, arg3));
6647 unlock_user(p2, arg2, ret);
6648 unlock_user(p, arg1, 0);
6651 #if defined(TARGET_NR_readlinkat)
6652 case TARGET_NR_readlinkat:
6655 p = lock_user_string(arg2);
6656 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
6658 ret = -TARGET_EFAULT;
6659 } else if (is_proc_myself((const char *)p, "exe")) {
6660 char real[PATH_MAX], *temp;
6661 temp = realpath(exec_path, real);
6662 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
6663 snprintf((char *)p2, arg4, "%s", real);
6665 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
6667 unlock_user(p2, arg3, ret);
6668 unlock_user(p, arg2, 0);
6672 #ifdef TARGET_NR_uselib
6673 case TARGET_NR_uselib:
6676 #ifdef TARGET_NR_swapon
6677 case TARGET_NR_swapon:
6678 if (!(p = lock_user_string(arg1)))
6680 ret = get_errno(swapon(p, arg2));
6681 unlock_user(p, arg1, 0);
6684 case TARGET_NR_reboot:
6685 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
6686 /* arg4 must be ignored in all other cases */
6687 p = lock_user_string(arg4);
6691 ret = get_errno(reboot(arg1, arg2, arg3, p));
6692 unlock_user(p, arg4, 0);
6694 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
6697 #ifdef TARGET_NR_readdir
6698 case TARGET_NR_readdir:
6701 #ifdef TARGET_NR_mmap
6702 case TARGET_NR_mmap:
6703 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6704 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
6705 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
6706 || defined(TARGET_S390X)
6709 abi_ulong v1, v2, v3, v4, v5, v6;
6710 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
6718 unlock_user(v, arg1, 0);
6719 ret = get_errno(target_mmap(v1, v2, v3,
6720 target_to_host_bitmask(v4, mmap_flags_tbl),
6724 ret = get_errno(target_mmap(arg1, arg2, arg3,
6725 target_to_host_bitmask(arg4, mmap_flags_tbl),
6731 #ifdef TARGET_NR_mmap2
6732 case TARGET_NR_mmap2:
6734 #define MMAP_SHIFT 12
6736 ret = get_errno(target_mmap(arg1, arg2, arg3,
6737 target_to_host_bitmask(arg4, mmap_flags_tbl),
6739 arg6 << MMAP_SHIFT));
6742 case TARGET_NR_munmap:
6743 ret = get_errno(target_munmap(arg1, arg2));
6745 case TARGET_NR_mprotect:
6747 TaskState *ts = cpu->opaque;
6748 /* Special hack to detect libc making the stack executable. */
6749 if ((arg3 & PROT_GROWSDOWN)
6750 && arg1 >= ts->info->stack_limit
6751 && arg1 <= ts->info->start_stack) {
6752 arg3 &= ~PROT_GROWSDOWN;
6753 arg2 = arg2 + arg1 - ts->info->stack_limit;
6754 arg1 = ts->info->stack_limit;
6757 ret = get_errno(target_mprotect(arg1, arg2, arg3));
6759 #ifdef TARGET_NR_mremap
6760 case TARGET_NR_mremap:
6761 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
6764 /* ??? msync/mlock/munlock are broken for softmmu. */
6765 #ifdef TARGET_NR_msync
6766 case TARGET_NR_msync:
6767 ret = get_errno(msync(g2h(arg1), arg2, arg3));
6770 #ifdef TARGET_NR_mlock
6771 case TARGET_NR_mlock:
6772 ret = get_errno(mlock(g2h(arg1), arg2));
6775 #ifdef TARGET_NR_munlock
6776 case TARGET_NR_munlock:
6777 ret = get_errno(munlock(g2h(arg1), arg2));
6780 #ifdef TARGET_NR_mlockall
6781 case TARGET_NR_mlockall:
6782 ret = get_errno(mlockall(arg1));
6785 #ifdef TARGET_NR_munlockall
6786 case TARGET_NR_munlockall:
6787 ret = get_errno(munlockall());
6790 case TARGET_NR_truncate:
6791 if (!(p = lock_user_string(arg1)))
6793 ret = get_errno(truncate(p, arg2));
6794 unlock_user(p, arg1, 0);
6796 case TARGET_NR_ftruncate:
6797 ret = get_errno(ftruncate(arg1, arg2));
6799 case TARGET_NR_fchmod:
6800 ret = get_errno(fchmod(arg1, arg2));
6802 #if defined(TARGET_NR_fchmodat)
6803 case TARGET_NR_fchmodat:
6804 if (!(p = lock_user_string(arg2)))
6806 ret = get_errno(fchmodat(arg1, p, arg3, 0));
6807 unlock_user(p, arg2, 0);
6810 case TARGET_NR_getpriority:
6811 /* Note that negative values are valid for getpriority, so we must
6812 differentiate based on errno settings. */
6814 ret = getpriority(arg1, arg2);
6815 if (ret == -1 && errno != 0) {
6816 ret = -host_to_target_errno(errno);
6820 /* Return value is the unbiased priority. Signal no error. */
6821 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
6823 /* Return value is a biased priority to avoid negative numbers. */
6827 case TARGET_NR_setpriority:
6828 ret = get_errno(setpriority(arg1, arg2, arg3));
6830 #ifdef TARGET_NR_profil
6831 case TARGET_NR_profil:
6834 case TARGET_NR_statfs:
6835 if (!(p = lock_user_string(arg1)))
6837 ret = get_errno(statfs(path(p), &stfs));
6838 unlock_user(p, arg1, 0);
6840 if (!is_error(ret)) {
6841 struct target_statfs *target_stfs;
6843 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
6845 __put_user(stfs.f_type, &target_stfs->f_type);
6846 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6847 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6848 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6849 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6850 __put_user(stfs.f_files, &target_stfs->f_files);
6851 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6852 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6853 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6854 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6855 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
6856 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
6857 unlock_user_struct(target_stfs, arg2, 1);
6860 case TARGET_NR_fstatfs:
6861 ret = get_errno(fstatfs(arg1, &stfs));
6862 goto convert_statfs;
6863 #ifdef TARGET_NR_statfs64
6864 case TARGET_NR_statfs64:
6865 if (!(p = lock_user_string(arg1)))
6867 ret = get_errno(statfs(path(p), &stfs));
6868 unlock_user(p, arg1, 0);
6870 if (!is_error(ret)) {
6871 struct target_statfs64 *target_stfs;
6873 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
6875 __put_user(stfs.f_type, &target_stfs->f_type);
6876 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6877 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6878 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6879 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6880 __put_user(stfs.f_files, &target_stfs->f_files);
6881 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6882 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6883 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6884 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6885 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
6886 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
6887 unlock_user_struct(target_stfs, arg3, 1);
6890 case TARGET_NR_fstatfs64:
6891 ret = get_errno(fstatfs(arg1, &stfs));
6892 goto convert_statfs64;
6894 #ifdef TARGET_NR_ioperm
6895 case TARGET_NR_ioperm:
6898 #ifdef TARGET_NR_socketcall
6899 case TARGET_NR_socketcall:
6900 ret = do_socketcall(arg1, arg2);
6903 #ifdef TARGET_NR_accept
6904 case TARGET_NR_accept:
6905 ret = do_accept4(arg1, arg2, arg3, 0);
6908 #ifdef TARGET_NR_accept4
6909 case TARGET_NR_accept4:
6910 #ifdef CONFIG_ACCEPT4
6911 ret = do_accept4(arg1, arg2, arg3, arg4);
6917 #ifdef TARGET_NR_bind
6918 case TARGET_NR_bind:
6919 ret = do_bind(arg1, arg2, arg3);
6922 #ifdef TARGET_NR_connect
6923 case TARGET_NR_connect:
6924 ret = do_connect(arg1, arg2, arg3);
6927 #ifdef TARGET_NR_getpeername
6928 case TARGET_NR_getpeername:
6929 ret = do_getpeername(arg1, arg2, arg3);
6932 #ifdef TARGET_NR_getsockname
6933 case TARGET_NR_getsockname:
6934 ret = do_getsockname(arg1, arg2, arg3);
6937 #ifdef TARGET_NR_getsockopt
6938 case TARGET_NR_getsockopt:
6939 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
6942 #ifdef TARGET_NR_listen
6943 case TARGET_NR_listen:
6944 ret = get_errno(listen(arg1, arg2));
6947 #ifdef TARGET_NR_recv
6948 case TARGET_NR_recv:
6949 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
6952 #ifdef TARGET_NR_recvfrom
6953 case TARGET_NR_recvfrom:
6954 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
6957 #ifdef TARGET_NR_recvmsg
6958 case TARGET_NR_recvmsg:
6959 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
6962 #ifdef TARGET_NR_send
6963 case TARGET_NR_send:
6964 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
6967 #ifdef TARGET_NR_sendmsg
6968 case TARGET_NR_sendmsg:
6969 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
6972 #ifdef TARGET_NR_sendmmsg
6973 case TARGET_NR_sendmmsg:
6974 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
6976 case TARGET_NR_recvmmsg:
6977 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
6980 #ifdef TARGET_NR_sendto
6981 case TARGET_NR_sendto:
6982 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
6985 #ifdef TARGET_NR_shutdown
6986 case TARGET_NR_shutdown:
6987 ret = get_errno(shutdown(arg1, arg2));
6990 #ifdef TARGET_NR_socket
6991 case TARGET_NR_socket:
6992 ret = do_socket(arg1, arg2, arg3);
6995 #ifdef TARGET_NR_socketpair
6996 case TARGET_NR_socketpair:
6997 ret = do_socketpair(arg1, arg2, arg3, arg4);
7000 #ifdef TARGET_NR_setsockopt
7001 case TARGET_NR_setsockopt:
7002 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
7006 case TARGET_NR_syslog:
7007 if (!(p = lock_user_string(arg2)))
7009 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
7010 unlock_user(p, arg2, 0);
7013 case TARGET_NR_setitimer:
7015 struct itimerval value, ovalue, *pvalue;
7019 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
7020 || copy_from_user_timeval(&pvalue->it_value,
7021 arg2 + sizeof(struct target_timeval)))
7026 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
7027 if (!is_error(ret) && arg3) {
7028 if (copy_to_user_timeval(arg3,
7029 &ovalue.it_interval)
7030 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
7036 case TARGET_NR_getitimer:
7038 struct itimerval value;
7040 ret = get_errno(getitimer(arg1, &value));
7041 if (!is_error(ret) && arg2) {
7042 if (copy_to_user_timeval(arg2,
7044 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
7050 case TARGET_NR_stat:
7051 if (!(p = lock_user_string(arg1)))
7053 ret = get_errno(stat(path(p), &st));
7054 unlock_user(p, arg1, 0);
7056 case TARGET_NR_lstat:
7057 if (!(p = lock_user_string(arg1)))
7059 ret = get_errno(lstat(path(p), &st));
7060 unlock_user(p, arg1, 0);
7062 case TARGET_NR_fstat:
7064 ret = get_errno(fstat(arg1, &st));
7066 if (!is_error(ret)) {
7067 struct target_stat *target_st;
7069 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
7071 memset(target_st, 0, sizeof(*target_st));
7072 __put_user(st.st_dev, &target_st->st_dev);
7073 __put_user(st.st_ino, &target_st->st_ino);
7074 __put_user(st.st_mode, &target_st->st_mode);
7075 __put_user(st.st_uid, &target_st->st_uid);
7076 __put_user(st.st_gid, &target_st->st_gid);
7077 __put_user(st.st_nlink, &target_st->st_nlink);
7078 __put_user(st.st_rdev, &target_st->st_rdev);
7079 __put_user(st.st_size, &target_st->st_size);
7080 __put_user(st.st_blksize, &target_st->st_blksize);
7081 __put_user(st.st_blocks, &target_st->st_blocks);
7082 __put_user(st.st_atime, &target_st->target_st_atime);
7083 __put_user(st.st_mtime, &target_st->target_st_mtime);
7084 __put_user(st.st_ctime, &target_st->target_st_ctime);
7085 unlock_user_struct(target_st, arg2, 1);
7089 #ifdef TARGET_NR_olduname
7090 case TARGET_NR_olduname:
7093 #ifdef TARGET_NR_iopl
7094 case TARGET_NR_iopl:
7097 case TARGET_NR_vhangup:
7098 ret = get_errno(vhangup());
7100 #ifdef TARGET_NR_idle
7101 case TARGET_NR_idle:
7104 #ifdef TARGET_NR_syscall
7105 case TARGET_NR_syscall:
7106 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
7107 arg6, arg7, arg8, 0);
7110 case TARGET_NR_wait4:
/* Emulate wait4(2): arg1 = pid, arg2 = guest pointer for the exit
 * status, arg3 = options, arg4 = guest pointer for struct rusage.
 * NOTE(review): interior lines of this handler are elided in this copy
 * (the embedded numbering skips), so braces/early-exit paths are not
 * fully visible here. */
7113 abi_long status_ptr = arg2;
7114 struct rusage rusage, *rusage_ptr;
7115 abi_ulong target_rusage = arg4;
7116 abi_long rusage_err;
/* Always collect rusage from the host; it is copied out to the guest
 * below only when the guest actually supplied a buffer. */
7118 rusage_ptr = &rusage;
7121 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
7122 if (!is_error(ret)) {
/* Only write the status back for a real child (ret != 0): with
 * WNOHANG wait4 can legitimately return 0 and leave status unset. */
7123 if (status_ptr && ret) {
/* Translate the host wait-status encoding to the guest's. */
7124 status = host_to_target_waitstatus(status);
7125 if (put_user_s32(status, status_ptr))
7128 if (target_rusage) {
/* Marshal the host struct rusage into the guest's layout/byte order. */
7129 rusage_err = host_to_target_rusage(target_rusage, &rusage);
7137 #ifdef TARGET_NR_swapoff
7138 case TARGET_NR_swapoff:
7139 if (!(p = lock_user_string(arg1)))
7141 ret = get_errno(swapoff(p));
7142 unlock_user(p, arg1, 0);
7145 case TARGET_NR_sysinfo:
7147 struct target_sysinfo *target_value;
7148 struct sysinfo value;
7149 ret = get_errno(sysinfo(&value));
7150 if (!is_error(ret) && arg1)
7152 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
7154 __put_user(value.uptime, &target_value->uptime);
7155 __put_user(value.loads[0], &target_value->loads[0]);
7156 __put_user(value.loads[1], &target_value->loads[1]);
7157 __put_user(value.loads[2], &target_value->loads[2]);
7158 __put_user(value.totalram, &target_value->totalram);
7159 __put_user(value.freeram, &target_value->freeram);
7160 __put_user(value.sharedram, &target_value->sharedram);
7161 __put_user(value.bufferram, &target_value->bufferram);
7162 __put_user(value.totalswap, &target_value->totalswap);
7163 __put_user(value.freeswap, &target_value->freeswap);
7164 __put_user(value.procs, &target_value->procs);
7165 __put_user(value.totalhigh, &target_value->totalhigh);
7166 __put_user(value.freehigh, &target_value->freehigh);
7167 __put_user(value.mem_unit, &target_value->mem_unit);
7168 unlock_user_struct(target_value, arg1, 1);
7172 #ifdef TARGET_NR_ipc
7174 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
7177 #ifdef TARGET_NR_semget
7178 case TARGET_NR_semget:
7179 ret = get_errno(semget(arg1, arg2, arg3));
7182 #ifdef TARGET_NR_semop
7183 case TARGET_NR_semop:
7184 ret = do_semop(arg1, arg2, arg3);
7187 #ifdef TARGET_NR_semctl
7188 case TARGET_NR_semctl:
7189 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
7192 #ifdef TARGET_NR_msgctl
7193 case TARGET_NR_msgctl:
7194 ret = do_msgctl(arg1, arg2, arg3);
7197 #ifdef TARGET_NR_msgget
7198 case TARGET_NR_msgget:
7199 ret = get_errno(msgget(arg1, arg2));
7202 #ifdef TARGET_NR_msgrcv
7203 case TARGET_NR_msgrcv:
7204 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
7207 #ifdef TARGET_NR_msgsnd
7208 case TARGET_NR_msgsnd:
7209 ret = do_msgsnd(arg1, arg2, arg3, arg4);
7212 #ifdef TARGET_NR_shmget
7213 case TARGET_NR_shmget:
7214 ret = get_errno(shmget(arg1, arg2, arg3));
7217 #ifdef TARGET_NR_shmctl
7218 case TARGET_NR_shmctl:
7219 ret = do_shmctl(arg1, arg2, arg3);
7222 #ifdef TARGET_NR_shmat
7223 case TARGET_NR_shmat:
7224 ret = do_shmat(arg1, arg2, arg3);
7227 #ifdef TARGET_NR_shmdt
7228 case TARGET_NR_shmdt:
7229 ret = do_shmdt(arg1);
7232 case TARGET_NR_fsync:
7233 ret = get_errno(fsync(arg1));
7235 case TARGET_NR_clone:
7236 /* Linux manages to have three different orderings for its
7237 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
7238 * match the kernel's CONFIG_CLONE_* settings.
7239 * Microblaze is further special in that it uses a sixth
7240 * implicit argument to clone for the TLS pointer.
7242 #if defined(TARGET_MICROBLAZE)
7243 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
7244 #elif defined(TARGET_CLONE_BACKWARDS)
7245 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
7246 #elif defined(TARGET_CLONE_BACKWARDS2)
7247 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
7249 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
7252 #ifdef __NR_exit_group
7253 /* new thread calls */
7254 case TARGET_NR_exit_group:
7258 gdb_exit(cpu_env, arg1);
7259 ret = get_errno(exit_group(arg1));
7262 case TARGET_NR_setdomainname:
7263 if (!(p = lock_user_string(arg1)))
7265 ret = get_errno(setdomainname(p, arg2));
7266 unlock_user(p, arg1, 0);
7268 case TARGET_NR_uname:
7269 /* no need to transcode because we use the linux syscall */
7271 struct new_utsname * buf;
7273 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
7275 ret = get_errno(sys_uname(buf));
7276 if (!is_error(ret)) {
7277 /* Overrite the native machine name with whatever is being
7279 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
7280 /* Allow the user to override the reported release. */
7281 if (qemu_uname_release && *qemu_uname_release)
7282 strcpy (buf->release, qemu_uname_release);
7284 unlock_user_struct(buf, arg1, 1);
7288 case TARGET_NR_modify_ldt:
7289 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
7291 #if !defined(TARGET_X86_64)
7292 case TARGET_NR_vm86old:
7294 case TARGET_NR_vm86:
7295 ret = do_vm86(cpu_env, arg1, arg2);
7299 case TARGET_NR_adjtimex:
7301 #ifdef TARGET_NR_create_module
7302 case TARGET_NR_create_module:
7304 case TARGET_NR_init_module:
7305 case TARGET_NR_delete_module:
7306 #ifdef TARGET_NR_get_kernel_syms
7307 case TARGET_NR_get_kernel_syms:
7310 case TARGET_NR_quotactl:
7312 case TARGET_NR_getpgid:
7313 ret = get_errno(getpgid(arg1));
7315 case TARGET_NR_fchdir:
7316 ret = get_errno(fchdir(arg1));
7318 #ifdef TARGET_NR_bdflush /* not on x86_64 */
7319 case TARGET_NR_bdflush:
7322 #ifdef TARGET_NR_sysfs
7323 case TARGET_NR_sysfs:
7326 case TARGET_NR_personality:
7327 ret = get_errno(personality(arg1));
7329 #ifdef TARGET_NR_afs_syscall
7330 case TARGET_NR_afs_syscall:
7333 #ifdef TARGET_NR__llseek /* Not on alpha */
7334 case TARGET_NR__llseek:
7337 #if !defined(__NR_llseek)
7338 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
7340 ret = get_errno(res);
7345 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
7347 if ((ret == 0) && put_user_s64(res, arg4)) {
7353 case TARGET_NR_getdents:
7354 #ifdef __NR_getdents
7355 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
7357 struct target_dirent *target_dirp;
7358 struct linux_dirent *dirp;
7359 abi_long count = arg3;
7361 dirp = malloc(count);
7363 ret = -TARGET_ENOMEM;
7367 ret = get_errno(sys_getdents(arg1, dirp, count));
7368 if (!is_error(ret)) {
7369 struct linux_dirent *de;
7370 struct target_dirent *tde;
7372 int reclen, treclen;
7373 int count1, tnamelen;
7377 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7381 reclen = de->d_reclen;
7382 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
7383 assert(tnamelen >= 0);
7384 treclen = tnamelen + offsetof(struct target_dirent, d_name);
7385 assert(count1 + treclen <= count);
7386 tde->d_reclen = tswap16(treclen);
7387 tde->d_ino = tswapal(de->d_ino);
7388 tde->d_off = tswapal(de->d_off);
7389 memcpy(tde->d_name, de->d_name, tnamelen);
7390 de = (struct linux_dirent *)((char *)de + reclen);
7392 tde = (struct target_dirent *)((char *)tde + treclen);
7396 unlock_user(target_dirp, arg2, ret);
7402 struct linux_dirent *dirp;
7403 abi_long count = arg3;
7405 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7407 ret = get_errno(sys_getdents(arg1, dirp, count));
7408 if (!is_error(ret)) {
7409 struct linux_dirent *de;
7414 reclen = de->d_reclen;
7417 de->d_reclen = tswap16(reclen);
7418 tswapls(&de->d_ino);
7419 tswapls(&de->d_off);
7420 de = (struct linux_dirent *)((char *)de + reclen);
7424 unlock_user(dirp, arg2, ret);
7428 /* Implement getdents in terms of getdents64 */
7430 struct linux_dirent64 *dirp;
7431 abi_long count = arg3;
7433 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
/* Fill the guest buffer with host dirent64 records first, then
 * repack them in-place into the guest's (smaller) dirent format. */
7437 ret = get_errno(sys_getdents64(arg1, dirp, count));
7438 if (!is_error(ret)) {
7439 /* Convert the dirent64 structs to target dirent. We do this
7440 * in-place, since we can guarantee that a target_dirent is no
7441 * larger than a dirent64; however this means we have to be
7442 * careful to read everything before writing in the new format.
7444 struct linux_dirent64 *de;
7445 struct target_dirent *tde;
7450 tde = (struct target_dirent *)dirp;
7452 int namelen, treclen;
/* Snapshot every source field into locals before any store: the
 * write cursor (tde) overlaps the read cursor (de). */
7453 int reclen = de->d_reclen;
7454 uint64_t ino = de->d_ino;
7455 int64_t off = de->d_off;
7456 uint8_t type = de->d_type;
7458 namelen = strlen(de->d_name);
7459 treclen = offsetof(struct target_dirent, d_name)
/* Round the target record up to abi_long alignment, as the kernel's
 * own getdents does. */
7461 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
/* memmove, not memcpy: source and destination names can overlap. */
7463 memmove(tde->d_name, de->d_name, namelen + 1);
7464 tde->d_ino = tswapal(ino);
7465 tde->d_off = tswapal(off);
7466 tde->d_reclen = tswap16(treclen);
7467 /* The target_dirent type is in what was formerly a padding
7468 * byte at the end of the structure:
7470 *(((char *)tde) + treclen - 1) = type;
/* Advance each cursor by its own record length; they diverge as the
 * packed records shrink. */
7472 de = (struct linux_dirent64 *)((char *)de + reclen);
7473 tde = (struct target_dirent *)((char *)tde + treclen);
7479 unlock_user(dirp, arg2, ret);
7483 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
7484 case TARGET_NR_getdents64:
7486 struct linux_dirent64 *dirp;
7487 abi_long count = arg3;
7488 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7490 ret = get_errno(sys_getdents64(arg1, dirp, count));
7491 if (!is_error(ret)) {
7492 struct linux_dirent64 *de;
7497 reclen = de->d_reclen;
7500 de->d_reclen = tswap16(reclen);
7501 tswap64s((uint64_t *)&de->d_ino);
7502 tswap64s((uint64_t *)&de->d_off);
7503 de = (struct linux_dirent64 *)((char *)de + reclen);
7507 unlock_user(dirp, arg2, ret);
7510 #endif /* TARGET_NR_getdents64 */
7511 #if defined(TARGET_NR__newselect)
7512 case TARGET_NR__newselect:
7513 ret = do_select(arg1, arg2, arg3, arg4, arg5);
7516 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
7517 # ifdef TARGET_NR_poll
7518 case TARGET_NR_poll:
7520 # ifdef TARGET_NR_ppoll
7521 case TARGET_NR_ppoll:
/* Shared handler for poll(2) and ppoll(2): copy the guest pollfd
 * array to a host-format array, dispatch, and copy revents back.
 * NOTE(review): some interior lines (error exits, closing braces) are
 * elided in this copy of the file. */
7524 struct target_pollfd *target_pfd;
7525 unsigned int nfds = arg2;
/* VERIFY_WRITE because revents is written back into the same array
 * after the host call. */
7530 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
7534 pfd = alloca(sizeof(struct pollfd) * nfds);
/* Byte-swap each guest pollfd into the host-endian scratch array. */
7535 for(i = 0; i < nfds; i++) {
7536 pfd[i].fd = tswap32(target_pfd[i].fd);
7537 pfd[i].events = tswap16(target_pfd[i].events);
7540 # ifdef TARGET_NR_ppoll
7541 if (num == TARGET_NR_ppoll) {
/* ppoll additionally takes a timespec (arg3) and a sigset (arg4),
 * both of which need guest->host conversion. */
7542 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
7543 target_sigset_t *target_set;
7544 sigset_t _set, *set = &_set;
7547 if (target_to_host_timespec(timeout_ts, arg3)) {
7548 unlock_user(target_pfd, arg1, 0);
7556 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
7558 unlock_user(target_pfd, arg1, 0);
7561 target_to_host_sigset(set, target_set);
/* _NSIG/8 is the host sigset size the raw syscall expects. */
7566 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));
/* ppoll updates the remaining timeout; propagate it to the guest. */
7568 if (!is_error(ret) && arg3) {
7569 host_to_target_timespec(arg3, timeout_ts);
7572 unlock_user(target_set, arg4, 0);
7576 ret = get_errno(poll(pfd, nfds, timeout));
/* Write the host-reported revents back into the guest array. */
7578 if (!is_error(ret)) {
7579 for(i = 0; i < nfds; i++) {
7580 target_pfd[i].revents = tswap16(pfd[i].revents);
7583 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
7587 case TARGET_NR_flock:
7588 /* NOTE: the flock constant seems to be the same for every
7590 ret = get_errno(flock(arg1, arg2));
7592 case TARGET_NR_readv:
7594 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
7596 ret = get_errno(readv(arg1, vec, arg3));
7597 unlock_iovec(vec, arg2, arg3, 1);
7599 ret = -host_to_target_errno(errno);
7603 case TARGET_NR_writev:
7605 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
7607 ret = get_errno(writev(arg1, vec, arg3));
7608 unlock_iovec(vec, arg2, arg3, 0);
7610 ret = -host_to_target_errno(errno);
7614 case TARGET_NR_getsid:
7615 ret = get_errno(getsid(arg1));
7617 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
7618 case TARGET_NR_fdatasync:
7619 ret = get_errno(fdatasync(arg1));
7622 case TARGET_NR__sysctl:
7623 /* We don't implement this, but ENOTDIR is always a safe
7625 ret = -TARGET_ENOTDIR;
7627 case TARGET_NR_sched_getaffinity:
/* sched_getaffinity(2): arg1 = pid, arg2 = guest cpusetsize,
 * arg3 = guest mask buffer. */
7629 unsigned int mask_size;
7630 unsigned long *mask;
7633 * sched_getaffinity needs multiples of ulong, so need to take
7634 * care of mismatches between target ulong and host ulong sizes.
/* Reject guest sizes that are not a whole number of abi_ulongs,
 * mirroring the kernel's EINVAL behaviour. */
7636 if (arg2 & (sizeof(abi_ulong) - 1)) {
7637 ret = -TARGET_EINVAL;
/* Round the guest size up to a multiple of the host ulong so the
 * raw syscall's size constraint is met. */
7640 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
7642 mask = alloca(mask_size);
7643 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
7645 if (!is_error(ret)) {
7647 /* More data returned than the caller's buffer will fit.
7648 * This only happens if sizeof(abi_long) < sizeof(long)
7649 * and the caller passed us a buffer holding an odd number
7650 * of abi_longs. If the host kernel is actually using the
7651 * extra 4 bytes then fail EINVAL; otherwise we can just
7652 * ignore them and only copy the interesting part.
7654 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
/* arg2 * 8 = number of CPU bits the guest buffer can hold. */
7655 if (numcpus > arg2 * 8) {
7656 ret = -TARGET_EINVAL;
/* ret here is the byte count reported by the kernel. */
7662 if (copy_to_user(arg3, mask, ret)) {
7668 case TARGET_NR_sched_setaffinity:
7670 unsigned int mask_size;
7671 unsigned long *mask;
7674 * sched_setaffinity needs multiples of ulong, so need to take
7675 * care of mismatches between target ulong and host ulong sizes.
7677 if (arg2 & (sizeof(abi_ulong) - 1)) {
7678 ret = -TARGET_EINVAL;
7681 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
7683 mask = alloca(mask_size);
7684 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
7687 memcpy(mask, p, arg2);
7688 unlock_user_struct(p, arg2, 0);
7690 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
7693 case TARGET_NR_sched_setparam:
7695 struct sched_param *target_schp;
7696 struct sched_param schp;
7698 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
7700 schp.sched_priority = tswap32(target_schp->sched_priority);
7701 unlock_user_struct(target_schp, arg2, 0);
7702 ret = get_errno(sched_setparam(arg1, &schp));
7705 case TARGET_NR_sched_getparam:
7707 struct sched_param *target_schp;
7708 struct sched_param schp;
7709 ret = get_errno(sched_getparam(arg1, &schp));
7710 if (!is_error(ret)) {
7711 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
7713 target_schp->sched_priority = tswap32(schp.sched_priority);
7714 unlock_user_struct(target_schp, arg2, 1);
7718 case TARGET_NR_sched_setscheduler:
7720 struct sched_param *target_schp;
7721 struct sched_param schp;
7722 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
7724 schp.sched_priority = tswap32(target_schp->sched_priority);
7725 unlock_user_struct(target_schp, arg3, 0);
7726 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
7729 case TARGET_NR_sched_getscheduler:
7730 ret = get_errno(sched_getscheduler(arg1));
7732 case TARGET_NR_sched_yield:
7733 ret = get_errno(sched_yield());
7735 case TARGET_NR_sched_get_priority_max:
7736 ret = get_errno(sched_get_priority_max(arg1));
7738 case TARGET_NR_sched_get_priority_min:
7739 ret = get_errno(sched_get_priority_min(arg1));
7741 case TARGET_NR_sched_rr_get_interval:
7744 ret = get_errno(sched_rr_get_interval(arg1, &ts));
7745 if (!is_error(ret)) {
7746 host_to_target_timespec(arg2, &ts);
7750 case TARGET_NR_nanosleep:
7752 struct timespec req, rem;
7753 target_to_host_timespec(&req, arg1);
7754 ret = get_errno(nanosleep(&req, &rem));
7755 if (is_error(ret) && arg2) {
7756 host_to_target_timespec(arg2, &rem);
7760 #ifdef TARGET_NR_query_module
7761 case TARGET_NR_query_module:
7764 #ifdef TARGET_NR_nfsservctl
7765 case TARGET_NR_nfsservctl:
7768 case TARGET_NR_prctl:
7770 case PR_GET_PDEATHSIG:
7773 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
7774 if (!is_error(ret) && arg2
7775 && put_user_ual(deathsig, arg2)) {
7783 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
7787 ret = get_errno(prctl(arg1, (unsigned long)name,
7789 unlock_user(name, arg2, 16);
7794 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
7798 ret = get_errno(prctl(arg1, (unsigned long)name,
7800 unlock_user(name, arg2, 0);
7805 /* Most prctl options have no pointer arguments */
7806 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
7810 #ifdef TARGET_NR_arch_prctl
7811 case TARGET_NR_arch_prctl:
7812 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
7813 ret = do_arch_prctl(cpu_env, arg1, arg2);
7819 #ifdef TARGET_NR_pread64
7820 case TARGET_NR_pread64:
7821 if (regpairs_aligned(cpu_env)) {
7825 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7827 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
7828 unlock_user(p, arg2, ret);
7830 case TARGET_NR_pwrite64:
7831 if (regpairs_aligned(cpu_env)) {
7835 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7837 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
7838 unlock_user(p, arg2, 0);
7841 case TARGET_NR_getcwd:
7842 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
7844 ret = get_errno(sys_getcwd1(p, arg2));
7845 unlock_user(p, arg1, ret);
7847 case TARGET_NR_capget:
7848 case TARGET_NR_capset:
/* Shared handler for capget(2)/capset(2): convert the capability
 * header and (for v2+) the pair of cap_data structs between guest
 * and host layouts.
 * NOTE(review): interior lines (the data_items assignment, efault
 * exits, closing braces) are elided in this copy. */
7850 struct target_user_cap_header *target_header;
7851 struct target_user_cap_data *target_data = NULL;
7852 struct __user_cap_header_struct header;
/* Two elements: v2+ of the capability ABI uses two cap_data structs. */
7853 struct __user_cap_data_struct data[2];
7854 struct __user_cap_data_struct *dataptr = NULL;
7855 int i, target_datalen;
/* VERIFY_WRITE: the kernel writes back the negotiated version field
 * for both capget and capset. */
7858 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
7861 header.version = tswap32(target_header->version);
7862 header.pid = tswap32(target_header->pid);
7864 if (header.version != _LINUX_CAPABILITY_VERSION) {
7865 /* Version 2 and up takes pointer to two user_data structs */
7869 target_datalen = sizeof(*target_data) * data_items;
/* Access direction depends on which syscall this is: capget writes
 * the guest buffer, capset reads it. */
7872 if (num == TARGET_NR_capget) {
7873 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
7875 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
7878 unlock_user_struct(target_header, arg1, 0);
/* capset: marshal the guest-supplied capability sets into host order
 * before the call. */
7882 if (num == TARGET_NR_capset) {
7883 for (i = 0; i < data_items; i++) {
7884 data[i].effective = tswap32(target_data[i].effective);
7885 data[i].permitted = tswap32(target_data[i].permitted);
7886 data[i].inheritable = tswap32(target_data[i].inheritable);
7893 if (num == TARGET_NR_capget) {
7894 ret = get_errno(capget(&header, dataptr));
7896 ret = get_errno(capset(&header, dataptr));
7899 /* The kernel always updates version for both capget and capset */
7900 target_header->version = tswap32(header.version);
7901 unlock_user_struct(target_header, arg1, 1);
/* capget: copy the host-returned capability sets back to the guest. */
7904 if (num == TARGET_NR_capget) {
7905 for (i = 0; i < data_items; i++) {
7906 target_data[i].effective = tswap32(data[i].effective);
7907 target_data[i].permitted = tswap32(data[i].permitted);
7908 target_data[i].inheritable = tswap32(data[i].inheritable);
7910 unlock_user(target_data, arg2, target_datalen);
7912 unlock_user(target_data, arg2, 0);
7917 case TARGET_NR_sigaltstack:
7918 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
7919 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
7920 defined(TARGET_M68K) || defined(TARGET_S390X) || defined(TARGET_OPENRISC)
7921 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
7927 #ifdef CONFIG_SENDFILE
7928 case TARGET_NR_sendfile:
7933 ret = get_user_sal(off, arg3);
7934 if (is_error(ret)) {
7939 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
7940 if (!is_error(ret) && arg3) {
7941 abi_long ret2 = put_user_sal(off, arg3);
7942 if (is_error(ret2)) {
7948 #ifdef TARGET_NR_sendfile64
7949 case TARGET_NR_sendfile64:
7954 ret = get_user_s64(off, arg3);
7955 if (is_error(ret)) {
7960 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
7961 if (!is_error(ret) && arg3) {
7962 abi_long ret2 = put_user_s64(off, arg3);
7963 if (is_error(ret2)) {
7971 case TARGET_NR_sendfile:
7972 #ifdef TARGET_NR_sendfile64
7973 case TARGET_NR_sendfile64:
7978 #ifdef TARGET_NR_getpmsg
7979 case TARGET_NR_getpmsg:
7982 #ifdef TARGET_NR_putpmsg
7983 case TARGET_NR_putpmsg:
7986 #ifdef TARGET_NR_vfork
7987 case TARGET_NR_vfork:
7988 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
7992 #ifdef TARGET_NR_ugetrlimit
7993 case TARGET_NR_ugetrlimit:
7996 int resource = target_to_host_resource(arg1);
7997 ret = get_errno(getrlimit(resource, &rlim));
7998 if (!is_error(ret)) {
7999 struct target_rlimit *target_rlim;
8000 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8002 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8003 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8004 unlock_user_struct(target_rlim, arg2, 1);
8009 #ifdef TARGET_NR_truncate64
8010 case TARGET_NR_truncate64:
8011 if (!(p = lock_user_string(arg1)))
8013 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
8014 unlock_user(p, arg1, 0);
8017 #ifdef TARGET_NR_ftruncate64
8018 case TARGET_NR_ftruncate64:
8019 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
8022 #ifdef TARGET_NR_stat64
8023 case TARGET_NR_stat64:
8024 if (!(p = lock_user_string(arg1)))
8026 ret = get_errno(stat(path(p), &st));
8027 unlock_user(p, arg1, 0);
8029 ret = host_to_target_stat64(cpu_env, arg2, &st);
8032 #ifdef TARGET_NR_lstat64
8033 case TARGET_NR_lstat64:
8034 if (!(p = lock_user_string(arg1)))
8036 ret = get_errno(lstat(path(p), &st));
8037 unlock_user(p, arg1, 0);
8039 ret = host_to_target_stat64(cpu_env, arg2, &st);
8042 #ifdef TARGET_NR_fstat64
8043 case TARGET_NR_fstat64:
8044 ret = get_errno(fstat(arg1, &st));
8046 ret = host_to_target_stat64(cpu_env, arg2, &st);
8049 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
8050 #ifdef TARGET_NR_fstatat64
8051 case TARGET_NR_fstatat64:
8053 #ifdef TARGET_NR_newfstatat
8054 case TARGET_NR_newfstatat:
8056 if (!(p = lock_user_string(arg2)))
8058 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
8060 ret = host_to_target_stat64(cpu_env, arg3, &st);
8063 case TARGET_NR_lchown:
8064 if (!(p = lock_user_string(arg1)))
8066 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
8067 unlock_user(p, arg1, 0);
8069 #ifdef TARGET_NR_getuid
8070 case TARGET_NR_getuid:
8071 ret = get_errno(high2lowuid(getuid()));
8074 #ifdef TARGET_NR_getgid
8075 case TARGET_NR_getgid:
8076 ret = get_errno(high2lowgid(getgid()));
8079 #ifdef TARGET_NR_geteuid
8080 case TARGET_NR_geteuid:
8081 ret = get_errno(high2lowuid(geteuid()));
8084 #ifdef TARGET_NR_getegid
8085 case TARGET_NR_getegid:
8086 ret = get_errno(high2lowgid(getegid()));
8089 case TARGET_NR_setreuid:
8090 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
8092 case TARGET_NR_setregid:
8093 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
8095 case TARGET_NR_getgroups:
8097 int gidsetsize = arg1;
8098 target_id *target_grouplist;
8102 grouplist = alloca(gidsetsize * sizeof(gid_t));
8103 ret = get_errno(getgroups(gidsetsize, grouplist));
8104 if (gidsetsize == 0)
8106 if (!is_error(ret)) {
8107 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
8108 if (!target_grouplist)
8110 for(i = 0;i < ret; i++)
8111 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
8112 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
8116 case TARGET_NR_setgroups:
8118 int gidsetsize = arg1;
8119 target_id *target_grouplist;
8120 gid_t *grouplist = NULL;
8123 grouplist = alloca(gidsetsize * sizeof(gid_t));
8124 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
8125 if (!target_grouplist) {
8126 ret = -TARGET_EFAULT;
8129 for (i = 0; i < gidsetsize; i++) {
8130 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
8132 unlock_user(target_grouplist, arg2, 0);
8134 ret = get_errno(setgroups(gidsetsize, grouplist));
8137 case TARGET_NR_fchown:
8138 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
8140 #if defined(TARGET_NR_fchownat)
8141 case TARGET_NR_fchownat:
8142 if (!(p = lock_user_string(arg2)))
8144 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
8145 low2highgid(arg4), arg5));
8146 unlock_user(p, arg2, 0);
8149 #ifdef TARGET_NR_setresuid
8150 case TARGET_NR_setresuid:
8151 ret = get_errno(setresuid(low2highuid(arg1),
8153 low2highuid(arg3)));
8156 #ifdef TARGET_NR_getresuid
8157 case TARGET_NR_getresuid:
8159 uid_t ruid, euid, suid;
8160 ret = get_errno(getresuid(&ruid, &euid, &suid));
8161 if (!is_error(ret)) {
8162 if (put_user_id(high2lowuid(ruid), arg1)
8163 || put_user_id(high2lowuid(euid), arg2)
8164 || put_user_id(high2lowuid(suid), arg3))
8170 #ifdef TARGET_NR_getresgid
/* NOTE(review): this guard tests TARGET_NR_getresgid, but the case it
 * protects is TARGET_NR_setresgid.  On a target that defines setresgid
 * without getresgid this case silently disappears.  Looks like a
 * copy/paste slip; presumably should be #ifdef TARGET_NR_setresgid —
 * verify against the target syscall number lists. */
8171 case TARGET_NR_setresgid:
8172 ret = get_errno(setresgid(low2highgid(arg1),
8174 low2highgid(arg3)));
8177 #ifdef TARGET_NR_getresgid
8178 case TARGET_NR_getresgid:
8180 gid_t rgid, egid, sgid;
8181 ret = get_errno(getresgid(&rgid, &egid, &sgid));
8182 if (!is_error(ret)) {
8183 if (put_user_id(high2lowgid(rgid), arg1)
8184 || put_user_id(high2lowgid(egid), arg2)
8185 || put_user_id(high2lowgid(sgid), arg3))
8191 case TARGET_NR_chown:
8192 if (!(p = lock_user_string(arg1)))
8194 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
8195 unlock_user(p, arg1, 0);
8197 case TARGET_NR_setuid:
8198 ret = get_errno(setuid(low2highuid(arg1)));
8200 case TARGET_NR_setgid:
8201 ret = get_errno(setgid(low2highgid(arg1)));
8203 case TARGET_NR_setfsuid:
8204 ret = get_errno(setfsuid(arg1));
8206 case TARGET_NR_setfsgid:
8207 ret = get_errno(setfsgid(arg1));
8210 #ifdef TARGET_NR_lchown32
8211 case TARGET_NR_lchown32:
8212 if (!(p = lock_user_string(arg1)))
8214 ret = get_errno(lchown(p, arg2, arg3));
8215 unlock_user(p, arg1, 0);
8218 #ifdef TARGET_NR_getuid32
8219 case TARGET_NR_getuid32:
8220 ret = get_errno(getuid());
8224 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
8225 /* Alpha specific */
8226 case TARGET_NR_getxuid:
8230 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
8232 ret = get_errno(getuid());
8235 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
8236 /* Alpha specific */
8237 case TARGET_NR_getxgid:
8241 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
8243 ret = get_errno(getgid());
8246 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
8247 /* Alpha specific */
8248 case TARGET_NR_osf_getsysinfo:
8249 ret = -TARGET_EOPNOTSUPP;
8251 case TARGET_GSI_IEEE_FP_CONTROL:
8253 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
8255 /* Copied from linux ieee_fpcr_to_swcr. */
8256 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
8257 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
8258 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
8259 | SWCR_TRAP_ENABLE_DZE
8260 | SWCR_TRAP_ENABLE_OVF);
8261 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
8262 | SWCR_TRAP_ENABLE_INE);
8263 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
8264 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
8266 if (put_user_u64 (swcr, arg2))
8272 /* case GSI_IEEE_STATE_AT_SIGNAL:
8273 -- Not implemented in linux kernel.
8275 -- Retrieves current unaligned access state; not much used.
8277 -- Retrieves implver information; surely not used.
8279 -- Grabs a copy of the HWRPB; surely not used.
8284 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
8285 /* Alpha specific */
8286 case TARGET_NR_osf_setsysinfo:
8287 ret = -TARGET_EOPNOTSUPP;
8289 case TARGET_SSI_IEEE_FP_CONTROL:
8291 uint64_t swcr, fpcr, orig_fpcr;
8293 if (get_user_u64 (swcr, arg2)) {
8296 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
8297 fpcr = orig_fpcr & FPCR_DYN_MASK;
8299 /* Copied from linux ieee_swcr_to_fpcr. */
8300 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
8301 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
8302 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
8303 | SWCR_TRAP_ENABLE_DZE
8304 | SWCR_TRAP_ENABLE_OVF)) << 48;
8305 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
8306 | SWCR_TRAP_ENABLE_INE)) << 57;
8307 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
8308 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
8310 cpu_alpha_store_fpcr(cpu_env, fpcr);
8315 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
8317 uint64_t exc, fpcr, orig_fpcr;
8320 if (get_user_u64(exc, arg2)) {
8324 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
8326 /* We only add to the exception status here. */
8327 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
8329 cpu_alpha_store_fpcr(cpu_env, fpcr);
8332 /* Old exceptions are not signaled. */
8333 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
8335 /* If any exceptions set by this call,
8336 and are unmasked, send a signal. */
8338 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
8339 si_code = TARGET_FPE_FLTRES;
8341 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
8342 si_code = TARGET_FPE_FLTUND;
8344 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
8345 si_code = TARGET_FPE_FLTOVF;
8347 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
8348 si_code = TARGET_FPE_FLTDIV;
8350 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
8351 si_code = TARGET_FPE_FLTINV;
8354 target_siginfo_t info;
8355 info.si_signo = SIGFPE;
8357 info.si_code = si_code;
8358 info._sifields._sigfault._addr
8359 = ((CPUArchState *)cpu_env)->pc;
8360 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
8365 /* case SSI_NVPAIRS:
8366 -- Used with SSIN_UACPROC to enable unaligned accesses.
8367 case SSI_IEEE_STATE_AT_SIGNAL:
8368 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
8369 -- Not implemented in linux kernel
8374 #ifdef TARGET_NR_osf_sigprocmask
8375 /* Alpha specific. */
8376 case TARGET_NR_osf_sigprocmask:
8380 sigset_t set, oldset;
8383 case TARGET_SIG_BLOCK:
8386 case TARGET_SIG_UNBLOCK:
8389 case TARGET_SIG_SETMASK:
8393 ret = -TARGET_EINVAL;
8397 target_to_host_old_sigset(&set, &mask);
8398 do_sigprocmask(how, &set, &oldset);
8399 host_to_target_old_sigset(&mask, &oldset);
8405 #ifdef TARGET_NR_getgid32
8406 case TARGET_NR_getgid32:
8407 ret = get_errno(getgid());
8410 #ifdef TARGET_NR_geteuid32
8411 case TARGET_NR_geteuid32:
8412 ret = get_errno(geteuid());
8415 #ifdef TARGET_NR_getegid32
8416 case TARGET_NR_getegid32:
8417 ret = get_errno(getegid());
8420 #ifdef TARGET_NR_setreuid32
8421 case TARGET_NR_setreuid32:
8422 ret = get_errno(setreuid(arg1, arg2));
8425 #ifdef TARGET_NR_setregid32
8426 case TARGET_NR_setregid32:
8427 ret = get_errno(setregid(arg1, arg2));
8430 #ifdef TARGET_NR_getgroups32
8431 case TARGET_NR_getgroups32:
8433 int gidsetsize = arg1;
8434 uint32_t *target_grouplist;
8438 grouplist = alloca(gidsetsize * sizeof(gid_t));
8439 ret = get_errno(getgroups(gidsetsize, grouplist));
8440 if (gidsetsize == 0)
8442 if (!is_error(ret)) {
8443 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
8444 if (!target_grouplist) {
8445 ret = -TARGET_EFAULT;
8448 for(i = 0;i < ret; i++)
8449 target_grouplist[i] = tswap32(grouplist[i]);
8450 unlock_user(target_grouplist, arg2, gidsetsize * 4);
8455 #ifdef TARGET_NR_setgroups32
8456 case TARGET_NR_setgroups32:
8458 int gidsetsize = arg1;
8459 uint32_t *target_grouplist;
8463 grouplist = alloca(gidsetsize * sizeof(gid_t));
8464 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
8465 if (!target_grouplist) {
8466 ret = -TARGET_EFAULT;
8469 for(i = 0;i < gidsetsize; i++)
8470 grouplist[i] = tswap32(target_grouplist[i]);
8471 unlock_user(target_grouplist, arg2, 0);
8472 ret = get_errno(setgroups(gidsetsize, grouplist));
8476 #ifdef TARGET_NR_fchown32
8477 case TARGET_NR_fchown32:
8478 ret = get_errno(fchown(arg1, arg2, arg3));
8481 #ifdef TARGET_NR_setresuid32
8482 case TARGET_NR_setresuid32:
8483 ret = get_errno(setresuid(arg1, arg2, arg3));
8486 #ifdef TARGET_NR_getresuid32
8487 case TARGET_NR_getresuid32:
8489 uid_t ruid, euid, suid;
8490 ret = get_errno(getresuid(&ruid, &euid, &suid));
8491 if (!is_error(ret)) {
8492 if (put_user_u32(ruid, arg1)
8493 || put_user_u32(euid, arg2)
8494 || put_user_u32(suid, arg3))
8500 #ifdef TARGET_NR_setresgid32
8501 case TARGET_NR_setresgid32:
8502 ret = get_errno(setresgid(arg1, arg2, arg3));
8505 #ifdef TARGET_NR_getresgid32
8506 case TARGET_NR_getresgid32:
8508 gid_t rgid, egid, sgid;
8509 ret = get_errno(getresgid(&rgid, &egid, &sgid));
8510 if (!is_error(ret)) {
8511 if (put_user_u32(rgid, arg1)
8512 || put_user_u32(egid, arg2)
8513 || put_user_u32(sgid, arg3))
8519 #ifdef TARGET_NR_chown32
8520 case TARGET_NR_chown32:
8521 if (!(p = lock_user_string(arg1)))
8523 ret = get_errno(chown(p, arg2, arg3));
8524 unlock_user(p, arg1, 0);
8527 #ifdef TARGET_NR_setuid32
8528 case TARGET_NR_setuid32:
8529 ret = get_errno(setuid(arg1));
8532 #ifdef TARGET_NR_setgid32
8533 case TARGET_NR_setgid32:
8534 ret = get_errno(setgid(arg1));
8537 #ifdef TARGET_NR_setfsuid32
8538 case TARGET_NR_setfsuid32:
8539 ret = get_errno(setfsuid(arg1));
8542 #ifdef TARGET_NR_setfsgid32
8543 case TARGET_NR_setfsgid32:
8544 ret = get_errno(setfsgid(arg1));
8548 case TARGET_NR_pivot_root:
8550 #ifdef TARGET_NR_mincore
8551 case TARGET_NR_mincore:
8554 ret = -TARGET_EFAULT;
8555 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
/* NOTE(review): arg3 is mincore(2)'s *output* residency vector (one byte
 * per page), yet it is locked with lock_user_string(), which validates a
 * NUL-terminated *readable* string.  That both scans uninitialized guest
 * memory for a NUL and fails to request write access.  Presumably this
 * should be lock_user(VERIFY_WRITE, arg3, <npages>, 0) with the length
 * derived from arg2 — confirm against the host mincore contract. */
8557 if (!(p = lock_user_string(arg3)))
8559 ret = get_errno(mincore(a, arg2, p));
8560 unlock_user(p, arg3, ret);
8562 unlock_user(a, arg1, 0);
8566 #ifdef TARGET_NR_arm_fadvise64_64
8567 case TARGET_NR_arm_fadvise64_64:
8570 * arm_fadvise64_64 looks like fadvise64_64 but
8571 * with different argument order
8579 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
8580 #ifdef TARGET_NR_fadvise64_64
8581 case TARGET_NR_fadvise64_64:
8583 #ifdef TARGET_NR_fadvise64
8584 case TARGET_NR_fadvise64:
8588 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
8589 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
8590 case 6: arg4 = POSIX_FADV_DONTNEED; break;
8591 case 7: arg4 = POSIX_FADV_NOREUSE; break;
8595 ret = -posix_fadvise(arg1, arg2, arg3, arg4);
8598 #ifdef TARGET_NR_madvise
8599 case TARGET_NR_madvise:
8600 /* A straight passthrough may not be safe because qemu sometimes
8601 turns private file-backed mappings into anonymous mappings.
8602 This will break MADV_DONTNEED.
8603 This is a hint, so ignoring and returning success is ok. */
8607 #if TARGET_ABI_BITS == 32
8608 case TARGET_NR_fcntl64:
8612 struct target_flock64 *target_fl;
8614 struct target_eabi_flock64 *target_efl;
8617 cmd = target_to_host_fcntl_cmd(arg2);
8618 if (cmd == -TARGET_EINVAL) {
8624 case TARGET_F_GETLK64:
8626 if (((CPUARMState *)cpu_env)->eabi) {
8627 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
8629 fl.l_type = tswap16(target_efl->l_type);
8630 fl.l_whence = tswap16(target_efl->l_whence);
8631 fl.l_start = tswap64(target_efl->l_start);
8632 fl.l_len = tswap64(target_efl->l_len);
8633 fl.l_pid = tswap32(target_efl->l_pid);
8634 unlock_user_struct(target_efl, arg3, 0);
8638 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
8640 fl.l_type = tswap16(target_fl->l_type);
8641 fl.l_whence = tswap16(target_fl->l_whence);
8642 fl.l_start = tswap64(target_fl->l_start);
8643 fl.l_len = tswap64(target_fl->l_len);
8644 fl.l_pid = tswap32(target_fl->l_pid);
8645 unlock_user_struct(target_fl, arg3, 0);
8647 ret = get_errno(fcntl(arg1, cmd, &fl));
8650 if (((CPUARMState *)cpu_env)->eabi) {
8651 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
8653 target_efl->l_type = tswap16(fl.l_type);
8654 target_efl->l_whence = tswap16(fl.l_whence);
8655 target_efl->l_start = tswap64(fl.l_start);
8656 target_efl->l_len = tswap64(fl.l_len);
8657 target_efl->l_pid = tswap32(fl.l_pid);
8658 unlock_user_struct(target_efl, arg3, 1);
8662 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
8664 target_fl->l_type = tswap16(fl.l_type);
8665 target_fl->l_whence = tswap16(fl.l_whence);
8666 target_fl->l_start = tswap64(fl.l_start);
8667 target_fl->l_len = tswap64(fl.l_len);
8668 target_fl->l_pid = tswap32(fl.l_pid);
8669 unlock_user_struct(target_fl, arg3, 1);
8674 case TARGET_F_SETLK64:
8675 case TARGET_F_SETLKW64:
8677 if (((CPUARMState *)cpu_env)->eabi) {
8678 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
8680 fl.l_type = tswap16(target_efl->l_type);
8681 fl.l_whence = tswap16(target_efl->l_whence);
8682 fl.l_start = tswap64(target_efl->l_start);
8683 fl.l_len = tswap64(target_efl->l_len);
8684 fl.l_pid = tswap32(target_efl->l_pid);
8685 unlock_user_struct(target_efl, arg3, 0);
8689 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
8691 fl.l_type = tswap16(target_fl->l_type);
8692 fl.l_whence = tswap16(target_fl->l_whence);
8693 fl.l_start = tswap64(target_fl->l_start);
8694 fl.l_len = tswap64(target_fl->l_len);
8695 fl.l_pid = tswap32(target_fl->l_pid);
8696 unlock_user_struct(target_fl, arg3, 0);
8698 ret = get_errno(fcntl(arg1, cmd, &fl));
8701 ret = do_fcntl(arg1, arg2, arg3);
8707 #ifdef TARGET_NR_cacheflush
8708 case TARGET_NR_cacheflush:
8709 /* self-modifying code is handled automatically, so nothing needed */
8713 #ifdef TARGET_NR_security
8714 case TARGET_NR_security:
8717 #ifdef TARGET_NR_getpagesize
8718 case TARGET_NR_getpagesize:
8719 ret = TARGET_PAGE_SIZE;
8722 case TARGET_NR_gettid:
8723 ret = get_errno(gettid());
8725 #ifdef TARGET_NR_readahead
8726 case TARGET_NR_readahead:
8727 #if TARGET_ABI_BITS == 32
8728 if (regpairs_aligned(cpu_env)) {
8733 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
8735 ret = get_errno(readahead(arg1, arg2, arg3));
8740 #ifdef TARGET_NR_setxattr
8741 case TARGET_NR_listxattr:
8742 case TARGET_NR_llistxattr:
8746 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8748 ret = -TARGET_EFAULT;
8752 p = lock_user_string(arg1);
8754 if (num == TARGET_NR_listxattr) {
8755 ret = get_errno(listxattr(p, b, arg3));
8757 ret = get_errno(llistxattr(p, b, arg3));
8760 ret = -TARGET_EFAULT;
8762 unlock_user(p, arg1, 0);
8763 unlock_user(b, arg2, arg3);
8766 case TARGET_NR_flistxattr:
8770 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8772 ret = -TARGET_EFAULT;
8776 ret = get_errno(flistxattr(arg1, b, arg3));
8777 unlock_user(b, arg2, arg3);
8780 case TARGET_NR_setxattr:
8781 case TARGET_NR_lsetxattr:
8783 void *p, *n, *v = 0;
8785 v = lock_user(VERIFY_READ, arg3, arg4, 1);
8787 ret = -TARGET_EFAULT;
8791 p = lock_user_string(arg1);
8792 n = lock_user_string(arg2);
8794 if (num == TARGET_NR_setxattr) {
8795 ret = get_errno(setxattr(p, n, v, arg4, arg5));
8797 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
8800 ret = -TARGET_EFAULT;
8802 unlock_user(p, arg1, 0);
8803 unlock_user(n, arg2, 0);
8804 unlock_user(v, arg3, 0);
8807 case TARGET_NR_fsetxattr:
8811 v = lock_user(VERIFY_READ, arg3, arg4, 1);
8813 ret = -TARGET_EFAULT;
8817 n = lock_user_string(arg2);
8819 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
8821 ret = -TARGET_EFAULT;
8823 unlock_user(n, arg2, 0);
8824 unlock_user(v, arg3, 0);
8827 case TARGET_NR_getxattr:
8828 case TARGET_NR_lgetxattr:
8830 void *p, *n, *v = 0;
8832 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8834 ret = -TARGET_EFAULT;
8838 p = lock_user_string(arg1);
8839 n = lock_user_string(arg2);
8841 if (num == TARGET_NR_getxattr) {
8842 ret = get_errno(getxattr(p, n, v, arg4));
8844 ret = get_errno(lgetxattr(p, n, v, arg4));
8847 ret = -TARGET_EFAULT;
8849 unlock_user(p, arg1, 0);
8850 unlock_user(n, arg2, 0);
8851 unlock_user(v, arg3, arg4);
8854 case TARGET_NR_fgetxattr:
8858 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8860 ret = -TARGET_EFAULT;
8864 n = lock_user_string(arg2);
8866 ret = get_errno(fgetxattr(arg1, n, v, arg4));
8868 ret = -TARGET_EFAULT;
8870 unlock_user(n, arg2, 0);
8871 unlock_user(v, arg3, arg4);
8874 case TARGET_NR_removexattr:
8875 case TARGET_NR_lremovexattr:
8878 p = lock_user_string(arg1);
8879 n = lock_user_string(arg2);
8881 if (num == TARGET_NR_removexattr) {
8882 ret = get_errno(removexattr(p, n));
8884 ret = get_errno(lremovexattr(p, n));
8887 ret = -TARGET_EFAULT;
8889 unlock_user(p, arg1, 0);
8890 unlock_user(n, arg2, 0);
8893 case TARGET_NR_fremovexattr:
8896 n = lock_user_string(arg2);
8898 ret = get_errno(fremovexattr(arg1, n));
8900 ret = -TARGET_EFAULT;
8902 unlock_user(n, arg2, 0);
8906 #endif /* CONFIG_ATTR */
8907 #ifdef TARGET_NR_set_thread_area
8908 case TARGET_NR_set_thread_area:
8909 #if defined(TARGET_MIPS)
8910 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
8913 #elif defined(TARGET_CRIS)
8915 ret = -TARGET_EINVAL;
8917 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
8921 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
8922 ret = do_set_thread_area(cpu_env, arg1);
8924 #elif defined(TARGET_M68K)
8926 TaskState *ts = cpu->opaque;
8927 ts->tp_value = arg1;
8932 goto unimplemented_nowarn;
8935 #ifdef TARGET_NR_get_thread_area
8936 case TARGET_NR_get_thread_area:
8937 #if defined(TARGET_I386) && defined(TARGET_ABI32)
8938 ret = do_get_thread_area(cpu_env, arg1);
8940 #elif defined(TARGET_M68K)
8942 TaskState *ts = cpu->opaque;
8947 goto unimplemented_nowarn;
8950 #ifdef TARGET_NR_getdomainname
8951 case TARGET_NR_getdomainname:
8952 goto unimplemented_nowarn;
8955 #ifdef TARGET_NR_clock_gettime
8956 case TARGET_NR_clock_gettime:
8959 ret = get_errno(clock_gettime(arg1, &ts));
8960 if (!is_error(ret)) {
8961 host_to_target_timespec(arg2, &ts);
8966 #ifdef TARGET_NR_clock_getres
8967 case TARGET_NR_clock_getres:
8970 ret = get_errno(clock_getres(arg1, &ts));
8971 if (!is_error(ret)) {
8972 host_to_target_timespec(arg2, &ts);
8977 #ifdef TARGET_NR_clock_nanosleep
8978 case TARGET_NR_clock_nanosleep:
8981 target_to_host_timespec(&ts, arg3);
8982 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
8984 host_to_target_timespec(arg4, &ts);
8989 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
8990 case TARGET_NR_set_tid_address:
8991 ret = get_errno(set_tid_address((int *)g2h(arg1)));
8995 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
8996 case TARGET_NR_tkill:
8997 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
9001 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
9002 case TARGET_NR_tgkill:
9003 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
9004 target_to_host_signal(arg3)));
9008 #ifdef TARGET_NR_set_robust_list
9009 case TARGET_NR_set_robust_list:
9010 case TARGET_NR_get_robust_list:
9011 /* The ABI for supporting robust futexes has userspace pass
9012 * the kernel a pointer to a linked list which is updated by
9013 * userspace after the syscall; the list is walked by the kernel
9014 * when the thread exits. Since the linked list in QEMU guest
9015 * memory isn't a valid linked list for the host and we have
9016 * no way to reliably intercept the thread-death event, we can't
9017 * support these. Silently return ENOSYS so that guest userspace
9018 * falls back to a non-robust futex implementation (which should
9019 * be OK except in the corner case of the guest crashing while
9020 * holding a mutex that is shared with another process via
9023 goto unimplemented_nowarn;
9026 #if defined(TARGET_NR_utimensat)
9027 case TARGET_NR_utimensat:
9029 struct timespec *tsp, ts[2];
9033 target_to_host_timespec(ts, arg3);
9034 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
9038 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
9040 if (!(p = lock_user_string(arg2))) {
9041 ret = -TARGET_EFAULT;
9044 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
9045 unlock_user(p, arg2, 0);
9050 case TARGET_NR_futex:
9051 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
9053 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
9054 case TARGET_NR_inotify_init:
9055 ret = get_errno(sys_inotify_init());
9058 #ifdef CONFIG_INOTIFY1
9059 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
9060 case TARGET_NR_inotify_init1:
9061 ret = get_errno(sys_inotify_init1(arg1));
9065 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
9066 case TARGET_NR_inotify_add_watch:
9067 p = lock_user_string(arg2);
9068 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
9069 unlock_user(p, arg2, 0);
9072 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
9073 case TARGET_NR_inotify_rm_watch:
9074 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
9078 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
9079 case TARGET_NR_mq_open:
9081 struct mq_attr posix_mq_attr;
/* NOTE(review): lock_user_string(arg1 - 1) locks the guest string one
 * byte *before* the address the guest passed, while the matching
 * unlock_user below uses arg1 itself.  No visible justification for the
 * "- 1" here — looks like an off-by-one on the queue-name pointer;
 * verify (the same pattern repeats in mq_unlink below).  Also: neither
 * lock_user_string nor copy_from_user_mq_attr results are checked, so a
 * bad guest pointer is not reported as EFAULT. */
9083 p = lock_user_string(arg1 - 1);
9085 copy_from_user_mq_attr (&posix_mq_attr, arg4);
9086 ret = get_errno(mq_open(p, arg2, arg3, &posix_mq_attr));
9087 unlock_user (p, arg1, 0);
9091 case TARGET_NR_mq_unlink:
/* NOTE(review): same suspicious "arg1 - 1" as mq_open above. */
9092 p = lock_user_string(arg1 - 1);
9093 ret = get_errno(mq_unlink(p));
9094 unlock_user (p, arg1, 0);
9097 case TARGET_NR_mq_timedsend:
9101 p = lock_user (VERIFY_READ, arg2, arg3, 1);
9103 target_to_host_timespec(&ts, arg5);
9104 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
9105 host_to_target_timespec(arg5, &ts);
9108 ret = get_errno(mq_send(arg1, p, arg3, arg4));
9109 unlock_user (p, arg2, arg3);
9113 case TARGET_NR_mq_timedreceive:
9118 p = lock_user (VERIFY_READ, arg2, arg3, 1);
9120 target_to_host_timespec(&ts, arg5);
9121 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
9122 host_to_target_timespec(arg5, &ts);
9125 ret = get_errno(mq_receive(arg1, p, arg3, &prio));
9126 unlock_user (p, arg2, arg3);
9128 put_user_u32(prio, arg4);
9132 /* Not implemented for now... */
9133 /* case TARGET_NR_mq_notify: */
9136 case TARGET_NR_mq_getsetattr:
9138 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
9141 ret = mq_getattr(arg1, &posix_mq_attr_out);
9142 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
9145 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
9146 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
9153 #ifdef CONFIG_SPLICE
9154 #ifdef TARGET_NR_tee
9157 ret = get_errno(tee(arg1,arg2,arg3,arg4));
9161 #ifdef TARGET_NR_splice
9162 case TARGET_NR_splice:
9164 loff_t loff_in, loff_out;
9165 loff_t *ploff_in = NULL, *ploff_out = NULL;
9167 get_user_u64(loff_in, arg2);
9168 ploff_in = &loff_in;
/* NOTE(review): this second get_user_u64 reads the *output* offset from
 * arg2 again, but splice(2)'s off_out is the syscall's 4th argument —
 * this should almost certainly read from arg4.  Additionally, both
 * get_user_u64 return values are ignored (guest EFAULT is lost), and
 * the offsets updated by the host splice() are never copied back to
 * guest memory, so the guest's off_in/off_out stay stale. */
9171 get_user_u64(loff_out, arg2);
9172 ploff_out = &loff_out;
9174 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
9178 #ifdef TARGET_NR_vmsplice
9179 case TARGET_NR_vmsplice:
9181 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9183 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
9184 unlock_iovec(vec, arg2, arg3, 0);
9186 ret = -host_to_target_errno(errno);
9191 #endif /* CONFIG_SPLICE */
9192 #ifdef CONFIG_EVENTFD
9193 #if defined(TARGET_NR_eventfd)
9194 case TARGET_NR_eventfd:
9195 ret = get_errno(eventfd(arg1, 0));
9198 #if defined(TARGET_NR_eventfd2)
9199 case TARGET_NR_eventfd2:
9201 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
9202 if (arg2 & TARGET_O_NONBLOCK) {
9203 host_flags |= O_NONBLOCK;
9205 if (arg2 & TARGET_O_CLOEXEC) {
9206 host_flags |= O_CLOEXEC;
9208 ret = get_errno(eventfd(arg1, host_flags));
9212 #endif /* CONFIG_EVENTFD */
9213 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
9214 case TARGET_NR_fallocate:
9215 #if TARGET_ABI_BITS == 32
9216 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
9217 target_offset64(arg5, arg6)));
9219 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
9223 #if defined(CONFIG_SYNC_FILE_RANGE)
9224 #if defined(TARGET_NR_sync_file_range)
9225 case TARGET_NR_sync_file_range:
9226 #if TARGET_ABI_BITS == 32
9227 #if defined(TARGET_MIPS)
9228 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
9229 target_offset64(arg5, arg6), arg7));
9231 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
9232 target_offset64(arg4, arg5), arg6));
9233 #endif /* !TARGET_MIPS */
9235 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
9239 #if defined(TARGET_NR_sync_file_range2)
9240 case TARGET_NR_sync_file_range2:
9241 /* This is like sync_file_range but the arguments are reordered */
9242 #if TARGET_ABI_BITS == 32
9243 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
9244 target_offset64(arg5, arg6), arg2));
9246 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
9251 #if defined(CONFIG_EPOLL)
9252 #if defined(TARGET_NR_epoll_create)
9253 case TARGET_NR_epoll_create:
9254 ret = get_errno(epoll_create(arg1));
9257 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
9258 case TARGET_NR_epoll_create1:
9259 ret = get_errno(epoll_create1(arg1));
9262 #if defined(TARGET_NR_epoll_ctl)
9263 case TARGET_NR_epoll_ctl:
9265 struct epoll_event ep;
9266 struct epoll_event *epp = 0;
9268 struct target_epoll_event *target_ep;
9269 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
9272 ep.events = tswap32(target_ep->events);
9273 /* The epoll_data_t union is just opaque data to the kernel,
9274 * so we transfer all 64 bits across and need not worry what
9275 * actual data type it is.
9277 ep.data.u64 = tswap64(target_ep->data.u64);
9278 unlock_user_struct(target_ep, arg4, 0);
9281 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
9286 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
9287 #define IMPLEMENT_EPOLL_PWAIT
9289 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
9290 #if defined(TARGET_NR_epoll_wait)
9291 case TARGET_NR_epoll_wait:
9293 #if defined(IMPLEMENT_EPOLL_PWAIT)
9294 case TARGET_NR_epoll_pwait:
9297 struct target_epoll_event *target_ep;
9298 struct epoll_event *ep;
9300 int maxevents = arg3;
9303 target_ep = lock_user(VERIFY_WRITE, arg2,
9304 maxevents * sizeof(struct target_epoll_event), 1);
9309 ep = alloca(maxevents * sizeof(struct epoll_event));
9312 #if defined(IMPLEMENT_EPOLL_PWAIT)
9313 case TARGET_NR_epoll_pwait:
9315 target_sigset_t *target_set;
9316 sigset_t _set, *set = &_set;
9319 target_set = lock_user(VERIFY_READ, arg5,
9320 sizeof(target_sigset_t), 1);
9322 unlock_user(target_ep, arg2, 0);
9325 target_to_host_sigset(set, target_set);
9326 unlock_user(target_set, arg5, 0);
9331 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
9335 #if defined(TARGET_NR_epoll_wait)
9336 case TARGET_NR_epoll_wait:
9337 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
9341 ret = -TARGET_ENOSYS;
9343 if (!is_error(ret)) {
9345 for (i = 0; i < ret; i++) {
9346 target_ep[i].events = tswap32(ep[i].events);
9347 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
9350 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
9355 #ifdef TARGET_NR_prlimit64
9356 case TARGET_NR_prlimit64:
9358 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
9359 struct target_rlimit64 *target_rnew, *target_rold;
9360 struct host_rlimit64 rnew, rold, *rnewp = 0;
9362 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
9365 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
9366 rnew.rlim_max = tswap64(target_rnew->rlim_max);
9367 unlock_user_struct(target_rnew, arg3, 0);
9371 ret = get_errno(sys_prlimit64(arg1, arg2, rnewp, arg4 ? &rold : 0));
9372 if (!is_error(ret) && arg4) {
9373 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
9376 target_rold->rlim_cur = tswap64(rold.rlim_cur);
9377 target_rold->rlim_max = tswap64(rold.rlim_max);
9378 unlock_user_struct(target_rold, arg4, 1);
9383 #ifdef TARGET_NR_gethostname
9384 case TARGET_NR_gethostname:
9386 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9388 ret = get_errno(gethostname(name, arg2));
9389 unlock_user(name, arg1, arg2);
9391 ret = -TARGET_EFAULT;
9396 #ifdef TARGET_NR_atomic_cmpxchg_32
9397 case TARGET_NR_atomic_cmpxchg_32:
9399 /* should use start_exclusive from main.c */
9400 abi_ulong mem_value;
9401 if (get_user_u32(mem_value, arg6)) {
9402 target_siginfo_t info;
9403 info.si_signo = SIGSEGV;
9405 info.si_code = TARGET_SEGV_MAPERR;
9406 info._sifields._sigfault._addr = arg6;
9407 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
9411 if (mem_value == arg2)
9412 put_user_u32(arg1, arg6);
9417 #ifdef TARGET_NR_atomic_barrier
9418 case TARGET_NR_atomic_barrier:
9420 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
9426 #ifdef TARGET_NR_timer_create
9427 case TARGET_NR_timer_create:
9429 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
9431 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
9432 struct target_timer_t *ptarget_timer;
9435 int timer_index = next_free_host_timer();
9437 if (timer_index < 0) {
9438 ret = -TARGET_EAGAIN;
9440 timer_t *phtimer = g_posix_timers + timer_index;
9443 phost_sevp = &host_sevp;
9444 ret = target_to_host_sigevent(phost_sevp, arg2);
9450 ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
9454 if (!lock_user_struct(VERIFY_WRITE, ptarget_timer, arg3, 1)) {
9457 ptarget_timer->ptr = tswap32(0xcafe0000 | timer_index);
9458 unlock_user_struct(ptarget_timer, arg3, 1);
9465 #ifdef TARGET_NR_timer_settime
9466 case TARGET_NR_timer_settime:
9468 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
9469 * struct itimerspec * old_value */
/* NOTE(review): arg1 is used as a raw index into g_posix_timers here,
 * but timer_create (above) hands the guest an id encoded as
 * 0xcafe0000 | timer_index — a guest passing that id back verbatim
 * would fail this bounds check.  Confirm how the id is decoded on
 * entry to these timer_* cases. */
9471 if (arg3 == 0 || arg1 < 0 || arg1 >= ARRAY_SIZE(g_posix_timers)) {
9472 ret = -TARGET_EINVAL;
9474 timer_t htimer = g_posix_timers[arg1];
9475 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
9477 target_to_host_itimerspec(&hspec_new, arg3);
9479 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
/* NOTE(review): hspec_old is copied back through arg2 — the *flags*
 * argument — whereas the comment above says old_value is arg4.  This
 * should presumably be host_to_target_itimerspec(arg4, &hspec_old),
 * guarded on arg4 being non-NULL; as written it clobbers guest memory
 * at the flags value and never fills the caller's old_value. */
9480 host_to_target_itimerspec(arg2, &hspec_old);
9486 #ifdef TARGET_NR_timer_gettime
9487 case TARGET_NR_timer_gettime:
9489 /* args: timer_t timerid, struct itimerspec *curr_value */
9492 return -TARGET_EFAULT;
9493 } else if (arg1 < 0 || arg1 >= ARRAY_SIZE(g_posix_timers)) {
9494 ret = -TARGET_EINVAL;
9496 timer_t htimer = g_posix_timers[arg1];
9497 struct itimerspec hspec;
9498 ret = get_errno(timer_gettime(htimer, &hspec));
9500 if (host_to_target_itimerspec(arg2, &hspec)) {
9501 ret = -TARGET_EFAULT;
9508 #ifdef TARGET_NR_timer_getoverrun
9509 case TARGET_NR_timer_getoverrun:
9511 /* args: timer_t timerid */
9513 if (arg1 < 0 || arg1 >= ARRAY_SIZE(g_posix_timers)) {
9514 ret = -TARGET_EINVAL;
9516 timer_t htimer = g_posix_timers[arg1];
9517 ret = get_errno(timer_getoverrun(htimer));
9523 #ifdef TARGET_NR_timer_delete
9524 case TARGET_NR_timer_delete:
9526 /* args: timer_t timerid */
9528 if (arg1 < 0 || arg1 >= ARRAY_SIZE(g_posix_timers)) {
9529 ret = -TARGET_EINVAL;
9531 timer_t htimer = g_posix_timers[arg1];
9532 ret = get_errno(timer_delete(htimer));
9533 g_posix_timers[arg1] = 0;
9541 gemu_log("qemu: Unsupported syscall: %d\n", num);
9542 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
9543 unimplemented_nowarn:
9545 ret = -TARGET_ENOSYS;
9550 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
9553 print_syscall_ret(num, ret);
9556 ret = -TARGET_EFAULT;