4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
29 #include <sys/mount.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
37 #include <linux/capability.h>
40 int __clone2(int (*fn)(void *), void *child_stack_base,
41 size_t stack_size, int flags, void *arg, ...);
43 #include <sys/socket.h>
47 #include <sys/times.h>
50 #include <sys/statfs.h>
52 #include <sys/sysinfo.h>
53 #include <sys/signalfd.h>
54 //#include <sys/user.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <linux/wireless.h>
58 #include <linux/icmp.h>
59 #include "qemu-common.h"
61 #include <sys/timerfd.h>
67 #include <sys/eventfd.h>
70 #include <sys/epoll.h>
73 #include "qemu/xattr.h"
75 #ifdef CONFIG_SENDFILE
76 #include <sys/sendfile.h>
79 #define termios host_termios
80 #define winsize host_winsize
81 #define termio host_termio
82 #define sgttyb host_sgttyb /* same as target */
83 #define tchars host_tchars /* same as target */
84 #define ltchars host_ltchars /* same as target */
86 #include <linux/termios.h>
87 #include <linux/unistd.h>
88 #include <linux/cdrom.h>
89 #include <linux/hdreg.h>
90 #include <linux/soundcard.h>
92 #include <linux/mtio.h>
94 #if defined(CONFIG_FIEMAP)
95 #include <linux/fiemap.h>
99 #include <linux/dm-ioctl.h>
100 #include <linux/reboot.h>
101 #include <linux/route.h>
102 #include <linux/filter.h>
103 #include <linux/blkpg.h>
104 #include <linux/netlink.h>
105 #ifdef CONFIG_RTNETLINK
106 #include <linux/rtnetlink.h>
108 #include <linux/audit.h>
109 #include "linux_loop.h"
/* Clone flags that QEMU's NPTL thread emulation passes through to the
 * host clone(): TLS setup plus parent/child TID bookkeeping. */
114 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
115 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
118 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
119 * once. This exercises the codepaths for restart.
121 //#define DEBUG_ERESTARTSYS
/* VFAT readdir ioctl numbers, defined locally because including
 * linux/msdos_fs.h (see the commented-out include) conflicts with
 * other headers pulled in above. */
123 //#include <linux/msdos_fs.h>
124 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
125 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
127 /* This is the size of the host kernel's sigset_t, needed where we make
128 * direct system calls that take a sigset_t pointer and a size.
130 #define SIGSET_T_SIZE (_NSIG / 8)
/* _syscallN(type, name, ...): generate a static wrapper function that
 * invokes the raw host system call __NR_<name> via syscall(2) with N
 * arguments.  Used below for host syscalls that glibc does not expose.
 * NOTE(review): several brace lines of these macro bodies are not
 * visible in this fragment; no comments are inserted between the
 * backslash-continued lines. */
140 #define _syscall0(type,name) \
141 static type name (void) \
143 return syscall(__NR_##name); \
146 #define _syscall1(type,name,type1,arg1) \
147 static type name (type1 arg1) \
149 return syscall(__NR_##name, arg1); \
152 #define _syscall2(type,name,type1,arg1,type2,arg2) \
153 static type name (type1 arg1,type2 arg2) \
155 return syscall(__NR_##name, arg1, arg2); \
158 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
159 static type name (type1 arg1,type2 arg2,type3 arg3) \
161 return syscall(__NR_##name, arg1, arg2, arg3); \
164 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
165 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
167 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
170 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
172 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
174 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
178 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
179 type5,arg5,type6,arg6) \
180 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
183 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
/* Local __NR_sys_* aliases so the _syscallN-generated wrappers can use
 * a sys_ prefix without colliding with the libc symbols of the same
 * name. */
187 #define __NR_sys_uname __NR_uname
188 #define __NR_sys_getcwd1 __NR_getcwd
189 #define __NR_sys_getdents __NR_getdents
190 #define __NR_sys_getdents64 __NR_getdents64
191 #define __NR_sys_getpriority __NR_getpriority
192 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
193 #define __NR_sys_syslog __NR_syslog
194 #define __NR_sys_futex __NR_futex
195 #define __NR_sys_inotify_init __NR_inotify_init
196 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
197 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
/* 64-bit hosts have no _llseek syscall; map it to plain lseek. */
199 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
201 #define __NR__llseek __NR_lseek
204 /* Newer kernel ports have llseek() instead of _llseek() */
205 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
206 #define TARGET_NR__llseek TARGET_NR_llseek
/* gettid(2): glibc historically provided no wrapper, so either generate
 * one via _syscall0 or fall back to a stub.  NOTE(review): these two
 * definitions belong to alternate preprocessor branches whose
 * #ifdef/#else lines are not visible in this fragment. */
210 _syscall0(int, gettid)
212 /* This is a replacement for the host gettid() and must return a host
214 static int gettid(void) {
/* Raw host syscall wrappers used by the emulation below.  Each is only
 * generated when the relevant target and host syscall numbers exist. */
218 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
219 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
221 #if !defined(__NR_getdents) || \
222 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
223 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
225 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
226 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
227 loff_t *, res, uint, wh);
229 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
230 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
231 #ifdef __NR_exit_group
232 _syscall1(int,exit_group,int,error_code)
234 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
235 _syscall1(int,set_tid_address,int *,tidptr)
237 #if defined(TARGET_NR_futex) && defined(__NR_futex)
238 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
239 const struct timespec *,timeout,int *,uaddr2,int,val3)
/* sched_{get,set}affinity wrapped directly: the glibc versions massage
 * the mask size, while we need the raw kernel semantics. */
241 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
242 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
243 unsigned long *, user_mask_ptr);
244 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
245 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
246 unsigned long *, user_mask_ptr);
247 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
249 _syscall2(int, capget, struct __user_cap_header_struct *, header,
250 struct __user_cap_data_struct *, data);
251 _syscall2(int, capset, struct __user_cap_header_struct *, header,
252 struct __user_cap_data_struct *, data);
253 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
254 _syscall2(int, ioprio_get, int, which, int, who)
256 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
257 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
259 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
260 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
/* Bidirectional target<->host translation table for open(2)/fcntl(2)
 * O_* flag bits (each row: target mask, target bits, host mask, host
 * bits).  Consumed by the generic bitmask translation helpers. */
263 static bitmask_transtbl fcntl_flags_tbl[] = {
264 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
265 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
266 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
267 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
268 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
269 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
270 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
271 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
272 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
273 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
274 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
275 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
276 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
/* Flags that are optional on the host are only mapped when defined. */
277 #if defined(O_DIRECT)
278 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
280 #if defined(O_NOATIME)
281 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
283 #if defined(O_CLOEXEC)
284 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
287 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
289 /* Don't terminate the list prematurely on 64-bit host+guest. */
290 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
291 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
/* Per-fd translation hooks: some emulated fds (e.g. sockets with
 * host/target representation differences) need their payload data
 * and/or sockaddrs converted between target and host formats. */
296 typedef abi_long (*TargetFdDataFunc)(void *, size_t);
297 typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t);
298 typedef struct TargetFdTrans {
299 TargetFdDataFunc host_to_target_data;
300 TargetFdDataFunc target_to_host_data;
301 TargetFdAddrFunc target_to_host_addr;
/* Table of registered translators, indexed by fd; grown on demand in
 * 64-entry slices by fd_trans_register(). */
304 static TargetFdTrans **target_fd_trans;
306 static unsigned int target_fd_max;
/* Look up the target->host data converter registered for fd, if any. */
308 static TargetFdDataFunc fd_trans_target_to_host_data(int fd)
310 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
311 return target_fd_trans[fd]->target_to_host_data;
/* Look up the host->target data converter registered for fd, if any. */
316 static TargetFdDataFunc fd_trans_host_to_target_data(int fd)
318 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
319 return target_fd_trans[fd]->host_to_target_data;
/* Look up the target->host sockaddr converter for fd, if any. */
324 static TargetFdAddrFunc fd_trans_target_to_host_addr(int fd)
326 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
327 return target_fd_trans[fd]->target_to_host_addr;
/* Attach a translator to fd, growing the table to the next multiple of
 * 64 entries and zero-filling the new tail when fd is out of range. */
332 static void fd_trans_register(int fd, TargetFdTrans *trans)
336 if (fd >= target_fd_max) {
337 oldmax = target_fd_max;
338 target_fd_max = ((fd >> 6) + 1) << 6; /* by slice of 64 entries */
339 target_fd_trans = g_renew(TargetFdTrans *,
340 target_fd_trans, target_fd_max);
341 memset((void *)(target_fd_trans + oldmax), 0,
342 (target_fd_max - oldmax) * sizeof(TargetFdTrans *));
344 target_fd_trans[fd] = trans;
/* Drop any translator registered for fd (e.g. when it is closed). */
347 static void fd_trans_unregister(int fd)
349 if (fd >= 0 && fd < target_fd_max) {
350 target_fd_trans[fd] = NULL;
/* dup() semantics: newfd inherits oldfd's translator (or none). */
354 static void fd_trans_dup(int oldfd, int newfd)
356 fd_trans_unregister(newfd);
357 if (oldfd < target_fd_max && target_fd_trans[oldfd]) {
358 fd_trans_register(newfd, target_fd_trans[oldfd]);
/* getcwd emulation helper: on success return the string length
 * including the NUL terminator (the kernel getcwd convention); on
 * failure getcwd() has already set errno for the caller. */
362 static int sys_getcwd1(char *buf, size_t size)
364 if (getcwd(buf, size) == NULL) {
365 /* getcwd() sets errno */
368 return strlen(buf)+1;
/* utimensat(2): prefer the glibc wrapper when available.  A NULL
 * pathname means "operate on dirfd itself", which maps to futimens().
 * Otherwise fall back to the raw syscall if the host kernel has it. */
371 #ifdef TARGET_NR_utimensat
372 #ifdef CONFIG_UTIMENSAT
373 static int sys_utimensat(int dirfd, const char *pathname,
374 const struct timespec times[2], int flags)
376 if (pathname == NULL)
377 return futimens(dirfd, times);
379 return utimensat(dirfd, pathname, times, flags);
381 #elif defined(__NR_utimensat)
382 #define __NR_sys_utimensat __NR_utimensat
383 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
384 const struct timespec *,tsp,int,flags)
/* NOTE(review): the stub body of the final fallback branch is not
 * visible in this fragment. */
386 static int sys_utimensat(int dirfd, const char *pathname,
387 const struct timespec times[2], int flags)
393 #endif /* TARGET_NR_utimensat */
/* inotify support: thin wrappers around the glibc functions when
 * CONFIG_INOTIFY is set.  Without it, the TARGET_NR_inotify_* numbers
 * are #undef'd below so the guest simply gets ENOSYS. */
395 #ifdef CONFIG_INOTIFY
396 #include <sys/inotify.h>
398 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
399 static int sys_inotify_init(void)
401 return (inotify_init());
404 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
405 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
407 return (inotify_add_watch(fd, pathname, mask));
410 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
411 static int sys_inotify_rm_watch(int fd, int32_t wd)
413 return (inotify_rm_watch(fd, wd));
416 #ifdef CONFIG_INOTIFY1
417 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
418 static int sys_inotify_init1(int flags)
420 return (inotify_init1(flags));
425 /* Userspace can usually survive runtime without inotify */
426 #undef TARGET_NR_inotify_init
427 #undef TARGET_NR_inotify_init1
428 #undef TARGET_NR_inotify_add_watch
429 #undef TARGET_NR_inotify_rm_watch
430 #endif /* CONFIG_INOTIFY */
/* ppoll: define __NR_ppoll as -1 when the host lacks it so the
 * generated wrapper still compiles (the call then fails at runtime). */
432 #if defined(TARGET_NR_ppoll)
434 # define __NR_ppoll -1
436 #define __NR_sys_ppoll __NR_ppoll
437 _syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
438 struct timespec *, timeout, const sigset_t *, sigmask,
/* prlimit64: invoke the raw syscall with our own 64-bit rlimit struct,
 * since the glibc struct rlimit may differ from the kernel's layout. */
442 #if defined(TARGET_NR_prlimit64)
443 #ifndef __NR_prlimit64
444 # define __NR_prlimit64 -1
446 #define __NR_sys_prlimit64 __NR_prlimit64
447 /* The glibc rlimit structure may not be that used by the underlying syscall */
448 struct host_rlimit64 {
452 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
453 const struct host_rlimit64 *, new_limit,
454 struct host_rlimit64 *, old_limit)
458 #if defined(TARGET_NR_timer_create)
459 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
460 static timer_t g_posix_timers[32] = { 0, } ;
/* Find a free slot in g_posix_timers and claim it by storing a dummy
 * non-zero value, returning the slot index.  NOTE(review): the
 * behaviour when no slot is free is not visible in this fragment. */
462 static inline int next_free_host_timer(void)
465 /* FIXME: Does finding the next free slot require a lock? */
466 for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
467 if (g_posix_timers[k] == 0) {
468 g_posix_timers[k] = (timer_t) 1;
476 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
/* regpairs_aligned(): whether this target ABI passes 64-bit syscall
 * arguments in aligned (even/odd) register pairs; used when unpacking
 * split hi/lo syscall arguments. */
478 static inline int regpairs_aligned(void *cpu_env) {
479 return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
481 #elif defined(TARGET_MIPS)
482 static inline int regpairs_aligned(void *cpu_env) { return 1; }
483 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
484 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
485 * of registers which translates to the same as ARM/MIPS, because we start with
487 static inline int regpairs_aligned(void *cpu_env) { return 1; }
/* All other targets: no register-pair alignment required. */
489 static inline int regpairs_aligned(void *cpu_env) { return 0; }
/* Host <-> target errno translation.  Only errnos whose numeric value
 * differs between host and target need entries; everything else is
 * translated as the identity by host_to_target_errno(). */
492 #define ERRNO_TABLE_SIZE 1200
494 /* target_to_host_errno_table[] is initialized from
495 * host_to_target_errno_table[] in syscall_init(). */
496 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
500 * This list is the union of errno values overridden in asm-<arch>/errno.h
501 * minus the errnos that are not actually generic to all archs.
503 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
504 [EAGAIN] = TARGET_EAGAIN,
505 [EIDRM] = TARGET_EIDRM,
506 [ECHRNG] = TARGET_ECHRNG,
507 [EL2NSYNC] = TARGET_EL2NSYNC,
508 [EL3HLT] = TARGET_EL3HLT,
509 [EL3RST] = TARGET_EL3RST,
510 [ELNRNG] = TARGET_ELNRNG,
511 [EUNATCH] = TARGET_EUNATCH,
512 [ENOCSI] = TARGET_ENOCSI,
513 [EL2HLT] = TARGET_EL2HLT,
514 [EDEADLK] = TARGET_EDEADLK,
515 [ENOLCK] = TARGET_ENOLCK,
516 [EBADE] = TARGET_EBADE,
517 [EBADR] = TARGET_EBADR,
518 [EXFULL] = TARGET_EXFULL,
519 [ENOANO] = TARGET_ENOANO,
520 [EBADRQC] = TARGET_EBADRQC,
521 [EBADSLT] = TARGET_EBADSLT,
522 [EBFONT] = TARGET_EBFONT,
523 [ENOSTR] = TARGET_ENOSTR,
524 [ENODATA] = TARGET_ENODATA,
525 [ETIME] = TARGET_ETIME,
526 [ENOSR] = TARGET_ENOSR,
527 [ENONET] = TARGET_ENONET,
528 [ENOPKG] = TARGET_ENOPKG,
529 [EREMOTE] = TARGET_EREMOTE,
530 [ENOLINK] = TARGET_ENOLINK,
531 [EADV] = TARGET_EADV,
532 [ESRMNT] = TARGET_ESRMNT,
533 [ECOMM] = TARGET_ECOMM,
534 [EPROTO] = TARGET_EPROTO,
535 [EDOTDOT] = TARGET_EDOTDOT,
536 [EMULTIHOP] = TARGET_EMULTIHOP,
537 [EBADMSG] = TARGET_EBADMSG,
538 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
539 [EOVERFLOW] = TARGET_EOVERFLOW,
540 [ENOTUNIQ] = TARGET_ENOTUNIQ,
541 [EBADFD] = TARGET_EBADFD,
542 [EREMCHG] = TARGET_EREMCHG,
543 [ELIBACC] = TARGET_ELIBACC,
544 [ELIBBAD] = TARGET_ELIBBAD,
545 [ELIBSCN] = TARGET_ELIBSCN,
546 [ELIBMAX] = TARGET_ELIBMAX,
547 [ELIBEXEC] = TARGET_ELIBEXEC,
548 [EILSEQ] = TARGET_EILSEQ,
549 [ENOSYS] = TARGET_ENOSYS,
550 [ELOOP] = TARGET_ELOOP,
551 [ERESTART] = TARGET_ERESTART,
552 [ESTRPIPE] = TARGET_ESTRPIPE,
553 [ENOTEMPTY] = TARGET_ENOTEMPTY,
554 [EUSERS] = TARGET_EUSERS,
555 [ENOTSOCK] = TARGET_ENOTSOCK,
556 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
557 [EMSGSIZE] = TARGET_EMSGSIZE,
558 [EPROTOTYPE] = TARGET_EPROTOTYPE,
559 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
560 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
561 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
562 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
563 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
564 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
565 [EADDRINUSE] = TARGET_EADDRINUSE,
566 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
567 [ENETDOWN] = TARGET_ENETDOWN,
568 [ENETUNREACH] = TARGET_ENETUNREACH,
569 [ENETRESET] = TARGET_ENETRESET,
570 [ECONNABORTED] = TARGET_ECONNABORTED,
571 [ECONNRESET] = TARGET_ECONNRESET,
572 [ENOBUFS] = TARGET_ENOBUFS,
573 [EISCONN] = TARGET_EISCONN,
574 [ENOTCONN] = TARGET_ENOTCONN,
575 [EUCLEAN] = TARGET_EUCLEAN,
576 [ENOTNAM] = TARGET_ENOTNAM,
577 [ENAVAIL] = TARGET_ENAVAIL,
578 [EISNAM] = TARGET_EISNAM,
579 [EREMOTEIO] = TARGET_EREMOTEIO,
580 [ESHUTDOWN] = TARGET_ESHUTDOWN,
581 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
582 [ETIMEDOUT] = TARGET_ETIMEDOUT,
583 [ECONNREFUSED] = TARGET_ECONNREFUSED,
584 [EHOSTDOWN] = TARGET_EHOSTDOWN,
585 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
586 [EALREADY] = TARGET_EALREADY,
587 [EINPROGRESS] = TARGET_EINPROGRESS,
588 [ESTALE] = TARGET_ESTALE,
589 [ECANCELED] = TARGET_ECANCELED,
590 [ENOMEDIUM] = TARGET_ENOMEDIUM,
591 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
/* The following errnos are only present on some hosts; their guarding
 * #ifdef lines are not all visible in this fragment. */
593 [ENOKEY] = TARGET_ENOKEY,
596 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
599 [EKEYREVOKED] = TARGET_EKEYREVOKED,
602 [EKEYREJECTED] = TARGET_EKEYREJECTED,
605 [EOWNERDEAD] = TARGET_EOWNERDEAD,
607 #ifdef ENOTRECOVERABLE
608 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
/* Translate a host errno to the target value; errnos with no table
 * entry (or out of table range) translate unchanged. */
612 static inline int host_to_target_errno(int err)
614 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
615 host_to_target_errno_table[err]) {
616 return host_to_target_errno_table[err];
/* Inverse translation, via the table filled in by syscall_init(). */
621 static inline int target_to_host_errno(int err)
623 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
624 target_to_host_errno_table[err]) {
625 return target_to_host_errno_table[err];
/* Convert a host call result into a target result: failures become the
 * negated target errno (taken from the host errno). */
630 static inline abi_long get_errno(abi_long ret)
633 return -host_to_target_errno(errno);
/* Linux kernel convention: return values in [-4095, -1] are errors. */
638 static inline int is_error(abi_long ret)
640 return (abi_ulong)ret >= (abi_ulong)(-4096);
/* strerror() for a *target* errno value, range-checked first. */
643 char *target_strerror(int err)
645 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
648 return strerror(target_to_host_errno(err));
/* safe_syscallN(): like _syscallN but built on safe_syscall(), which
 * cooperates with the signal-handling code so that guest signals can
 * interrupt (and correctly restart) blocking host syscalls.
 * NOTE(review): some brace lines of these macro bodies are missing in
 * this fragment; no comments are inserted between continued lines. */
651 #define safe_syscall0(type, name) \
652 static type safe_##name(void) \
654 return safe_syscall(__NR_##name); \
657 #define safe_syscall1(type, name, type1, arg1) \
658 static type safe_##name(type1 arg1) \
660 return safe_syscall(__NR_##name, arg1); \
663 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
664 static type safe_##name(type1 arg1, type2 arg2) \
666 return safe_syscall(__NR_##name, arg1, arg2); \
669 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
670 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
672 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
675 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
677 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
679 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
682 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
683 type4, arg4, type5, arg5) \
684 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
687 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
690 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
691 type4, arg4, type5, arg5, type6, arg6) \
692 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
693 type5 arg5, type6 arg6) \
695 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
/* Restartable wrappers for the blocking host syscalls the emulation
 * issues on behalf of the guest (file I/O, process wait, select/futex,
 * signals, socket I/O, SysV message queues). */
698 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
699 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
700 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
701 int, flags, mode_t, mode)
702 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
703 struct rusage *, rusage)
704 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
705 int, options, struct rusage *, rusage)
706 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
707 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
708 fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
709 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
710 const struct timespec *,timeout,int *,uaddr2,int,val3)
711 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
712 safe_syscall2(int, kill, pid_t, pid, int, sig)
713 safe_syscall2(int, tkill, int, tid, int, sig)
714 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
715 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
716 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
717 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
719 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
720 int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
721 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
722 int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
723 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
724 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
725 safe_syscall2(int, flock, int, fd, int, operation)
727 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
729 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
730 long, msgtype, int, flags)
732 /* This host kernel architecture uses a single ipc syscall; fake up
733 * wrappers for the sub-operations to hide this implementation detail.
734 * Annoyingly we can't include linux/ipc.h to get the constant definitions
735 * for the call parameter because some structs in there conflict with the
736 * sys/ipc.h ones. So we just define them here, and rely on them being
737 * the same for all host architectures.
741 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
743 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
744 void *, ptr, long, fifth)
/* msgsnd via the ipc(2) multiplexer; argument order follows the
 * kernel's ipc() calling convention. */
745 static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
747 return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
/* msgrcv via ipc(2); version 1 passes msgp and msgtype directly. */
749 static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
751 return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
/* Restartable POSIX message queue send/receive with timeout. */
754 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
755 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
756 size_t, len, unsigned, prio, const struct timespec *, timeout)
757 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
758 size_t, len, unsigned *, prio, const struct timespec *, timeout)
/* Convert a host socket type value (base type in the low nibble plus
 * optional SOCK_CLOEXEC/SOCK_NONBLOCK flag bits) to the target's
 * encoding; unknown base types pass through unchanged. */
761 static inline int host_to_target_sock_type(int host_type)
765 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
767 target_type = TARGET_SOCK_DGRAM;
770 target_type = TARGET_SOCK_STREAM;
773 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
/* Flag bits are optional on the host; translate only when defined. */
777 #if defined(SOCK_CLOEXEC)
778 if (host_type & SOCK_CLOEXEC) {
779 target_type |= TARGET_SOCK_CLOEXEC;
783 #if defined(SOCK_NONBLOCK)
784 if (host_type & SOCK_NONBLOCK) {
785 target_type |= TARGET_SOCK_NONBLOCK;
/* Guest heap state for brk(2) emulation:
 *   target_original_brk - initial break, lower bound for new values
 *   target_brk          - current break as seen by the guest
 *   brk_page            - host-page-aligned top of the mapped heap
 */
792 static abi_ulong target_brk;
793 static abi_ulong target_original_brk;
794 static abi_ulong brk_page;
/* Record the guest's initial break value (host-page aligned). */
796 void target_set_brk(abi_ulong new_brk)
798 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
799 brk_page = HOST_PAGE_ALIGN(target_brk);
/* Uncomment the first definition to trace the brk emulation. */
802 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
803 #define DEBUGF_BRK(message, args...)
805 /* do_brk() must return target values and target errnos. */
/* Emulate brk(2): query (new_brk == 0), shrink/no-op within the pages
 * already reserved, or grow the heap by mapping more anonymous memory
 * just above brk_page. */
806 abi_long do_brk(abi_ulong new_brk)
808 abi_long mapped_addr;
811 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
814 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
/* The break may never move below its initial value. */
817 if (new_brk < target_original_brk) {
818 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
823 /* If the new brk is less than the highest page reserved to the
824 * target heap allocation, set it and we're almost done... */
825 if (new_brk <= brk_page) {
826 /* Heap contents are initialized to zero, as for anonymous
828 if (new_brk > target_brk) {
829 memset(g2h(target_brk), 0, new_brk - target_brk);
831 target_brk = new_brk;
832 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
836 /* We need to allocate more memory after the brk... Note that
837 * we don't use MAP_FIXED because that will map over the top of
838 * any existing mapping (like the one with the host libc or qemu
839 * itself); instead we treat "mapped but at wrong address" as
840 * a failure and unmap again.
842 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
843 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
844 PROT_READ|PROT_WRITE,
845 MAP_ANON|MAP_PRIVATE, 0, 0));
847 if (mapped_addr == brk_page) {
848 /* Heap contents are initialized to zero, as for anonymous
849 * mapped pages. Technically the new pages are already
850 * initialized to zero since they *are* anonymous mapped
851 * pages, however we have to take care with the contents that
852 * come from the remaining part of the previous page: it may
853 * contains garbage data due to a previous heap usage (grown
855 memset(g2h(target_brk), 0, brk_page - target_brk);
857 target_brk = new_brk;
858 brk_page = HOST_PAGE_ALIGN(target_brk);
859 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
862 } else if (mapped_addr != -1) {
863 /* Mapped but at wrong address, meaning there wasn't actually
864 * enough space for this brk.
866 target_munmap(mapped_addr, new_alloc_size);
868 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
871 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
874 #if defined(TARGET_ALPHA)
875 /* We (partially) emulate OSF/1 on Alpha, which requires we
876 return a proper errno, not an unchanged brk value. */
877 return -TARGET_ENOMEM;
879 /* For everything else, return the previous break. */
/* Copy a guest fd_set (stored as nw abi_ulong bitmask words) into a
 * host fd_set, bit by bit.  Returns 0 or -TARGET_EFAULT if the guest
 * memory cannot be locked. */
883 static inline abi_long copy_from_user_fdset(fd_set *fds,
884 abi_ulong target_fds_addr,
888 abi_ulong b, *target_fds;
890 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
891 if (!(target_fds = lock_user(VERIFY_READ,
893 sizeof(abi_ulong) * nw,
895 return -TARGET_EFAULT;
899 for (i = 0; i < nw; i++) {
900 /* grab the abi_ulong */
901 __get_user(b, &target_fds[i]);
902 for (j = 0; j < TARGET_ABI_BITS; j++) {
903 /* check the bit inside the abi_ulong */
910 unlock_user(target_fds, target_fds_addr, 0);
/* As copy_from_user_fdset(), but a zero guest address stands for a
 * NULL fd_set (reflected through *fds_ptr for the select call). */
915 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
916 abi_ulong target_fds_addr,
919 if (target_fds_addr) {
920 if (copy_from_user_fdset(fds, target_fds_addr, n))
921 return -TARGET_EFAULT;
/* Copy a host fd_set back to the guest, repacking the bits into
 * abi_ulong words.  Returns -TARGET_EFAULT if the guest memory cannot
 * be locked for writing. */
929 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
935 abi_ulong *target_fds;
937 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
938 if (!(target_fds = lock_user(VERIFY_WRITE,
940 sizeof(abi_ulong) * nw,
942 return -TARGET_EFAULT;
945 for (i = 0; i < nw; i++) {
947 for (j = 0; j < TARGET_ABI_BITS; j++) {
948 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
951 __put_user(v, &target_fds[i]);
954 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
959 #if defined(__alpha__)
/* Convert a host clock_t tick count to target ticks, rescaling through
 * 64-bit arithmetic when host and target HZ differ. */
965 static inline abi_long host_to_target_clock_t(long ticks)
967 #if HOST_HZ == TARGET_HZ
970 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
/* Copy a host struct rusage into guest memory, byte-swapping each
 * field to the target's endianness.  Returns -TARGET_EFAULT if the
 * guest struct cannot be locked for writing. */
974 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
975 const struct rusage *rusage)
977 struct target_rusage *target_rusage;
979 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
980 return -TARGET_EFAULT;
981 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
982 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
983 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
984 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
985 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
986 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
987 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
988 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
989 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
990 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
991 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
992 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
993 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
994 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
995 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
996 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
997 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
998 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
999 unlock_user_struct(target_rusage, target_addr, 1);
/* Convert a target rlimit value to host rlim_t.  The target's infinity
 * marker, and any value that does not round-trip through rlim_t,
 * become RLIM_INFINITY. */
1004 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1006 abi_ulong target_rlim_swap;
1009 target_rlim_swap = tswapal(target_rlim);
1010 if (target_rlim_swap == TARGET_RLIM_INFINITY)
1011 return RLIM_INFINITY;
1013 result = target_rlim_swap;
1014 if (target_rlim_swap != (rlim_t)result)
1015 return RLIM_INFINITY;
/* Inverse conversion: host infinity, or a value that does not fit in
 * abi_long, becomes TARGET_RLIM_INFINITY (byte-swapped for guest). */
1020 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1022 abi_ulong target_rlim_swap;
1025 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1026 target_rlim_swap = TARGET_RLIM_INFINITY;
1028 target_rlim_swap = rlim;
1029 result = tswapal(target_rlim_swap);
/* Map a TARGET_RLIMIT_* resource code to the host RLIMIT_* constant
 * for the *limit family of syscalls.  NOTE(review): the default case
 * is not visible in this fragment. */
1034 static inline int target_to_host_resource(int code)
1037 case TARGET_RLIMIT_AS:
1039 case TARGET_RLIMIT_CORE:
1041 case TARGET_RLIMIT_CPU:
1043 case TARGET_RLIMIT_DATA:
1045 case TARGET_RLIMIT_FSIZE:
1046 return RLIMIT_FSIZE;
1047 case TARGET_RLIMIT_LOCKS:
1048 return RLIMIT_LOCKS;
1049 case TARGET_RLIMIT_MEMLOCK:
1050 return RLIMIT_MEMLOCK;
1051 case TARGET_RLIMIT_MSGQUEUE:
1052 return RLIMIT_MSGQUEUE;
1053 case TARGET_RLIMIT_NICE:
1055 case TARGET_RLIMIT_NOFILE:
1056 return RLIMIT_NOFILE;
1057 case TARGET_RLIMIT_NPROC:
1058 return RLIMIT_NPROC;
1059 case TARGET_RLIMIT_RSS:
1061 case TARGET_RLIMIT_RTPRIO:
1062 return RLIMIT_RTPRIO;
1063 case TARGET_RLIMIT_SIGPENDING:
1064 return RLIMIT_SIGPENDING;
1065 case TARGET_RLIMIT_STACK:
1066 return RLIMIT_STACK;
/* Copy a struct timeval from guest memory (byte-swapped by
 * __get_user).  Returns 0 or -TARGET_EFAULT. */
1072 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1073 abi_ulong target_tv_addr)
1075 struct target_timeval *target_tv;
1077 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1078 return -TARGET_EFAULT;
1080 __get_user(tv->tv_sec, &target_tv->tv_sec);
1081 __get_user(tv->tv_usec, &target_tv->tv_usec);
1083 unlock_user_struct(target_tv, target_tv_addr, 0);
/* Copy a struct timeval out to guest memory.  Returns 0 or
 * -TARGET_EFAULT. */
1088 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1089 const struct timeval *tv)
1091 struct target_timeval *target_tv;
1093 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1094 return -TARGET_EFAULT;
1096 __put_user(tv->tv_sec, &target_tv->tv_sec);
1097 __put_user(tv->tv_usec, &target_tv->tv_usec);
1099 unlock_user_struct(target_tv, target_tv_addr, 1);
/* Copy a struct timezone in from guest memory (for settimeofday). */
1104 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1105 abi_ulong target_tz_addr)
1107 struct target_timezone *target_tz;
1109 if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1110 return -TARGET_EFAULT;
1113 __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1114 __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1116 unlock_user_struct(target_tz, target_tz_addr, 0);
1121 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
/* Copy a struct mq_attr in from guest memory for mq_open/mq_setattr.
 * Returns 0 or -TARGET_EFAULT. */
1124 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1125 abi_ulong target_mq_attr_addr)
1127 struct target_mq_attr *target_mq_attr;
1129 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1130 target_mq_attr_addr, 1))
1131 return -TARGET_EFAULT;
1133 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1134 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1135 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1136 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1138 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
/* Copy a struct mq_attr back out to guest memory (mq_getattr). */
1143 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1144 const struct mq_attr *attr)
1146 struct target_mq_attr *target_mq_attr;
1148 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1149 target_mq_attr_addr, 0))
1150 return -TARGET_EFAULT;
1152 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1153 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1154 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1155 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1157 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1163 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1164 /* do_select() must return target values and target errnos. */
/* Emulates the guest select() syscall: @n is the highest fd + 1, the
 * three fd-set addresses and the timeout address are guest pointers
 * (any of which may be 0).  Implemented on top of safe_pselect6.
 */
1165 static abi_long do_select(int n,
1166 abi_ulong rfd_addr, abi_ulong wfd_addr,
1167 abi_ulong efd_addr, abi_ulong target_tv_addr)
1169 fd_set rfds, wfds, efds;
1170 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1172 struct timespec ts, *ts_ptr;
/* Pull each guest fd set into host form; the *_ptr outputs are what is
 * actually handed to pselect6.
 */
1175 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1179 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1183 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
/* Convert the guest timeval (usec) into a host timespec (nsec). */
1188 if (target_tv_addr) {
1189 if (copy_from_user_timeval(&tv, target_tv_addr))
1190 return -TARGET_EFAULT;
1191 ts.tv_sec = tv.tv_sec;
1192 ts.tv_nsec = tv.tv_usec * 1000;
1198 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
/* On success copy the (possibly modified) fd sets and the remaining
 * timeout back out to the guest.
 */
1201 if (!is_error(ret)) {
1202 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1203 return -TARGET_EFAULT;
1204 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1205 return -TARGET_EFAULT;
1206 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1207 return -TARGET_EFAULT;
1209 if (target_tv_addr) {
1210 tv.tv_sec = ts.tv_sec;
1211 tv.tv_usec = ts.tv_nsec / 1000;
1212 if (copy_to_user_timeval(target_tv_addr, &tv)) {
1213 return -TARGET_EFAULT;
/* Create a pipe with flags via the host pipe2() syscall.  The two new
 * host fds are stored into @host_pipe.
 */
1222 static abi_long do_pipe2(int host_pipe[], int flags)
1225 return pipe2(host_pipe, flags);
/* Emulate pipe()/pipe2() for the guest.  @pipedes is the guest address
 * of the two-int result array; @is_pipe2 distinguishes the syscall used.
 * Note the per-architecture special cases below which return the second
 * fd in a CPU register instead of through memory.
 */
1231 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1232 int flags, int is_pipe2)
/* Use pipe2() only when flags were given; plain pipe() otherwise. */
1236 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1239 return get_errno(ret);
1241 /* Several targets have special calling conventions for the original
1242 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1244 #if defined(TARGET_ALPHA)
1245 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1246 return host_pipe[0];
1247 #elif defined(TARGET_MIPS)
1248 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1249 return host_pipe[0];
1250 #elif defined(TARGET_SH4)
1251 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1252 return host_pipe[0];
1253 #elif defined(TARGET_SPARC)
1254 ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1255 return host_pipe[0];
/* Generic path: write both fds into the guest array. */
1259 if (put_user_s32(host_pipe[0], pipedes)
1260 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1261 return -TARGET_EFAULT;
1262 return get_errno(ret);
/* Convert a guest ip_mreq/ip_mreqn multicast request of @len bytes into
 * the host ip_mreqn @mreqn.  The ifindex field is only present (and only
 * swapped) when the guest passed a full ip_mreqn.
 */
1265 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1266 abi_ulong target_addr,
1269 struct target_ip_mreqn *target_smreqn;
1271 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1273 return -TARGET_EFAULT;
/* IP addresses are already in network byte order - copy verbatim. */
1274 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1275 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1276 if (len == sizeof(struct target_ip_mreqn))
1277 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1278 unlock_user(target_smreqn, target_addr, 0);
/* Convert a guest sockaddr of @len bytes at @target_addr into host form
 * in @addr.  Handles per-fd translation hooks, AF_UNIX path-length
 * quirks, and byte-swapping for AF_NETLINK / AF_PACKET addresses.
 */
1283 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1284 abi_ulong target_addr,
1287 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1288 sa_family_t sa_family;
1289 struct target_sockaddr *target_saddr;
/* Some fd types (registered via fd_trans) carry their own converter. */
1291 if (fd_trans_target_to_host_addr(fd)) {
1292 return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1295 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1297 return -TARGET_EFAULT;
1299 sa_family = tswap16(target_saddr->sa_family);
1301 /* Oops. The caller might send a incomplete sun_path; sun_path
1302 * must be terminated by \0 (see the manual page), but
1303 * unfortunately it is quite common to specify sockaddr_un
1304 * length as "strlen(x->sun_path)" while it should be
1305 * "strlen(...) + 1". We'll fix that here if needed.
1306 * Linux kernel has a similar feature.
1309 if (sa_family == AF_UNIX) {
1310 if (len < unix_maxlen && len > 0) {
1311 char *cp = (char*)target_saddr;
/* Last byte non-NUL but the next one is NUL: grow len by one. */
1313 if ( cp[len-1] && !cp[len] )
1316 if (len > unix_maxlen)
1320 memcpy(addr, target_saddr, len);
1321 addr->sa_family = sa_family;
/* Netlink and packet addresses contain multi-byte fields that need
 * swapping beyond the family.
 */
1322 if (sa_family == AF_NETLINK) {
1323 struct sockaddr_nl *nladdr;
1325 nladdr = (struct sockaddr_nl *)addr;
1326 nladdr->nl_pid = tswap32(nladdr->nl_pid);
1327 nladdr->nl_groups = tswap32(nladdr->nl_groups);
1328 } else if (sa_family == AF_PACKET) {
1329 struct target_sockaddr_ll *lladdr;
1331 lladdr = (struct target_sockaddr_ll *)addr;
1332 lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1333 lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1335 unlock_user(target_saddr, target_addr, 0);
/* Copy a host sockaddr @addr of @len bytes out to guest memory at
 * @target_addr, swapping the family and - for AF_NETLINK - the pid and
 * groups fields.
 */
1340 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1341 struct sockaddr *addr,
1344 struct target_sockaddr *target_saddr;
1346 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1348 return -TARGET_EFAULT;
1349 memcpy(target_saddr, addr, len);
1350 target_saddr->sa_family = tswap16(addr->sa_family);
1351 if (addr->sa_family == AF_NETLINK) {
1352 struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
1353 target_nl->nl_pid = tswap32(target_nl->nl_pid);
1354 target_nl->nl_groups = tswap32(target_nl->nl_groups);
1356 unlock_user(target_saddr, target_addr, len);
/* Convert the ancillary-data (control message) chain of a guest msghdr
 * into host cmsg records inside @msgh.  Supported payloads are
 * SCM_RIGHTS (fd arrays) and SCM_CREDENTIALS; anything else is copied
 * verbatim with a warning.  On return msg_controllen reflects the host
 * space actually used.
 */
1361 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1362 struct target_msghdr *target_msgh)
1364 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1365 abi_long msg_controllen;
1366 abi_ulong target_cmsg_addr;
1367 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1368 socklen_t space = 0;
1370 msg_controllen = tswapal(target_msgh->msg_controllen);
1371 if (msg_controllen < sizeof (struct target_cmsghdr))
1373 target_cmsg_addr = tswapal(target_msgh->msg_control);
1374 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1375 target_cmsg_start = target_cmsg;
1377 return -TARGET_EFAULT;
/* Walk guest and host cmsg chains in lockstep. */
1379 while (cmsg && target_cmsg) {
1380 void *data = CMSG_DATA(cmsg);
1381 void *target_data = TARGET_CMSG_DATA(target_cmsg);
/* Payload length = guest cmsg_len minus its (aligned) header. */
1383 int len = tswapal(target_cmsg->cmsg_len)
1384 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
1386 space += CMSG_SPACE(len);
1387 if (space > msgh->msg_controllen) {
1388 space -= CMSG_SPACE(len);
1389 /* This is a QEMU bug, since we allocated the payload
1390 * area ourselves (unlike overflow in host-to-target
1391 * conversion, which is just the guest giving us a buffer
1392 * that's too small). It can't happen for the payload types
1393 * we currently support; if it becomes an issue in future
1394 * we would need to improve our allocation strategy to
1395 * something more intelligent than "twice the size of the
1396 * target buffer we're reading from".
1398 gemu_log("Host cmsg overflow\n");
1402 if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1403 cmsg->cmsg_level = SOL_SOCKET;
1405 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1407 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1408 cmsg->cmsg_len = CMSG_LEN(len);
/* SCM_RIGHTS: element-wise swap of the fd array. */
1410 if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1411 int *fd = (int *)data;
1412 int *target_fd = (int *)target_data;
1413 int i, numfds = len / sizeof(int);
1415 for (i = 0; i < numfds; i++) {
1416 __get_user(fd[i], target_fd + i);
1418 } else if (cmsg->cmsg_level == SOL_SOCKET
1419 && cmsg->cmsg_type == SCM_CREDENTIALS) {
1420 struct ucred *cred = (struct ucred *)data;
1421 struct target_ucred *target_cred =
1422 (struct target_ucred *)target_data;
1424 __get_user(cred->pid, &target_cred->pid);
1425 __get_user(cred->uid, &target_cred->uid);
1426 __get_user(cred->gid, &target_cred->gid);
/* Unknown payload: pass through raw bytes, warn once per message. */
1428 gemu_log("Unsupported ancillary data: %d/%d\n",
1429 cmsg->cmsg_level, cmsg->cmsg_type);
1430 memcpy(data, target_data, len);
1433 cmsg = CMSG_NXTHDR(msgh, cmsg);
1434 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1437 unlock_user(target_cmsg, target_cmsg_addr, 0);
1439 msgh->msg_controllen = space;
/* Convert the ancillary-data chain received in host msghdr @msgh into
 * guest cmsg records in @target_msgh.  Truncation due to an undersized
 * guest buffer is reported via MSG_CTRUNC, mirroring the kernel's
 * put_cmsg() semantics.
 */
1443 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1444 struct msghdr *msgh)
1446 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1447 abi_long msg_controllen;
1448 abi_ulong target_cmsg_addr;
1449 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1450 socklen_t space = 0;
1452 msg_controllen = tswapal(target_msgh->msg_controllen);
1453 if (msg_controllen < sizeof (struct target_cmsghdr))
1455 target_cmsg_addr = tswapal(target_msgh->msg_control);
1456 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1457 target_cmsg_start = target_cmsg;
1459 return -TARGET_EFAULT;
1461 while (cmsg && target_cmsg) {
1462 void *data = CMSG_DATA(cmsg);
1463 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1465 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
1466 int tgt_len, tgt_space;
1468 /* We never copy a half-header but may copy half-data;
1469 * this is Linux's behaviour in put_cmsg(). Note that
1470 * truncation here is a guest problem (which we report
1471 * to the guest via the CTRUNC bit), unlike truncation
1472 * in target_to_host_cmsg, which is a QEMU bug.
1474 if (msg_controllen < sizeof(struct cmsghdr)) {
1475 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1479 if (cmsg->cmsg_level == SOL_SOCKET) {
1480 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1482 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1484 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1486 tgt_len = TARGET_CMSG_LEN(len);
1488 /* Payload types which need a different size of payload on
1489 * the target must adjust tgt_len here.
1491 switch (cmsg->cmsg_level) {
1493 switch (cmsg->cmsg_type) {
/* SO_TIMESTAMP payload is a timeval whose target layout differs. */
1495 tgt_len = sizeof(struct target_timeval);
/* Clamp to the guest's remaining control buffer and flag truncation. */
1504 if (msg_controllen < tgt_len) {
1505 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1506 tgt_len = msg_controllen;
1509 /* We must now copy-and-convert len bytes of payload
1510 * into tgt_len bytes of destination space. Bear in mind
1511 * that in both source and destination we may be dealing
1512 * with a truncated value!
1514 switch (cmsg->cmsg_level) {
1516 switch (cmsg->cmsg_type) {
1519 int *fd = (int *)data;
1520 int *target_fd = (int *)target_data;
1521 int i, numfds = tgt_len / sizeof(int);
1523 for (i = 0; i < numfds; i++) {
1524 __put_user(fd[i], target_fd + i);
1530 struct timeval *tv = (struct timeval *)data;
1531 struct target_timeval *target_tv =
1532 (struct target_timeval *)target_data;
1534 if (len != sizeof(struct timeval) ||
1535 tgt_len != sizeof(struct target_timeval)) {
1539 /* copy struct timeval to target */
1540 __put_user(tv->tv_sec, &target_tv->tv_sec);
1541 __put_user(tv->tv_usec, &target_tv->tv_usec);
1544 case SCM_CREDENTIALS:
1546 struct ucred *cred = (struct ucred *)data;
1547 struct target_ucred *target_cred =
1548 (struct target_ucred *)target_data;
1550 __put_user(cred->pid, &target_cred->pid);
1551 __put_user(cred->uid, &target_cred->uid);
1552 __put_user(cred->gid, &target_cred->gid);
/* Unknown payload: raw copy, zero-padding if the target slot is larger. */
1562 gemu_log("Unsupported ancillary data: %d/%d\n",
1563 cmsg->cmsg_level, cmsg->cmsg_type);
1564 memcpy(target_data, data, MIN(len, tgt_len));
1565 if (tgt_len > len) {
1566 memset(target_data + len, 0, tgt_len - len);
1570 target_cmsg->cmsg_len = tswapal(tgt_len);
1571 tgt_space = TARGET_CMSG_SPACE(len);
1572 if (msg_controllen < tgt_space) {
1573 tgt_space = msg_controllen;
1575 msg_controllen -= tgt_space;
1577 cmsg = CMSG_NXTHDR(msgh, cmsg);
1578 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1581 unlock_user(target_cmsg, target_cmsg_addr, space);
1583 target_msgh->msg_controllen = tswapal(space);
1587 static void tswap_nlmsghdr(struct nlmsghdr *nlh)
1589 nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
1590 nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
1591 nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
1592 nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
1593 nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
/* Walk a buffer of host-order netlink messages, swapping each header to
 * target order and applying @host_to_target_nlmsg to each payload.
 * NLMSG_ERROR messages additionally carry an embedded nlmsghdr that is
 * swapped here.
 */
1596 static abi_long host_to_target_for_each_nlmsg(struct nlmsghdr *nlh,
1598 abi_long (*host_to_target_nlmsg)
1599 (struct nlmsghdr *))
1604 while (len > sizeof(struct nlmsghdr)) {
1606 nlmsg_len = nlh->nlmsg_len;
/* Reject malformed lengths before trusting them for the walk. */
1607 if (nlmsg_len < sizeof(struct nlmsghdr) ||
1612 switch (nlh->nlmsg_type) {
1614 tswap_nlmsghdr(nlh);
1620 struct nlmsgerr *e = NLMSG_DATA(nlh);
1621 e->error = tswap32(e->error);
1622 tswap_nlmsghdr(&e->msg);
1623 tswap_nlmsghdr(nlh);
1627 ret = host_to_target_nlmsg(nlh);
1629 tswap_nlmsghdr(nlh);
1634 tswap_nlmsghdr(nlh);
/* Advance by the aligned length of the message just handled. */
1635 len -= NLMSG_ALIGN(nlmsg_len);
1636 nlh = (struct nlmsghdr *)(((char*)nlh) + NLMSG_ALIGN(nlmsg_len));
/* Walk a buffer of target-order netlink messages, swapping each header
 * to host order and applying @target_to_host_nlmsg to each payload.
 * Lengths are validated in target byte order before any swap.
 */
1641 static abi_long target_to_host_for_each_nlmsg(struct nlmsghdr *nlh,
1643 abi_long (*target_to_host_nlmsg)
1644 (struct nlmsghdr *))
1648 while (len > sizeof(struct nlmsghdr)) {
1649 if (tswap32(nlh->nlmsg_len) < sizeof(struct nlmsghdr) ||
1650 tswap32(nlh->nlmsg_len) > len) {
1653 tswap_nlmsghdr(nlh);
1654 switch (nlh->nlmsg_type) {
1661 struct nlmsgerr *e = NLMSG_DATA(nlh);
1662 e->error = tswap32(e->error);
1663 tswap_nlmsghdr(&e->msg);
1666 ret = target_to_host_nlmsg(nlh);
/* nlmsg_len is in host order here (header already swapped above). */
1671 len -= NLMSG_ALIGN(nlh->nlmsg_len);
1672 nlh = (struct nlmsghdr *)(((char *)nlh) + NLMSG_ALIGN(nlh->nlmsg_len));
1677 #ifdef CONFIG_RTNETLINK
/* Walk a buffer of host-order rtattr records, converting each payload
 * with @host_to_target_rtattr and then swapping the rta_len/rta_type
 * header fields to target order.
 */
1678 static abi_long host_to_target_for_each_rtattr(struct rtattr *rtattr,
1680 abi_long (*host_to_target_rtattr)
1683 unsigned short rta_len;
1686 while (len > sizeof(struct rtattr)) {
1687 rta_len = rtattr->rta_len;
1688 if (rta_len < sizeof(struct rtattr) ||
/* Payload is converted while the header is still in host order. */
1692 ret = host_to_target_rtattr(rtattr);
1693 rtattr->rta_len = tswap16(rtattr->rta_len);
1694 rtattr->rta_type = tswap16(rtattr->rta_type);
1698 len -= RTA_ALIGN(rta_len);
1699 rtattr = (struct rtattr *)(((char *)rtattr) + RTA_ALIGN(rta_len));
/* Convert the payload of one IFLA_* (link-level) attribute from host to
 * target byte order.  Scalar u32 attributes, rtnl_link_stats(64) and
 * rtnl_link_ifmap get field-wise swaps; binary attributes (addresses,
 * names) pass through untouched; unknown types are logged.
 */
1704 static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
1707 struct rtnl_link_stats *st;
1708 struct rtnl_link_stats64 *st64;
1709 struct rtnl_link_ifmap *map;
1711 switch (rtattr->rta_type) {
1714 case IFLA_BROADCAST:
1720 case IFLA_OPERSTATE:
1723 case IFLA_PROTO_DOWN:
/* Attributes whose payload is a single u32. */
1730 case IFLA_CARRIER_CHANGES:
1731 case IFLA_NUM_RX_QUEUES:
1732 case IFLA_NUM_TX_QUEUES:
1733 case IFLA_PROMISCUITY:
1735 case IFLA_LINK_NETNSID:
1739 u32 = RTA_DATA(rtattr);
1740 *u32 = tswap32(*u32);
1742 /* struct rtnl_link_stats */
1744 st = RTA_DATA(rtattr);
1745 st->rx_packets = tswap32(st->rx_packets);
1746 st->tx_packets = tswap32(st->tx_packets);
1747 st->rx_bytes = tswap32(st->rx_bytes);
1748 st->tx_bytes = tswap32(st->tx_bytes);
1749 st->rx_errors = tswap32(st->rx_errors);
1750 st->tx_errors = tswap32(st->tx_errors);
1751 st->rx_dropped = tswap32(st->rx_dropped);
1752 st->tx_dropped = tswap32(st->tx_dropped);
1753 st->multicast = tswap32(st->multicast);
1754 st->collisions = tswap32(st->collisions);
1756 /* detailed rx_errors: */
1757 st->rx_length_errors = tswap32(st->rx_length_errors);
1758 st->rx_over_errors = tswap32(st->rx_over_errors);
1759 st->rx_crc_errors = tswap32(st->rx_crc_errors);
1760 st->rx_frame_errors = tswap32(st->rx_frame_errors);
1761 st->rx_fifo_errors = tswap32(st->rx_fifo_errors);
1762 st->rx_missed_errors = tswap32(st->rx_missed_errors);
1764 /* detailed tx_errors */
1765 st->tx_aborted_errors = tswap32(st->tx_aborted_errors);
1766 st->tx_carrier_errors = tswap32(st->tx_carrier_errors);
1767 st->tx_fifo_errors = tswap32(st->tx_fifo_errors);
1768 st->tx_heartbeat_errors = tswap32(st->tx_heartbeat_errors);
1769 st->tx_window_errors = tswap32(st->tx_window_errors);
1772 st->rx_compressed = tswap32(st->rx_compressed);
1773 st->tx_compressed = tswap32(st->tx_compressed);
1775 /* struct rtnl_link_stats64 */
1777 st64 = RTA_DATA(rtattr);
1778 st64->rx_packets = tswap64(st64->rx_packets);
1779 st64->tx_packets = tswap64(st64->tx_packets);
1780 st64->rx_bytes = tswap64(st64->rx_bytes);
1781 st64->tx_bytes = tswap64(st64->tx_bytes);
1782 st64->rx_errors = tswap64(st64->rx_errors);
1783 st64->tx_errors = tswap64(st64->tx_errors);
1784 st64->rx_dropped = tswap64(st64->rx_dropped);
1785 st64->tx_dropped = tswap64(st64->tx_dropped);
1786 st64->multicast = tswap64(st64->multicast);
1787 st64->collisions = tswap64(st64->collisions);
1789 /* detailed rx_errors: */
1790 st64->rx_length_errors = tswap64(st64->rx_length_errors);
1791 st64->rx_over_errors = tswap64(st64->rx_over_errors);
1792 st64->rx_crc_errors = tswap64(st64->rx_crc_errors);
1793 st64->rx_frame_errors = tswap64(st64->rx_frame_errors);
1794 st64->rx_fifo_errors = tswap64(st64->rx_fifo_errors);
1795 st64->rx_missed_errors = tswap64(st64->rx_missed_errors);
1797 /* detailed tx_errors */
1798 st64->tx_aborted_errors = tswap64(st64->tx_aborted_errors);
1799 st64->tx_carrier_errors = tswap64(st64->tx_carrier_errors);
1800 st64->tx_fifo_errors = tswap64(st64->tx_fifo_errors);
1801 st64->tx_heartbeat_errors = tswap64(st64->tx_heartbeat_errors);
1802 st64->tx_window_errors = tswap64(st64->tx_window_errors);
1805 st64->rx_compressed = tswap64(st64->rx_compressed);
1806 st64->tx_compressed = tswap64(st64->tx_compressed);
1808 /* struct rtnl_link_ifmap */
1810 map = RTA_DATA(rtattr);
1811 map->mem_start = tswap64(map->mem_start);
1812 map->mem_end = tswap64(map->mem_end);
1813 map->base_addr = tswap64(map->base_addr);
1814 map->irq = tswap16(map->irq);
1819 /* FIXME: implement nested type */
1820 gemu_log("Unimplemented nested type %d\n", rtattr->rta_type);
1823 gemu_log("Unknown host IFLA type: %d\n", rtattr->rta_type);
/* Convert the payload of one IFA_* (address-level) attribute from host
 * to target byte order: u32 scalars and struct ifa_cacheinfo are
 * swapped, binary address blobs pass through, unknown types are logged.
 */
1829 static abi_long host_to_target_data_addr_rtattr(struct rtattr *rtattr)
1832 struct ifa_cacheinfo *ci;
1834 switch (rtattr->rta_type) {
1835 /* binary: depends on family type */
1845 u32 = RTA_DATA(rtattr);
1846 *u32 = tswap32(*u32);
1848 /* struct ifa_cacheinfo */
1850 ci = RTA_DATA(rtattr);
1851 ci->ifa_prefered = tswap32(ci->ifa_prefered);
1852 ci->ifa_valid = tswap32(ci->ifa_valid);
1853 ci->cstamp = tswap32(ci->cstamp);
1854 ci->tstamp = tswap32(ci->tstamp);
1857 gemu_log("Unknown host IFA type: %d\n", rtattr->rta_type);
/* Convert the payload of one RTA_* (route-level) attribute from host to
 * target byte order; only u32 scalars need swapping here.
 */
1863 static abi_long host_to_target_data_route_rtattr(struct rtattr *rtattr)
1866 switch (rtattr->rta_type) {
1867 /* binary: depends on family type */
1876 u32 = RTA_DATA(rtattr);
1877 *u32 = tswap32(*u32);
1880 gemu_log("Unknown host RTA type: %d\n", rtattr->rta_type);
1886 static abi_long host_to_target_link_rtattr(struct rtattr *rtattr,
1887 uint32_t rtattr_len)
1889 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
1890 host_to_target_data_link_rtattr);
1893 static abi_long host_to_target_addr_rtattr(struct rtattr *rtattr,
1894 uint32_t rtattr_len)
1896 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
1897 host_to_target_data_addr_rtattr);
1900 static abi_long host_to_target_route_rtattr(struct rtattr *rtattr,
1901 uint32_t rtattr_len)
1903 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
1904 host_to_target_data_route_rtattr);
/* Convert the payload of one routing-netlink message (RTM_*) from host
 * to target order: swap the fixed ifinfomsg/ifaddrmsg/rtmsg header and
 * then all trailing rtattr records.
 */
1907 static abi_long host_to_target_data_route(struct nlmsghdr *nlh)
1910 struct ifinfomsg *ifi;
1911 struct ifaddrmsg *ifa;
1914 nlmsg_len = nlh->nlmsg_len;
1915 switch (nlh->nlmsg_type) {
1919 ifi = NLMSG_DATA(nlh);
1920 ifi->ifi_type = tswap16(ifi->ifi_type);
1921 ifi->ifi_index = tswap32(ifi->ifi_index);
1922 ifi->ifi_flags = tswap32(ifi->ifi_flags);
1923 ifi->ifi_change = tswap32(ifi->ifi_change);
1924 host_to_target_link_rtattr(IFLA_RTA(ifi),
1925 nlmsg_len - NLMSG_LENGTH(sizeof(*ifi)));
1930 ifa = NLMSG_DATA(nlh);
1931 ifa->ifa_index = tswap32(ifa->ifa_index);
1932 host_to_target_addr_rtattr(IFA_RTA(ifa),
1933 nlmsg_len - NLMSG_LENGTH(sizeof(*ifa)));
1938 rtm = NLMSG_DATA(nlh);
1939 rtm->rtm_flags = tswap32(rtm->rtm_flags);
1940 host_to_target_route_rtattr(RTM_RTA(rtm),
1941 nlmsg_len - NLMSG_LENGTH(sizeof(*rtm)));
/* Unrecognised message type. */
1944 return -TARGET_EINVAL;
1949 static inline abi_long host_to_target_nlmsg_route(struct nlmsghdr *nlh,
1952 return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_route);
/* Walk a buffer of target-order rtattr records: validate lengths in
 * target order, swap the header fields to host order, then convert each
 * payload with @target_to_host_rtattr.
 */
1955 static abi_long target_to_host_for_each_rtattr(struct rtattr *rtattr,
1957 abi_long (*target_to_host_rtattr)
1962 while (len >= sizeof(struct rtattr)) {
1963 if (tswap16(rtattr->rta_len) < sizeof(struct rtattr) ||
1964 tswap16(rtattr->rta_len) > len) {
1967 rtattr->rta_len = tswap16(rtattr->rta_len);
1968 rtattr->rta_type = tswap16(rtattr->rta_type);
1969 ret = target_to_host_rtattr(rtattr);
/* rta_len is in host order once the header has been swapped. */
1973 len -= RTA_ALIGN(rtattr->rta_len);
1974 rtattr = (struct rtattr *)(((char *)rtattr) +
1975 RTA_ALIGN(rtattr->rta_len));
/* Convert one guest IFLA_* attribute payload to host order.  No
 * link-level attribute conversions are implemented yet; unknown types
 * are only logged.
 */
1980 static abi_long target_to_host_data_link_rtattr(struct rtattr *rtattr)
1982 switch (rtattr->rta_type) {
1984 gemu_log("Unknown target IFLA type: %d\n", rtattr->rta_type);
/* Convert one guest IFA_* attribute payload to host order.  Binary
 * address blobs need no swapping; unknown types are only logged.
 */
1990 static abi_long target_to_host_data_addr_rtattr(struct rtattr *rtattr)
1992 switch (rtattr->rta_type) {
1993 /* binary: depends on family type */
1998 gemu_log("Unknown target IFA type: %d\n", rtattr->rta_type);
/* Convert one guest RTA_* attribute payload to host order: u32 scalars
 * are swapped, binary blobs pass through, unknown types are logged.
 */
2004 static abi_long target_to_host_data_route_rtattr(struct rtattr *rtattr)
2007 switch (rtattr->rta_type) {
2008 /* binary: depends on family type */
2015 u32 = RTA_DATA(rtattr);
2016 *u32 = tswap32(*u32);
2019 gemu_log("Unknown target RTA type: %d\n", rtattr->rta_type);
2025 static void target_to_host_link_rtattr(struct rtattr *rtattr,
2026 uint32_t rtattr_len)
2028 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2029 target_to_host_data_link_rtattr);
2032 static void target_to_host_addr_rtattr(struct rtattr *rtattr,
2033 uint32_t rtattr_len)
2035 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2036 target_to_host_data_addr_rtattr);
2039 static void target_to_host_route_rtattr(struct rtattr *rtattr,
2040 uint32_t rtattr_len)
2042 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2043 target_to_host_data_route_rtattr);
/* Convert the payload of one guest routing-netlink request to host
 * order: swap the fixed ifinfomsg/ifaddrmsg/rtmsg header and then all
 * trailing rtattr records.
 */
2046 static abi_long target_to_host_data_route(struct nlmsghdr *nlh)
2048 struct ifinfomsg *ifi;
2049 struct ifaddrmsg *ifa;
2052 switch (nlh->nlmsg_type) {
2057 ifi = NLMSG_DATA(nlh);
2058 ifi->ifi_type = tswap16(ifi->ifi_type);
2059 ifi->ifi_index = tswap32(ifi->ifi_index);
2060 ifi->ifi_flags = tswap32(ifi->ifi_flags);
2061 ifi->ifi_change = tswap32(ifi->ifi_change);
2062 target_to_host_link_rtattr(IFLA_RTA(ifi), nlh->nlmsg_len -
2063 NLMSG_LENGTH(sizeof(*ifi)));
2068 ifa = NLMSG_DATA(nlh);
2069 ifa->ifa_index = tswap32(ifa->ifa_index);
2070 target_to_host_addr_rtattr(IFA_RTA(ifa), nlh->nlmsg_len -
2071 NLMSG_LENGTH(sizeof(*ifa)));
2077 rtm = NLMSG_DATA(nlh);
2078 rtm->rtm_flags = tswap32(rtm->rtm_flags);
2079 target_to_host_route_rtattr(RTM_RTA(rtm), nlh->nlmsg_len -
2080 NLMSG_LENGTH(sizeof(*rtm)));
/* Unrecognised message type. */
2083 return -TARGET_EOPNOTSUPP;
2088 static abi_long target_to_host_nlmsg_route(struct nlmsghdr *nlh, size_t len)
2090 return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_route);
2092 #endif /* CONFIG_RTNETLINK */
/* Convert one audit-netlink reply payload from host to target order.
 * Unknown message types are logged and rejected with -TARGET_EINVAL.
 */
2094 static abi_long host_to_target_data_audit(struct nlmsghdr *nlh)
2096 switch (nlh->nlmsg_type) {
2098 gemu_log("Unknown host audit message type %d\n",
2100 return -TARGET_EINVAL;
2105 static inline abi_long host_to_target_nlmsg_audit(struct nlmsghdr *nlh,
2108 return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_audit);
/* Convert one guest audit-netlink request payload to host order.  The
 * user-message ranges are accepted as-is; unknown types are logged and
 * rejected with -TARGET_EINVAL.
 */
2111 static abi_long target_to_host_data_audit(struct nlmsghdr *nlh)
2113 switch (nlh->nlmsg_type) {
2115 case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
2116 case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
2119 gemu_log("Unknown target audit message type %d\n",
2121 return -TARGET_EINVAL;
2127 static abi_long target_to_host_nlmsg_audit(struct nlmsghdr *nlh, size_t len)
2129 return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_audit);
2132 /* do_setsockopt() Must return target values and target errnos. */
/* Emulate setsockopt(2): translate the option level/name and the guest
 * option value at @optval_addr (of @optlen bytes) into host form before
 * issuing the host syscall.
 */
2133 static abi_long do_setsockopt(int sockfd, int level, int optname,
2134 abi_ulong optval_addr, socklen_t optlen)
2138 struct ip_mreqn *ip_mreq;
2139 struct ip_mreq_source *ip_mreq_source;
2143 /* TCP options all take an 'int' value. */
2144 if (optlen < sizeof(uint32_t))
2145 return -TARGET_EINVAL;
2147 if (get_user_u32(val, optval_addr))
2148 return -TARGET_EFAULT;
2149 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
/* IP-level options whose value may be either a u32 or a single byte. */
2156 case IP_ROUTER_ALERT:
2160 case IP_MTU_DISCOVER:
2166 case IP_MULTICAST_TTL:
2167 case IP_MULTICAST_LOOP:
2169 if (optlen >= sizeof(uint32_t)) {
2170 if (get_user_u32(val, optval_addr))
2171 return -TARGET_EFAULT;
2172 } else if (optlen >= 1) {
2173 if (get_user_u8(val, optval_addr))
2174 return -TARGET_EFAULT;
2176 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
/* Multicast membership: accept ip_mreq or the longer ip_mreqn. */
2178 case IP_ADD_MEMBERSHIP:
2179 case IP_DROP_MEMBERSHIP:
2180 if (optlen < sizeof (struct target_ip_mreq) ||
2181 optlen > sizeof (struct target_ip_mreqn))
2182 return -TARGET_EINVAL;
2184 ip_mreq = (struct ip_mreqn *) alloca(optlen);
2185 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2186 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
/* Source-specific multicast: struct layout matches, pass through. */
2189 case IP_BLOCK_SOURCE:
2190 case IP_UNBLOCK_SOURCE:
2191 case IP_ADD_SOURCE_MEMBERSHIP:
2192 case IP_DROP_SOURCE_MEMBERSHIP:
2193 if (optlen != sizeof (struct target_ip_mreq_source))
2194 return -TARGET_EINVAL;
2196 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2197 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2198 unlock_user (ip_mreq_source, optval_addr, 0);
/* IPv6 options taking an 'int' value. */
2207 case IPV6_MTU_DISCOVER:
2210 case IPV6_RECVPKTINFO:
2212 if (optlen < sizeof(uint32_t)) {
2213 return -TARGET_EINVAL;
2215 if (get_user_u32(val, optval_addr)) {
2216 return -TARGET_EFAULT;
2218 ret = get_errno(setsockopt(sockfd, level, optname,
2219 &val, sizeof(val)));
2228 /* struct icmp_filter takes an u32 value */
2229 if (optlen < sizeof(uint32_t)) {
2230 return -TARGET_EINVAL;
2233 if (get_user_u32(val, optval_addr)) {
2234 return -TARGET_EFAULT;
2236 ret = get_errno(setsockopt(sockfd, level, optname,
2237 &val, sizeof(val)));
2244 case TARGET_SOL_SOCKET:
/* SO_RCVTIMEO/SO_SNDTIMEO carry a struct timeval that needs converting. */
2246 case TARGET_SO_RCVTIMEO:
2250 optname = SO_RCVTIMEO;
2253 if (optlen != sizeof(struct target_timeval)) {
2254 return -TARGET_EINVAL;
2257 if (copy_from_user_timeval(&tv, optval_addr)) {
2258 return -TARGET_EFAULT;
2261 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2265 case TARGET_SO_SNDTIMEO:
2266 optname = SO_SNDTIMEO;
/* SO_ATTACH_FILTER: deep-copy and swap the BPF program element-wise. */
2268 case TARGET_SO_ATTACH_FILTER:
2270 struct target_sock_fprog *tfprog;
2271 struct target_sock_filter *tfilter;
2272 struct sock_fprog fprog;
2273 struct sock_filter *filter;
2276 if (optlen != sizeof(*tfprog)) {
2277 return -TARGET_EINVAL;
2279 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2280 return -TARGET_EFAULT;
2282 if (!lock_user_struct(VERIFY_READ, tfilter,
2283 tswapal(tfprog->filter), 0)) {
2284 unlock_user_struct(tfprog, optval_addr, 1);
2285 return -TARGET_EFAULT;
2288 fprog.len = tswap16(tfprog->len);
2289 filter = g_try_new(struct sock_filter, fprog.len);
2290 if (filter == NULL) {
2291 unlock_user_struct(tfilter, tfprog->filter, 1);
2292 unlock_user_struct(tfprog, optval_addr, 1);
2293 return -TARGET_ENOMEM;
2295 for (i = 0; i < fprog.len; i++) {
2296 filter[i].code = tswap16(tfilter[i].code);
2297 filter[i].jt = tfilter[i].jt;
2298 filter[i].jf = tfilter[i].jf;
2299 filter[i].k = tswap32(tfilter[i].k);
2301 fprog.filter = filter;
2303 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2304 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2307 unlock_user_struct(tfilter, tfprog->filter, 1);
2308 unlock_user_struct(tfprog, optval_addr, 1);
/* SO_BINDTODEVICE: copy the interface name, enforcing NUL termination. */
2311 case TARGET_SO_BINDTODEVICE:
2313 char *dev_ifname, *addr_ifname;
2315 if (optlen > IFNAMSIZ - 1) {
2316 optlen = IFNAMSIZ - 1;
2318 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2320 return -TARGET_EFAULT;
2322 optname = SO_BINDTODEVICE;
2323 addr_ifname = alloca(IFNAMSIZ);
2324 memcpy(addr_ifname, dev_ifname, optlen);
2325 addr_ifname[optlen] = 0;
2326 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2327 addr_ifname, optlen));
2328 unlock_user (dev_ifname, optval_addr, 0);
2331 /* Options with 'int' argument. */
2332 case TARGET_SO_DEBUG:
2335 case TARGET_SO_REUSEADDR:
2336 optname = SO_REUSEADDR;
2338 case TARGET_SO_TYPE:
2341 case TARGET_SO_ERROR:
2344 case TARGET_SO_DONTROUTE:
2345 optname = SO_DONTROUTE;
2347 case TARGET_SO_BROADCAST:
2348 optname = SO_BROADCAST;
2350 case TARGET_SO_SNDBUF:
2351 optname = SO_SNDBUF;
2353 case TARGET_SO_SNDBUFFORCE:
2354 optname = SO_SNDBUFFORCE;
2356 case TARGET_SO_RCVBUF:
2357 optname = SO_RCVBUF;
2359 case TARGET_SO_RCVBUFFORCE:
2360 optname = SO_RCVBUFFORCE;
2362 case TARGET_SO_KEEPALIVE:
2363 optname = SO_KEEPALIVE;
2365 case TARGET_SO_OOBINLINE:
2366 optname = SO_OOBINLINE;
2368 case TARGET_SO_NO_CHECK:
2369 optname = SO_NO_CHECK;
2371 case TARGET_SO_PRIORITY:
2372 optname = SO_PRIORITY;
2375 case TARGET_SO_BSDCOMPAT:
2376 optname = SO_BSDCOMPAT;
2379 case TARGET_SO_PASSCRED:
2380 optname = SO_PASSCRED;
2382 case TARGET_SO_PASSSEC:
2383 optname = SO_PASSSEC;
2385 case TARGET_SO_TIMESTAMP:
2386 optname = SO_TIMESTAMP;
2388 case TARGET_SO_RCVLOWAT:
2389 optname = SO_RCVLOWAT;
/* Shared tail for the plain 'int' socket options above. */
2395 if (optlen < sizeof(uint32_t))
2396 return -TARGET_EINVAL;
2398 if (get_user_u32(val, optval_addr))
2399 return -TARGET_EFAULT;
2400 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2404 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
2405 ret = -TARGET_ENOPROTOOPT;
2410 /* do_getsockopt() Must return target values and target errnos. */
/* Emulate getsockopt(2): fetch the option from the host socket and
 * convert the result (and its length) back into guest form at
 * @optval_addr / @optlen.
 */
2411 static abi_long do_getsockopt(int sockfd, int level, int optname,
2412 abi_ulong optval_addr, abi_ulong optlen)
2419 case TARGET_SOL_SOCKET:
2422 /* These don't just return a single integer */
2423 case TARGET_SO_LINGER:
2424 case TARGET_SO_RCVTIMEO:
2425 case TARGET_SO_SNDTIMEO:
2426 case TARGET_SO_PEERNAME:
/* SO_PEERCRED: fetch host ucred and copy it field-by-field to the
 * guest's target_ucred layout.
 */
2428 case TARGET_SO_PEERCRED: {
2431 struct target_ucred *tcr;
2433 if (get_user_u32(len, optlen)) {
2434 return -TARGET_EFAULT;
2437 return -TARGET_EINVAL;
2441 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2449 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2450 return -TARGET_EFAULT;
2452 __put_user(cr.pid, &tcr->pid);
2453 __put_user(cr.uid, &tcr->uid);
2454 __put_user(cr.gid, &tcr->gid);
2455 unlock_user_struct(tcr, optval_addr, 1);
2456 if (put_user_u32(len, optlen)) {
2457 return -TARGET_EFAULT;
2461 /* Options with 'int' argument. */
2462 case TARGET_SO_DEBUG:
2465 case TARGET_SO_REUSEADDR:
2466 optname = SO_REUSEADDR;
2468 case TARGET_SO_TYPE:
2471 case TARGET_SO_ERROR:
2474 case TARGET_SO_DONTROUTE:
2475 optname = SO_DONTROUTE;
2477 case TARGET_SO_BROADCAST:
2478 optname = SO_BROADCAST;
2480 case TARGET_SO_SNDBUF:
2481 optname = SO_SNDBUF;
2483 case TARGET_SO_RCVBUF:
2484 optname = SO_RCVBUF;
2486 case TARGET_SO_KEEPALIVE:
2487 optname = SO_KEEPALIVE;
2489 case TARGET_SO_OOBINLINE:
2490 optname = SO_OOBINLINE;
2492 case TARGET_SO_NO_CHECK:
2493 optname = SO_NO_CHECK;
2495 case TARGET_SO_PRIORITY:
2496 optname = SO_PRIORITY;
2499 case TARGET_SO_BSDCOMPAT:
2500 optname = SO_BSDCOMPAT;
2503 case TARGET_SO_PASSCRED:
2504 optname = SO_PASSCRED;
2506 case TARGET_SO_TIMESTAMP:
2507 optname = SO_TIMESTAMP;
2509 case TARGET_SO_RCVLOWAT:
2510 optname = SO_RCVLOWAT;
2512 case TARGET_SO_ACCEPTCONN:
2513 optname = SO_ACCEPTCONN;
/* Shared tail: fetch an 'int' option and write it back to the guest
 * honouring the guest-supplied buffer length.
 */
2520 /* TCP options all take an 'int' value. */
2522 if (get_user_u32(len, optlen))
2523 return -TARGET_EFAULT;
2525 return -TARGET_EINVAL;
2527 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2530 if (optname == SO_TYPE) {
2531 val = host_to_target_sock_type(val);
2536 if (put_user_u32(val, optval_addr))
2537 return -TARGET_EFAULT;
2539 if (put_user_u8(val, optval_addr))
2540 return -TARGET_EFAULT;
2542 if (put_user_u32(len, optlen))
2543 return -TARGET_EFAULT;
/* IP-level options: same u32-or-u8 duality as in do_setsockopt. */
2550 case IP_ROUTER_ALERT:
2554 case IP_MTU_DISCOVER:
2560 case IP_MULTICAST_TTL:
2561 case IP_MULTICAST_LOOP:
2562 if (get_user_u32(len, optlen))
2563 return -TARGET_EFAULT;
2565 return -TARGET_EINVAL;
2567 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2570 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2572 if (put_user_u32(len, optlen)
2573 || put_user_u8(val, optval_addr))
2574 return -TARGET_EFAULT;
2576 if (len > sizeof(int))
2578 if (put_user_u32(len, optlen)
2579 || put_user_u32(val, optval_addr))
2580 return -TARGET_EFAULT;
2584 ret = -TARGET_ENOPROTOOPT;
2590 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
2592 ret = -TARGET_EOPNOTSUPP;
/* Convert a guest iovec array at target_addr into a host struct iovec
 * array, locking each guest buffer into host memory.  On failure returns
 * NULL with errno set; on success the caller must release via
 * unlock_iovec().  NOTE(review): this excerpt is gapped (original line
 * numbers are non-contiguous), so some statements are not visible here.
 */
2598 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2599 int count, int copy)
2601 struct target_iovec *target_vec;
2603 abi_ulong total_len, max_len;
2606 bool bad_address = false;
/* Reject out-of-range vector counts before allocating anything. */
2612 if (count < 0 || count > IOV_MAX) {
2617 vec = g_try_new0(struct iovec, count)
2623 target_vec = lock_user(VERIFY_READ, target_addr,
2624 count * sizeof(struct target_iovec), 1);
2625 if (target_vec == NULL) {
2630 /* ??? If host page size > target page size, this will result in a
2631 value larger than what we can actually support. */
2632 max_len = 0x7fffffff & TARGET_PAGE_MASK;
2635 for (i = 0; i < count; i++) {
/* Each entry is byte-swapped from guest to host order. */
2636 abi_ulong base = tswapal(target_vec[i].iov_base);
2637 abi_long len = tswapal(target_vec[i].iov_len);
2642 } else if (len == 0) {
2643 /* Zero length pointer is ignored. */
2644 vec[i].iov_base = 0;
2646 vec[i].iov_base = lock_user(type, base, len, copy);
2647 /* If the first buffer pointer is bad, this is a fault. But
2648 * subsequent bad buffers will result in a partial write; this
2649 * is realized by filling the vector with null pointers and
2651 if (!vec[i].iov_base) {
/* Clamp the entry so the running total never exceeds max_len. */
2662 if (len > max_len - total_len) {
2663 len = max_len - total_len;
2666 vec[i].iov_len = len;
2670 unlock_user(target_vec, target_addr, 0);
/* Failure path: unlock every buffer locked so far, then the vector. */
2675 if (tswapal(target_vec[i].iov_len) > 0) {
2676 unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
2679 unlock_user(target_vec, target_addr, 0);
/* Release an iovec previously obtained from lock_iovec().  When 'copy'
 * is set, each buffer's contents are written back to guest memory
 * (the third argument to unlock_user is the byte count to copy back).
 */
2686 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2687 int count, int copy)
2689 struct target_iovec *target_vec;
2692 target_vec = lock_user(VERIFY_READ, target_addr,
2693 count * sizeof(struct target_iovec), 1);
2695 for (i = 0; i < count; i++) {
2696 abi_ulong base = tswapal(target_vec[i].iov_base);
2697 abi_long len = tswapal(target_vec[i].iov_len);
2701 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2703 unlock_user(target_vec, target_addr, 0);
/* Translate a guest socket type (base type plus TARGET_SOCK_CLOEXEC /
 * TARGET_SOCK_NONBLOCK flags) into the host equivalent in place.
 * Returns -TARGET_EINVAL when a requested flag cannot be represented
 * on this host.
 */
2709 static inline int target_to_host_sock_type(int *type)
2712 int target_type = *type;
2714 switch (target_type & TARGET_SOCK_TYPE_MASK) {
2715 case TARGET_SOCK_DGRAM:
2716 host_type = SOCK_DGRAM;
2718 case TARGET_SOCK_STREAM:
2719 host_type = SOCK_STREAM;
2722 host_type = target_type & TARGET_SOCK_TYPE_MASK;
2725 if (target_type & TARGET_SOCK_CLOEXEC) {
2726 #if defined(SOCK_CLOEXEC)
2727 host_type |= SOCK_CLOEXEC;
2729 return -TARGET_EINVAL;
2732 if (target_type & TARGET_SOCK_NONBLOCK) {
2733 #if defined(SOCK_NONBLOCK)
2734 host_type |= SOCK_NONBLOCK;
/* No SOCK_NONBLOCK: fixable later via O_NONBLOCK (sock_flags_fixup),
 * otherwise the request cannot be honoured at all. */
2735 #elif !defined(O_NONBLOCK)
2736 return -TARGET_EINVAL;
2743 /* Try to emulate socket type flags after socket creation. */
2744 static int sock_flags_fixup(int fd, int target_type)
/* Only needed when the host lacks SOCK_NONBLOCK but has O_NONBLOCK:
 * apply non-blocking mode with fcntl() after the socket exists. */
2746 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2747 if (target_type & TARGET_SOCK_NONBLOCK) {
2748 int flags = fcntl(fd, F_GETFL);
2749 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2751 return -TARGET_EINVAL;
/* Convert a guest sockaddr for SOCK_PACKET sockets to host format.
 * Only sa_family is byte-swapped; the rest is copied verbatim because
 * spkt_protocol is kept big-endian on the wire (see comment below).
 */
2758 static abi_long packet_target_to_host_sockaddr(void *host_addr,
2759 abi_ulong target_addr,
2762 struct sockaddr *addr = host_addr;
2763 struct target_sockaddr *target_saddr;
2765 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
2766 if (!target_saddr) {
2767 return -TARGET_EFAULT;
2770 memcpy(addr, target_saddr, len);
2771 addr->sa_family = tswap16(target_saddr->sa_family);
2772 /* spkt_protocol is big-endian */
2774 unlock_user(target_saddr, target_addr, 0);
/* fd translation hooks for SOCK_PACKET file descriptors. */
2778 static TargetFdTrans target_packet_trans = {
2779 .target_to_host_addr = packet_target_to_host_sockaddr,
2782 #ifdef CONFIG_RTNETLINK
/* Data translators for NETLINK_ROUTE sockets: byte-swap nlmsg payloads
 * between guest and host representations in both directions. */
2783 static abi_long netlink_route_target_to_host(void *buf, size_t len)
2785 return target_to_host_nlmsg_route(buf, len);
2788 static abi_long netlink_route_host_to_target(void *buf, size_t len)
2790 return host_to_target_nlmsg_route(buf, len);
2793 static TargetFdTrans target_netlink_route_trans = {
2794 .target_to_host_data = netlink_route_target_to_host,
2795 .host_to_target_data = netlink_route_host_to_target,
2797 #endif /* CONFIG_RTNETLINK */
/* Data translators for NETLINK_AUDIT sockets, mirroring the
 * NETLINK_ROUTE pair above. */
2799 static abi_long netlink_audit_target_to_host(void *buf, size_t len)
2801 return target_to_host_nlmsg_audit(buf, len);
2804 static abi_long netlink_audit_host_to_target(void *buf, size_t len)
2806 return host_to_target_nlmsg_audit(buf, len);
2809 static TargetFdTrans target_netlink_audit_trans = {
2810 .target_to_host_data = netlink_audit_target_to_host,
2811 .host_to_target_data = netlink_audit_host_to_target,
2814 /* do_socket() Must return target values and target errnos. */
2815 static abi_long do_socket(int domain, int type, int protocol)
2817 int target_type = type;
2820 ret = target_to_host_sock_type(&type);
/* Only a whitelist of netlink protocols is emulated; everything else
 * gets EPFNOSUPPORT rather than leaking untranslated messages. */
2825 if (domain == PF_NETLINK && !(
2826 #ifdef CONFIG_RTNETLINK
2827 protocol == NETLINK_ROUTE ||
2829 protocol == NETLINK_KOBJECT_UEVENT ||
2830 protocol == NETLINK_AUDIT)) {
2831 return -EPFNOSUPPORT;
/* Packet sockets carry a big-endian protocol number. */
2834 if (domain == AF_PACKET ||
2835 (domain == AF_INET && type == SOCK_PACKET)) {
2836 protocol = tswap16(protocol);
2839 ret = get_errno(socket(domain, type, protocol));
/* Emulate NONBLOCK/CLOEXEC flags the host socket() couldn't take. */
2841 ret = sock_flags_fixup(ret, target_type);
2842 if (type == SOCK_PACKET) {
2843 /* Manage an obsolete case :
2844 * if socket type is SOCK_PACKET, bind by name
2846 fd_trans_register(ret, &target_packet_trans);
2847 } else if (domain == PF_NETLINK) {
2849 #ifdef CONFIG_RTNETLINK
2851 fd_trans_register(ret, &target_netlink_route_trans);
2854 case NETLINK_KOBJECT_UEVENT:
2855 /* nothing to do: messages are strings */
2858 fd_trans_register(ret, &target_netlink_audit_trans);
/* Protocol was validated above, so any other value is impossible. */
2861 g_assert_not_reached();
2868 /* do_bind() Must return target values and target errnos. */
2869 static abi_long do_bind(int sockfd, abi_ulong target_addr,
2875 if ((int)addrlen < 0) {
2876 return -TARGET_EINVAL;
/* +1 so a zero addrlen still yields a valid allocation. */
2879 addr = alloca(addrlen+1);
2881 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2885 return get_errno(bind(sockfd, addr, addrlen));
2888 /* do_connect() Must return target values and target errnos. */
2889 static abi_long do_connect(int sockfd, abi_ulong target_addr,
2895 if ((int)addrlen < 0) {
2896 return -TARGET_EINVAL;
2899 addr = alloca(addrlen+1);
2901 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
/* safe_connect is the signal-safe syscall wrapper variant. */
2905 return get_errno(safe_connect(sockfd, addr, addrlen));
2908 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
/* Core of sendmsg/recvmsg emulation: msgp is an already-locked guest
 * msghdr; 'send' selects direction.  Converts name/control/iov fields
 * to host form, performs the syscall, and converts results back.
 */
2909 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
2910 int flags, int send)
2916 abi_ulong target_vec;
2918 if (msgp->msg_name) {
2919 msg.msg_namelen = tswap32(msgp->msg_namelen);
2920 msg.msg_name = alloca(msg.msg_namelen+1);
2921 ret = target_to_host_sockaddr(fd, msg.msg_name,
2922 tswapal(msgp->msg_name),
2928 msg.msg_name = NULL;
2929 msg.msg_namelen = 0;
/* Host cmsg layout may be larger than the guest's; double the guest
 * controllen to leave room for the converted control messages. */
2931 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
2932 msg.msg_control = alloca(msg.msg_controllen);
2933 msg.msg_flags = tswap32(msgp->msg_flags);
2935 count = tswapal(msgp->msg_iovlen);
2936 target_vec = tswapal(msgp->msg_iov);
2937 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
2938 target_vec, count, send);
2940 ret = -host_to_target_errno(errno);
2943 msg.msg_iovlen = count;
/* fd-specific data translator (e.g. netlink) takes precedence over
 * the generic cmsg conversion. */
2947 if (fd_trans_target_to_host_data(fd)) {
2948 ret = fd_trans_target_to_host_data(fd)(msg.msg_iov->iov_base,
2949 msg.msg_iov->iov_len);
2951 ret = target_to_host_cmsg(&msg, msgp);
2954 ret = get_errno(safe_sendmsg(fd, &msg, flags));
2957 ret = get_errno(safe_recvmsg(fd, &msg, flags));
2958 if (!is_error(ret)) {
2960 if (fd_trans_host_to_target_data(fd)) {
2961 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
2962 msg.msg_iov->iov_len);
2964 ret = host_to_target_cmsg(msgp, &msg);
2966 if (!is_error(ret)) {
2967 msgp->msg_namelen = tswap32(msg.msg_namelen);
2968 if (msg.msg_name != NULL) {
2969 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
2970 msg.msg_name, msg.msg_namelen);
/* Copy-back only on receive (!send). */
2982 unlock_iovec(vec, target_vec, count, !send);
/* Lock the guest msghdr and dispatch to do_sendrecvmsg_locked().
 * Direction picks the access check: read for send, write for recv.
 */
2987 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
2988 int flags, int send)
2991 struct target_msghdr *msgp;
2993 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
2997 return -TARGET_EFAULT;
2999 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3000 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3004 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3005 * so it might not have this *mmsg-specific flag either.
3007 #ifndef MSG_WAITFORONE
3008 #define MSG_WAITFORONE 0x10000
/* Emulate sendmmsg/recvmmsg by looping do_sendrecvmsg_locked() over
 * the guest mmsghdr vector; vlen is clamped to UIO_MAXIOV. */
3011 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3012 unsigned int vlen, unsigned int flags,
3015 struct target_mmsghdr *mmsgp;
3019 if (vlen > UIO_MAXIOV) {
3023 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3025 return -TARGET_EFAULT;
3028 for (i = 0; i < vlen; i++) {
3029 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3030 if (is_error(ret)) {
3033 mmsgp[i].msg_len = tswap32(ret);
3034 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3035 if (flags & MSG_WAITFORONE) {
3036 flags |= MSG_DONTWAIT;
/* Only the first i entries were written; unlock just those. */
3040 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3042 /* Return number of datagrams sent if we sent any at all;
3043 * otherwise return the error.
3051 /* If we don't have a system accept4() then just call accept.
3052 * The callsites to do_accept4() will ensure that they don't
3053 * pass a non-zero flags argument in this config.
3055 #ifndef CONFIG_ACCEPT4
/* Fallback shim: flags is intentionally ignored (guaranteed zero). */
3056 static inline int accept4(int sockfd, struct sockaddr *addr,
3057 socklen_t *addrlen, int flags)
3060 return accept(sockfd, addr, addrlen);
3064 /* do_accept4() Must return target values and target errnos. */
3065 static abi_long do_accept4(int fd, abi_ulong target_addr,
3066 abi_ulong target_addrlen_addr, int flags)
/* Translate guest SOCK_* flag bits to host fcntl-style flags. */
3073 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
/* NULL guest addr means caller doesn't want the peer address. */
3075 if (target_addr == 0) {
3076 return get_errno(accept4(fd, NULL, NULL, host_flags));
3079 /* linux returns EINVAL if addrlen pointer is invalid */
3080 if (get_user_u32(addrlen, target_addrlen_addr))
3081 return -TARGET_EINVAL;
3083 if ((int)addrlen < 0) {
3084 return -TARGET_EINVAL;
3087 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3088 return -TARGET_EINVAL;
3090 addr = alloca(addrlen);
3092 ret = get_errno(accept4(fd, addr, &addrlen, host_flags));
3093 if (!is_error(ret)) {
3094 host_to_target_sockaddr(target_addr, addr, addrlen);
3095 if (put_user_u32(addrlen, target_addrlen_addr))
3096 ret = -TARGET_EFAULT;
3101 /* do_getpeername() Must return target values and target errnos. */
3102 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3103 abi_ulong target_addrlen_addr)
3109 if (get_user_u32(addrlen, target_addrlen_addr))
3110 return -TARGET_EFAULT;
3112 if ((int)addrlen < 0) {
3113 return -TARGET_EINVAL;
3116 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3117 return -TARGET_EFAULT;
3119 addr = alloca(addrlen);
3121 ret = get_errno(getpeername(fd, addr, &addrlen));
3122 if (!is_error(ret)) {
/* Write the (possibly shortened) address and its length back. */
3123 host_to_target_sockaddr(target_addr, addr, addrlen);
3124 if (put_user_u32(addrlen, target_addrlen_addr))
3125 ret = -TARGET_EFAULT;
3130 /* do_getsockname() Must return target values and target errnos. */
/* Identical structure to do_getpeername(), but queries the local
 * address with getsockname(). */
3131 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3132 abi_ulong target_addrlen_addr)
3138 if (get_user_u32(addrlen, target_addrlen_addr))
3139 return -TARGET_EFAULT;
3141 if ((int)addrlen < 0) {
3142 return -TARGET_EINVAL;
3145 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3146 return -TARGET_EFAULT;
3148 addr = alloca(addrlen);
3150 ret = get_errno(getsockname(fd, addr, &addrlen));
3151 if (!is_error(ret)) {
3152 host_to_target_sockaddr(target_addr, addr, addrlen);
3153 if (put_user_u32(addrlen, target_addrlen_addr))
3154 ret = -TARGET_EFAULT;
3159 /* do_socketpair() Must return target values and target errnos. */
3160 static abi_long do_socketpair(int domain, int type, int protocol,
3161 abi_ulong target_tab_addr)
3166 target_to_host_sock_type(&type);
3168 ret = get_errno(socketpair(domain, type, protocol, tab));
3169 if (!is_error(ret)) {
/* Copy both fds out to the guest's two-element array. */
3170 if (put_user_s32(tab[0], target_tab_addr)
3171 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3172 ret = -TARGET_EFAULT;
3177 /* do_sendto() Must return target values and target errnos. */
/* Handles both send() (target_addr == 0) and sendto().  The payload is
 * locked read-only and may be rewritten in place by an fd-specific
 * translator before transmission. */
3178 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3179 abi_ulong target_addr, socklen_t addrlen)
3185 if ((int)addrlen < 0) {
3186 return -TARGET_EINVAL;
3189 host_msg = lock_user(VERIFY_READ, msg, len, 1);
3191 return -TARGET_EFAULT;
3192 if (fd_trans_target_to_host_data(fd)) {
3193 ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3195 unlock_user(host_msg, msg, 0);
3200 addr = alloca(addrlen+1);
3201 ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3203 unlock_user(host_msg, msg, 0);
3206 ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3208 ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3210 unlock_user(host_msg, msg, 0);
3214 /* do_recvfrom() Must return target values and target errnos. */
/* Handles both recv() (target_addr == 0) and recvfrom(); on success the
 * sender address (if requested) and its length are written back. */
3215 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3216 abi_ulong target_addr,
3217 abi_ulong target_addrlen)
3224 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3226 return -TARGET_EFAULT;
3228 if (get_user_u32(addrlen, target_addrlen)) {
3229 ret = -TARGET_EFAULT;
3232 if ((int)addrlen < 0) {
3233 ret = -TARGET_EINVAL;
3236 addr = alloca(addrlen);
3237 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3240 addr = NULL; /* To keep compiler quiet. */
3241 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3243 if (!is_error(ret)) {
3245 host_to_target_sockaddr(target_addr, addr, addrlen);
3246 if (put_user_u32(addrlen, target_addrlen)) {
3247 ret = -TARGET_EFAULT;
/* Success: copy received bytes back to the guest buffer. */
3251 unlock_user(host_msg, msg, len);
/* Error: release without copy-back. */
3254 unlock_user(host_msg, msg, 0);
3259 #ifdef TARGET_NR_socketcall
3260 /* do_socketcall() Must return target values and target errnos. */
/* Demultiplex the legacy socketcall(2) syscall: fetch the per-call
 * argument count from ac[], read that many abi_longs from guest memory
 * at vptr, then dispatch to the matching do_* helper. */
3261 static abi_long do_socketcall(int num, abi_ulong vptr)
3263 static const unsigned ac[] = { /* number of arguments per call */
3264 [SOCKOP_socket] = 3, /* domain, type, protocol */
3265 [SOCKOP_bind] = 3, /* sockfd, addr, addrlen */
3266 [SOCKOP_connect] = 3, /* sockfd, addr, addrlen */
3267 [SOCKOP_listen] = 2, /* sockfd, backlog */
3268 [SOCKOP_accept] = 3, /* sockfd, addr, addrlen */
3269 [SOCKOP_accept4] = 4, /* sockfd, addr, addrlen, flags */
3270 [SOCKOP_getsockname] = 3, /* sockfd, addr, addrlen */
3271 [SOCKOP_getpeername] = 3, /* sockfd, addr, addrlen */
3272 [SOCKOP_socketpair] = 4, /* domain, type, protocol, tab */
3273 [SOCKOP_send] = 4, /* sockfd, msg, len, flags */
3274 [SOCKOP_recv] = 4, /* sockfd, msg, len, flags */
3275 [SOCKOP_sendto] = 6, /* sockfd, msg, len, flags, addr, addrlen */
3276 [SOCKOP_recvfrom] = 6, /* sockfd, msg, len, flags, addr, addrlen */
3277 [SOCKOP_shutdown] = 2, /* sockfd, how */
3278 [SOCKOP_sendmsg] = 3, /* sockfd, msg, flags */
3279 [SOCKOP_recvmsg] = 3, /* sockfd, msg, flags */
3280 [SOCKOP_sendmmsg] = 4, /* sockfd, msgvec, vlen, flags */
3281 [SOCKOP_recvmmsg] = 4, /* sockfd, msgvec, vlen, flags */
3282 [SOCKOP_setsockopt] = 5, /* sockfd, level, optname, optval, optlen */
3283 [SOCKOP_getsockopt] = 5, /* sockfd, level, optname, optval, optlen */
3285 abi_long a[6]; /* max 6 args */
3287 /* first, collect the arguments in a[] according to ac[] */
3288 if (num >= 0 && num < ARRAY_SIZE(ac)) {
3290 assert(ARRAY_SIZE(a) >= ac[num]); /* ensure we have space for args */
3291 for (i = 0; i < ac[num]; ++i) {
3292 if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3293 return -TARGET_EFAULT;
3298 /* now when we have the args, actually handle the call */
3300 case SOCKOP_socket: /* domain, type, protocol */
3301 return do_socket(a[0], a[1], a[2]);
3302 case SOCKOP_bind: /* sockfd, addr, addrlen */
3303 return do_bind(a[0], a[1], a[2]);
3304 case SOCKOP_connect: /* sockfd, addr, addrlen */
3305 return do_connect(a[0], a[1], a[2]);
3306 case SOCKOP_listen: /* sockfd, backlog */
3307 return get_errno(listen(a[0], a[1]));
3308 case SOCKOP_accept: /* sockfd, addr, addrlen */
3309 return do_accept4(a[0], a[1], a[2], 0);
3310 case SOCKOP_accept4: /* sockfd, addr, addrlen, flags */
3311 return do_accept4(a[0], a[1], a[2], a[3]);
3312 case SOCKOP_getsockname: /* sockfd, addr, addrlen */
3313 return do_getsockname(a[0], a[1], a[2]);
3314 case SOCKOP_getpeername: /* sockfd, addr, addrlen */
3315 return do_getpeername(a[0], a[1], a[2]);
3316 case SOCKOP_socketpair: /* domain, type, protocol, tab */
3317 return do_socketpair(a[0], a[1], a[2], a[3]);
3318 case SOCKOP_send: /* sockfd, msg, len, flags */
3319 return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3320 case SOCKOP_recv: /* sockfd, msg, len, flags */
3321 return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3322 case SOCKOP_sendto: /* sockfd, msg, len, flags, addr, addrlen */
3323 return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3324 case SOCKOP_recvfrom: /* sockfd, msg, len, flags, addr, addrlen */
3325 return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3326 case SOCKOP_shutdown: /* sockfd, how */
3327 return get_errno(shutdown(a[0], a[1]));
3328 case SOCKOP_sendmsg: /* sockfd, msg, flags */
3329 return do_sendrecvmsg(a[0], a[1], a[2], 1);
3330 case SOCKOP_recvmsg: /* sockfd, msg, flags */
3331 return do_sendrecvmsg(a[0], a[1], a[2], 0);
3332 case SOCKOP_sendmmsg: /* sockfd, msgvec, vlen, flags */
3333 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3334 case SOCKOP_recvmmsg: /* sockfd, msgvec, vlen, flags */
3335 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3336 case SOCKOP_setsockopt: /* sockfd, level, optname, optval, optlen */
3337 return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3338 case SOCKOP_getsockopt: /* sockfd, level, optname, optval, optlen */
3339 return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3341 gemu_log("Unsupported socketcall: %d\n", num);
3342 return -TARGET_ENOSYS;
3347 #define N_SHM_REGIONS 32
/* Fixed-size table tracking guest shmat() mappings so shmdt()/shmctl()
 * can find them again.  Fields not visible in this excerpt. */
3349 static struct shm_region {
3353 } shm_regions[N_SHM_REGIONS];
/* Guest-layout semid_ds.  The 32-bit ABIs carry padding words after the
 * time fields that 64-bit PPC does not, hence the TARGET_PPC64 guards. */
3355 struct target_semid_ds
3357 struct target_ipc_perm sem_perm;
3358 abi_ulong sem_otime;
3359 #if !defined(TARGET_PPC64)
3360 abi_ulong __unused1;
3362 abi_ulong sem_ctime;
3363 #if !defined(TARGET_PPC64)
3364 abi_ulong __unused2;
3366 abi_ulong sem_nsems;
3367 abi_ulong __unused3;
3368 abi_ulong __unused4;
/* Copy the sem_perm member of a guest semid_ds at target_addr into a
 * host struct ipc_perm, byte-swapping each field.  mode/__seq are 16-bit
 * on most targets but 32-bit on Alpha/MIPS/PPC. */
3371 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3372 abi_ulong target_addr)
3374 struct target_ipc_perm *target_ip;
3375 struct target_semid_ds *target_sd;
3377 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3378 return -TARGET_EFAULT;
3379 target_ip = &(target_sd->sem_perm);
3380 host_ip->__key = tswap32(target_ip->__key);
3381 host_ip->uid = tswap32(target_ip->uid);
3382 host_ip->gid = tswap32(target_ip->gid);
3383 host_ip->cuid = tswap32(target_ip->cuid);
3384 host_ip->cgid = tswap32(target_ip->cgid);
3385 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3386 host_ip->mode = tswap32(target_ip->mode);
3388 host_ip->mode = tswap16(target_ip->mode);
3390 #if defined(TARGET_PPC)
3391 host_ip->__seq = tswap32(target_ip->__seq);
3393 host_ip->__seq = tswap16(target_ip->__seq);
3395 unlock_user_struct(target_sd, target_addr, 0);
/* Inverse of target_to_host_ipc_perm(): write a host ipc_perm into the
 * sem_perm member of the guest semid_ds at target_addr. */
3399 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3400 struct ipc_perm *host_ip)
3402 struct target_ipc_perm *target_ip;
3403 struct target_semid_ds *target_sd;
3405 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3406 return -TARGET_EFAULT;
3407 target_ip = &(target_sd->sem_perm);
3408 target_ip->__key = tswap32(host_ip->__key);
3409 target_ip->uid = tswap32(host_ip->uid);
3410 target_ip->gid = tswap32(host_ip->gid);
3411 target_ip->cuid = tswap32(host_ip->cuid);
3412 target_ip->cgid = tswap32(host_ip->cgid);
3413 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3414 target_ip->mode = tswap32(host_ip->mode);
3416 target_ip->mode = tswap16(host_ip->mode);
3418 #if defined(TARGET_PPC)
3419 target_ip->__seq = tswap32(host_ip->__seq);
3421 target_ip->__seq = tswap16(host_ip->__seq);
3423 unlock_user_struct(target_sd, target_addr, 1);
/* Convert a full guest semid_ds to host layout (perm member plus the
 * counters/timestamps). */
3427 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3428 abi_ulong target_addr)
3430 struct target_semid_ds *target_sd;
3432 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3433 return -TARGET_EFAULT;
3434 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3435 return -TARGET_EFAULT;
3436 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3437 host_sd->sem_otime = tswapal(target_sd->sem_otime);
3438 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3439 unlock_user_struct(target_sd, target_addr, 0);
/* Inverse of target_to_host_semid_ds(): write a host semid_ds back to
 * guest memory at target_addr. */
3443 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3444 struct semid_ds *host_sd)
3446 struct target_semid_ds *target_sd;
3448 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3449 return -TARGET_EFAULT;
3450 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3451 return -TARGET_EFAULT;
3452 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3453 target_sd->sem_otime = tswapal(host_sd->sem_otime);
3454 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3455 unlock_user_struct(target_sd, target_addr, 1);
3459 struct target_seminfo {
/* Copy a host struct seminfo (semctl IPC_INFO/SEM_INFO result) to the
 * guest, field by field via __put_user for correct swapping. */
3472 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3473 struct seminfo *host_seminfo)
3475 struct target_seminfo *target_seminfo;
3476 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3477 return -TARGET_EFAULT;
3478 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3479 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3480 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3481 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3482 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3483 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3484 __put_user(host_seminfo->semume, &target_seminfo->semume);
3485 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3486 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3487 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3488 unlock_user_struct(target_seminfo, target_addr, 1);
3494 struct semid_ds *buf;
3495 unsigned short *array;
3496 struct seminfo *__buf;
3499 union target_semun {
/* Allocate a host array of semaphore values (caller frees on success)
 * and fill it from the guest array at target_addr.  The element count
 * is queried from the kernel via IPC_STAT so the guest cannot lie
 * about it. */
3506 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3507 abi_ulong target_addr)
3510 unsigned short *array;
3512 struct semid_ds semid_ds;
3515 semun.buf = &semid_ds;
3517 ret = semctl(semid, 0, IPC_STAT, semun);
3519 return get_errno(ret);
3521 nsems = semid_ds.sem_nsems;
3523 *host_array = g_try_new(unsigned short, nsems);
3525 return -TARGET_ENOMEM;
3527 array = lock_user(VERIFY_READ, target_addr,
3528 nsems*sizeof(unsigned short), 1);
3530 g_free(*host_array);
3531 return -TARGET_EFAULT;
3534 for(i=0; i<nsems; i++) {
3535 __get_user((*host_array)[i], &array[i]);
3537 unlock_user(array, target_addr, 0);
/* Inverse of target_to_host_semarray(): copy the host value array back
 * to the guest and free it.  Count again comes from IPC_STAT. */
3542 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3543 unsigned short **host_array)
3546 unsigned short *array;
3548 struct semid_ds semid_ds;
3551 semun.buf = &semid_ds;
3553 ret = semctl(semid, 0, IPC_STAT, semun);
3555 return get_errno(ret);
3557 nsems = semid_ds.sem_nsems;
3559 array = lock_user(VERIFY_WRITE, target_addr,
3560 nsems*sizeof(unsigned short), 0);
3562 return -TARGET_EFAULT;
3564 for(i=0; i<nsems; i++) {
3565 __put_user((*host_array)[i], &array[i]);
3567 g_free(*host_array);
3568 unlock_user(array, target_addr, 1);
/* Emulate semctl(2): decode the guest's semun argument per command,
 * convert operands in, call the host semctl, and convert results out. */
3573 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3574 abi_ulong target_arg)
3576 union target_semun target_su = { .buf = target_arg };
3578 struct semid_ds dsarg;
3579 unsigned short *array = NULL;
3580 struct seminfo seminfo;
3581 abi_long ret = -TARGET_EINVAL;
3588 /* In 64 bit cross-endian situations, we will erroneously pick up
3589 * the wrong half of the union for the "val" element. To rectify
3590 * this, the entire 8-byte structure is byteswapped, followed by
3591 * a swap of the 4 byte val field. In other cases, the data is
3592 * already in proper host byte order. */
3593 if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3594 target_su.buf = tswapal(target_su.buf);
3595 arg.val = tswap32(target_su.val);
3597 arg.val = target_su.val;
3599 ret = get_errno(semctl(semid, semnum, cmd, arg));
/* GETALL/SETALL path: marshal the whole value array both ways. */
3603 err = target_to_host_semarray(semid, &array, target_su.array);
3607 ret = get_errno(semctl(semid, semnum, cmd, arg));
3608 err = host_to_target_semarray(semid, target_su.array, &array);
/* IPC_STAT/IPC_SET path: convert semid_ds both ways. */
3615 err = target_to_host_semid_ds(&dsarg, target_su.buf);
3619 ret = get_errno(semctl(semid, semnum, cmd, arg));
3620 err = host_to_target_semid_ds(target_su.buf, &dsarg);
/* IPC_INFO/SEM_INFO path. */
3626 arg.__buf = &seminfo;
3627 ret = get_errno(semctl(semid, semnum, cmd, arg));
3628 err = host_to_target_seminfo(target_su.__buf, &seminfo);
/* Commands taking no argument. */
3636 ret = get_errno(semctl(semid, semnum, cmd, NULL));
3643 struct target_sembuf {
3644 unsigned short sem_num;
/* Copy nsops guest sembuf entries into a caller-provided host array,
 * swapping each field. */
3649 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3650 abi_ulong target_addr,
3653 struct target_sembuf *target_sembuf;
3656 target_sembuf = lock_user(VERIFY_READ, target_addr,
3657 nsops*sizeof(struct target_sembuf), 1);
3659 return -TARGET_EFAULT;
3661 for(i=0; i<nsops; i++) {
3662 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3663 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3664 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3667 unlock_user(target_sembuf, target_addr, 0);
/* Emulate semop(2): convert the guest op array, then call the host.
 * NOTE(review): sops is a VLA sized by guest-supplied nsops — large
 * values risk stack exhaustion; confirm callers bound nsops. */
3672 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
3674 struct sembuf sops[nsops];
3676 if (target_to_host_sembuf(sops, ptr, nsops))
3677 return -TARGET_EFAULT;
3679 return get_errno(semop(semid, sops, nsops));
/* Guest-layout msqid_ds; 32-bit ABIs pad each time field to 64 bits,
 * hence the TARGET_ABI_BITS == 32 guards. */
3682 struct target_msqid_ds
3684 struct target_ipc_perm msg_perm;
3685 abi_ulong msg_stime;
3686 #if TARGET_ABI_BITS == 32
3687 abi_ulong __unused1;
3689 abi_ulong msg_rtime;
3690 #if TARGET_ABI_BITS == 32
3691 abi_ulong __unused2;
3693 abi_ulong msg_ctime;
3694 #if TARGET_ABI_BITS == 32
3695 abi_ulong __unused3;
3697 abi_ulong __msg_cbytes;
3699 abi_ulong msg_qbytes;
3700 abi_ulong msg_lspid;
3701 abi_ulong msg_lrpid;
3702 abi_ulong __unused4;
3703 abi_ulong __unused5;
/* Convert a guest msqid_ds at target_addr into host layout. */
3706 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3707 abi_ulong target_addr)
3709 struct target_msqid_ds *target_md;
3711 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3712 return -TARGET_EFAULT;
3713 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3714 return -TARGET_EFAULT;
3715 host_md->msg_stime = tswapal(target_md->msg_stime);
3716 host_md->msg_rtime = tswapal(target_md->msg_rtime);
3717 host_md->msg_ctime = tswapal(target_md->msg_ctime);
3718 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3719 host_md->msg_qnum = tswapal(target_md->msg_qnum);
3720 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3721 host_md->msg_lspid = tswapal(target_md->msg_lspid);
3722 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3723 unlock_user_struct(target_md, target_addr, 0);
/* Inverse of target_to_host_msqid_ds(): write host msqid_ds to guest. */
3727 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3728 struct msqid_ds *host_md)
3730 struct target_msqid_ds *target_md;
3732 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3733 return -TARGET_EFAULT;
3734 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
3735 return -TARGET_EFAULT;
3736 target_md->msg_stime = tswapal(host_md->msg_stime);
3737 target_md->msg_rtime = tswapal(host_md->msg_rtime);
3738 target_md->msg_ctime = tswapal(host_md->msg_ctime);
3739 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3740 target_md->msg_qnum = tswapal(host_md->msg_qnum);
3741 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3742 target_md->msg_lspid = tswapal(host_md->msg_lspid);
3743 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3744 unlock_user_struct(target_md, target_addr, 1);
3748 struct target_msginfo {
3756 unsigned short int msgseg;
/* Copy a host struct msginfo (msgctl IPC_INFO/MSG_INFO result) to the
 * guest, one field at a time. */
3759 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
3760 struct msginfo *host_msginfo)
3762 struct target_msginfo *target_msginfo;
3763 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
3764 return -TARGET_EFAULT;
3765 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
3766 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
3767 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
3768 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
3769 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
3770 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
3771 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
3772 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
3773 unlock_user_struct(target_msginfo, target_addr, 1);
/* Emulate msgctl(2): marshal the command-specific argument in and/or
 * out around the host msgctl call. */
3777 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
3779 struct msqid_ds dsarg;
3780 struct msginfo msginfo;
3781 abi_long ret = -TARGET_EINVAL;
/* IPC_STAT/IPC_SET: round-trip the msqid_ds. */
3789 if (target_to_host_msqid_ds(&dsarg,ptr))
3790 return -TARGET_EFAULT;
3791 ret = get_errno(msgctl(msgid, cmd, &dsarg));
3792 if (host_to_target_msqid_ds(ptr,&dsarg))
3793 return -TARGET_EFAULT;
/* IPC_RMID takes no buffer. */
3796 ret = get_errno(msgctl(msgid, cmd, NULL));
/* IPC_INFO/MSG_INFO: kernel fills a msginfo through the msqid_ds*. */
3800 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
3801 if (host_to_target_msginfo(ptr, &msginfo))
3802 return -TARGET_EFAULT;
3809 struct target_msgbuf {
/* Emulate msgsnd(2): copy the guest message (mtype + mtext) into a
 * heap-allocated host msgbuf, then send with the signal-safe wrapper. */
3814 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
3815 ssize_t msgsz, int msgflg)
3817 struct target_msgbuf *target_mb;
3818 struct msgbuf *host_mb;
3822 return -TARGET_EINVAL;
3825 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
3826 return -TARGET_EFAULT;
/* sizeof(long) covers the host mtype header preceding mtext. */
3827 host_mb = g_try_malloc(msgsz + sizeof(long));
3829 unlock_user_struct(target_mb, msgp, 0);
3830 return -TARGET_ENOMEM;
3832 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
3833 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
3834 ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
3836 unlock_user_struct(target_mb, msgp, 0);
/* Emulate msgrcv(2): receive into a host msgbuf, then copy mtype and
 * the received bytes (ret) back to the guest message buffer. */
3841 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
3842 ssize_t msgsz, abi_long msgtyp,
3845 struct target_msgbuf *target_mb;
3847 struct msgbuf *host_mb;
3851 return -TARGET_EINVAL;
3854 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
3855 return -TARGET_EFAULT;
3857 host_mb = g_try_malloc(msgsz + sizeof(long));
3859 ret = -TARGET_ENOMEM;
3862 ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
/* mtext in the guest struct sits right after the abi_ulong mtype. */
3865 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
3866 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
3867 if (!target_mtext) {
3868 ret = -TARGET_EFAULT;
3871 memcpy(target_mb->mtext, host_mb->mtext, ret);
3872 unlock_user(target_mtext, target_mtext_addr, ret);
3875 target_mb->mtype = tswapal(host_mb->mtype);
3879 unlock_user_struct(target_mb, msgp, 1);
/* Convert a guest shmid_ds at target_addr into host layout. */
3884 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
3885 abi_ulong target_addr)
3887 struct target_shmid_ds *target_sd;
3889 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3890 return -TARGET_EFAULT;
3891 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
3892 return -TARGET_EFAULT;
3893 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3894 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
3895 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3896 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3897 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3898 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3899 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3900 unlock_user_struct(target_sd, target_addr, 0);
/* Inverse of target_to_host_shmid_ds(): write host shmid_ds to guest. */
3904 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
3905 struct shmid_ds *host_sd)
3907 struct target_shmid_ds *target_sd;
3909 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3910 return -TARGET_EFAULT;
3911 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
3912 return -TARGET_EFAULT;
3913 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3914 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
3915 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3916 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3917 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3918 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3919 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3920 unlock_user_struct(target_sd, target_addr, 1);
/* Guest-layout shminfo (limits for shmctl(IPC_INFO)); field list is
 * elided in this view. */
3924 struct target_shminfo {
/* Copy host struct shminfo (shmctl IPC_INFO result) into the guest
 * target_shminfo at target_addr.  Returns 0 or -TARGET_EFAULT.
 */
3932 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
3933 struct shminfo *host_shminfo)
3935 struct target_shminfo *target_shminfo;
3936 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
3937 return -TARGET_EFAULT;
3938 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
3939 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
3940 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
3941 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
3942 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
3943 unlock_user_struct(target_shminfo, target_addr, 1);
/* Guest-layout shm_info (shmctl SHM_INFO result); leading fields are
 * elided in this view. */
3947 struct target_shm_info {
3952 abi_ulong swap_attempts;
3953 abi_ulong swap_successes;
/* Copy host struct shm_info (shmctl SHM_INFO result) into the guest
 * target_shm_info at target_addr.  Returns 0 or -TARGET_EFAULT.
 */
3956 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
3957 struct shm_info *host_shm_info)
3959 struct target_shm_info *target_shm_info;
3960 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
3961 return -TARGET_EFAULT;
3962 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
3963 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
3964 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
3965 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
3966 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
3967 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
3968 unlock_user_struct(target_shm_info, target_addr, 1);
/* Emulate shmctl(2): dispatch on 'cmd' (the case labels are elided in
 * this view), converting the argument structure between guest and host
 * layouts around the host shmctl() call.  'buf' is a guest address.
 * Returns the host result or a -TARGET_* errno.
 */
3972 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
3974 struct shmid_ds dsarg;
3975 struct shminfo shminfo;
3976 struct shm_info shm_info;
3977 abi_long ret = -TARGET_EINVAL;
/* IPC_STAT/IPC_SET-style commands: shmid_ds in both directions. */
3985 if (target_to_host_shmid_ds(&dsarg, buf))
3986 return -TARGET_EFAULT;
3987 ret = get_errno(shmctl(shmid, cmd, &dsarg));
3988 if (host_to_target_shmid_ds(buf, &dsarg))
3989 return -TARGET_EFAULT;
/* IPC_INFO: kernel fills a struct shminfo. */
3992 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
3993 if (host_to_target_shminfo(buf, &shminfo))
3994 return -TARGET_EFAULT;
/* SHM_INFO: kernel fills a struct shm_info. */
3997 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
3998 if (host_to_target_shm_info(buf, &shm_info))
3999 return -TARGET_EFAULT;
/* Commands that take no buffer (e.g. IPC_RMID-style). */
4004 ret = get_errno(shmctl(shmid, cmd, NULL));
/* Emulate shmat(2).  Attaches the segment either at the guest-requested
 * address or at a free spot found by mmap_find_vma(), then records the
 * mapping in shm_regions[] so do_shmdt() can later undo the page flags.
 * Returns the guest attach address or a negative errno value.
 * NOTE(review): locals, mmap_lock usage and the final return are elided
 * in this view -- confirm against the complete file.
 */
4011 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
4015 struct shmid_ds shm_info;
4018 /* find out the length of the shared memory segment */
4019 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4020 if (is_error(ret)) {
4021 /* can't get length, bail out */
/* Guest supplied an address: translate guest->host and attach there. */
4028 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
4030 abi_ulong mmap_start;
4032 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
4034 if (mmap_start == -1) {
4036 host_raddr = (void *)-1;
/* SHM_REMAP: the VMA reserved above is replaced by the attachment. */
4038 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
4041 if (host_raddr == (void *)-1) {
4043 return get_errno((long)host_raddr);
4045 raddr=h2g((unsigned long)host_raddr);
/* Make the attached range valid/readable (writable unless SHM_RDONLY)
 * in the guest page table. */
4047 page_set_flags(raddr, raddr + shm_info.shm_segsz,
4048 PAGE_VALID | PAGE_READ |
4049 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
/* Remember the region so do_shmdt can clear page flags on detach. */
4051 for (i = 0; i < N_SHM_REGIONS; i++) {
4052 if (!shm_regions[i].in_use) {
4053 shm_regions[i].in_use = true;
4054 shm_regions[i].start = raddr;
4055 shm_regions[i].size = shm_info.shm_segsz;
/* Emulate shmdt(2): find the matching entry recorded by do_shmat(),
 * clear the guest page flags for the range, and detach on the host.
 */
4065 static inline abi_long do_shmdt(abi_ulong shmaddr)
4069 for (i = 0; i < N_SHM_REGIONS; ++i) {
4070 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4071 shm_regions[i].in_use = false;
4072 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4077 return get_errno(shmdt(g2h(shmaddr)));
4080 #ifdef TARGET_NR_ipc
4081 /* ??? This only works with linear mappings. */
4082 /* do_ipc() must return target values and target errnos. */
/* Multiplexed SysV IPC syscall (old-style ipc(2)): decode 'call' and
 * dispatch to the individual do_sem*/ /*do_msg*/ /*do_shm* helpers.
 * The case labels themselves are elided in this view. */
4083 static abi_long do_ipc(unsigned int call, abi_long first,
4084 abi_long second, abi_long third,
4085 abi_long ptr, abi_long fifth)
/* High 16 bits of 'call' carry the ABI version of the sub-call. */
4090 version = call >> 16;
4095 ret = do_semop(first, ptr, second);
4099 ret = get_errno(semget(first, second, third));
4102 case IPCOP_semctl: {
4103 /* The semun argument to semctl is passed by value, so dereference the
4106 get_user_ual(atptr, ptr);
4107 ret = do_semctl(first, second, third, atptr);
4112 ret = get_errno(msgget(first, second));
4116 ret = do_msgsnd(first, ptr, second, third);
4120 ret = do_msgctl(first, second, ptr);
/* Old (version 0) msgrcv passes a kludge struct holding msgp/msgtyp. */
4127 struct target_ipc_kludge {
4132 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4133 ret = -TARGET_EFAULT;
4137 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4139 unlock_user_struct(tmp, ptr, 0);
4143 ret = do_msgrcv(first, ptr, second, fifth, third);
/* shmat: result address is stored through the guest pointer 'third'. */
4152 raddr = do_shmat(first, ptr, second);
4153 if (is_error(raddr))
4154 return get_errno(raddr);
4155 if (put_user_ual(raddr, third))
4156 return -TARGET_EFAULT;
4160 ret = -TARGET_EINVAL;
4165 ret = do_shmdt(ptr);
4169 /* IPC_* flag values are the same on all linux platforms */
4170 ret = get_errno(shmget(first, second, third));
4173 /* IPC_* and SHM_* command values are the same on all linux platforms */
4175 ret = do_shmctl(first, second, ptr);
4178 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
4179 ret = -TARGET_ENOSYS;
4186 /* kernel structure types definitions */
/* First inclusion of syscall_types.h: generate a STRUCT_<name> enum
 * constant for every described kernel struct. */
4188 #define STRUCT(name, ...) STRUCT_ ## name,
4189 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4191 #include "syscall_types.h"
4195 #undef STRUCT_SPECIAL
/* Second inclusion: generate the thunk argtype description array
 * struct_<name>_def[] for each struct (specials are hand-written). */
4197 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4198 #define STRUCT_SPECIAL(name)
4199 #include "syscall_types.h"
4201 #undef STRUCT_SPECIAL
/* One table entry per supported ioctl: target/host command numbers,
 * access direction, optional custom handler, and argument description. */
4203 typedef struct IOCTLEntry IOCTLEntry;
4205 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
4206 int fd, int cmd, abi_long arg);
4210 unsigned int host_cmd;
4213 do_ioctl_fn *do_ioctl;
4214 const argtype arg_type[5];
/* Direction of data flow for the ioctl's third argument. */
4217 #define IOC_R 0x0001
4218 #define IOC_W 0x0002
4219 #define IOC_RW (IOC_R | IOC_W)
/* Size of the on-stack conversion buffer used by do_ioctl(). */
4221 #define MAX_STRUCT_SIZE 4096
4223 #ifdef CONFIG_FIEMAP
4224 /* So fiemap access checks don't overflow on 32 bit systems.
4225 * This is very slightly smaller than the limit imposed by
4226 * the underlying kernel.
4228 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4229 / sizeof(struct fiemap_extent))
/* Custom handler for FS_IOC_FIEMAP: the argument is a struct fiemap
 * followed by a variable-length array of fiemap_extents, so the generic
 * fixed-size thunk conversion cannot be used.  Returns the ioctl result
 * or a -TARGET_* errno.  NOTE(review): the free/cleanup path for the
 * g_try_malloc'd buffer is elided in this view. */
4231 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4232 int fd, int cmd, abi_long arg)
4234 /* The parameter for this ioctl is a struct fiemap followed
4235 * by an array of struct fiemap_extent whose size is set
4236 * in fiemap->fm_extent_count. The array is filled in by the
4239 int target_size_in, target_size_out;
4241 const argtype *arg_type = ie->arg_type;
4242 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4245 int i, extent_size = thunk_type_size(extent_arg_type, 0);
4249 assert(arg_type[0] == TYPE_PTR);
4250 assert(ie->access == IOC_RW);
/* Convert the guest header into buf_temp. */
4252 target_size_in = thunk_type_size(arg_type, 0);
4253 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4255 return -TARGET_EFAULT;
4257 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4258 unlock_user(argptr, arg, 0);
4259 fm = (struct fiemap *)buf_temp;
/* Reject counts that would overflow the size computation below. */
4260 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4261 return -TARGET_EINVAL;
4264 outbufsz = sizeof (*fm) +
4265 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4267 if (outbufsz > MAX_STRUCT_SIZE) {
4268 /* We can't fit all the extents into the fixed size buffer.
4269 * Allocate one that is large enough and use it instead.
4271 fm = g_try_malloc(outbufsz);
4273 return -TARGET_ENOMEM;
4275 memcpy(fm, buf_temp, sizeof(struct fiemap));
4278 ret = get_errno(ioctl(fd, ie->host_cmd, fm));
4279 if (!is_error(ret)) {
4280 target_size_out = target_size_in;
4281 /* An extent_count of 0 means we were only counting the extents
4282 * so there are no structs to copy
4284 if (fm->fm_extent_count != 0) {
4285 target_size_out += fm->fm_mapped_extents * extent_size;
4287 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4289 ret = -TARGET_EFAULT;
4291 /* Convert the struct fiemap */
4292 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4293 if (fm->fm_extent_count != 0) {
4294 p = argptr + target_size_in;
4295 /* ...and then all the struct fiemap_extents */
4296 for (i = 0; i < fm->fm_mapped_extents; i++) {
4297 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4302 unlock_user(argptr, arg, target_size_out);
/* Custom handler for SIOCGIFCONF: struct ifconf embeds a pointer to a
 * variable-length ifreq array, so both the header and the array must be
 * converted separately.  NOTE(review): uses bare malloc() for the big
 * buffer (unlike the g_try_malloc used elsewhere) and the free path is
 * elided in this view -- check the full file for a leak on the
 * -TARGET_EFAULT returns below. */
4312 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4313 int fd, int cmd, abi_long arg)
4315 const argtype *arg_type = ie->arg_type;
4319 struct ifconf *host_ifconf;
4321 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4322 int target_ifreq_size;
4327 abi_long target_ifc_buf;
4331 assert(arg_type[0] == TYPE_PTR);
4332 assert(ie->access == IOC_RW);
/* Convert the guest struct ifconf header into buf_temp. */
4335 target_size = thunk_type_size(arg_type, 0);
4337 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4339 return -TARGET_EFAULT;
4340 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4341 unlock_user(argptr, arg, 0);
/* Remember the guest's buffer pointer/length before overwriting them
 * with host-side values. */
4343 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4344 target_ifc_len = host_ifconf->ifc_len;
4345 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4347 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4348 nb_ifreq = target_ifc_len / target_ifreq_size;
4349 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4351 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4352 if (outbufsz > MAX_STRUCT_SIZE) {
4353 /* We can't fit all the extents into the fixed size buffer.
4354 * Allocate one that is large enough and use it instead.
4356 host_ifconf = malloc(outbufsz);
4358 return -TARGET_ENOMEM;
4360 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
/* The host ifreq array lives immediately after the header copy. */
4363 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
4365 host_ifconf->ifc_len = host_ifc_len;
4366 host_ifconf->ifc_buf = host_ifc_buf;
4368 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
4369 if (!is_error(ret)) {
4370 /* convert host ifc_len to target ifc_len */
4372 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4373 target_ifc_len = nb_ifreq * target_ifreq_size;
4374 host_ifconf->ifc_len = target_ifc_len;
4376 /* restore target ifc_buf */
4378 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4380 /* copy struct ifconf to target user */
4382 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4384 return -TARGET_EFAULT;
4385 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4386 unlock_user(argptr, arg, target_size);
4388 /* copy ifreq[] to target user */
4390 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4391 for (i = 0; i < nb_ifreq ; i++) {
4392 thunk_convert(argptr + i * target_ifreq_size,
4393 host_ifc_buf + i * sizeof(struct ifreq),
4394 ifreq_arg_type, THUNK_TARGET);
4396 unlock_user(argptr, target_ifc_buf, target_ifc_len);
/* Custom handler for device-mapper ioctls (DM_*).  The dm_ioctl header
 * is followed by command-specific variable-length payload at
 * data_start; each command's payload needs its own conversion both on
 * the way in and on the way out.  NOTE(review): many lines (locals,
 * break statements, cleanup of big_buf) are elided in this view. */
4406 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4407 int cmd, abi_long arg)
4410 struct dm_ioctl *host_dm;
4411 abi_long guest_data;
4412 uint32_t guest_data_size;
4414 const argtype *arg_type = ie->arg_type;
4416 void *big_buf = NULL;
/* Convert the fixed dm_ioctl header from the guest. */
4420 target_size = thunk_type_size(arg_type, 0);
4421 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4423 ret = -TARGET_EFAULT;
4426 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4427 unlock_user(argptr, arg, 0);
4429 /* buf_temp is too small, so fetch things into a bigger buffer */
4430 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4431 memcpy(big_buf, buf_temp, target_size);
/* Guest payload starts data_start bytes into the guest argument. */
4435 guest_data = arg + host_dm->data_start;
4436 if ((guest_data - arg) < 0) {
4440 guest_data_size = host_dm->data_size - host_dm->data_start;
4441 host_data = (char*)host_dm + host_dm->data_start;
/* Inbound payload conversion, per command. */
4443 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4444 switch (ie->host_cmd) {
4446 case DM_LIST_DEVICES:
4449 case DM_DEV_SUSPEND:
4452 case DM_TABLE_STATUS:
4453 case DM_TABLE_CLEAR:
4455 case DM_LIST_VERSIONS:
4459 case DM_DEV_SET_GEOMETRY:
4460 /* data contains only strings */
4461 memcpy(host_data, argptr, guest_data_size);
/* Payload starting with a 64-bit value that must be byte-swapped. */
4464 memcpy(host_data, argptr, guest_data_size);
4465 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
/* DM_TABLE_LOAD-style: an array of dm_target_spec each followed by a
 * parameter string; convert each spec and copy its string. */
4469 void *gspec = argptr;
4470 void *cur_data = host_data;
4471 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4472 int spec_size = thunk_type_size(arg_type, 0);
4475 for (i = 0; i < host_dm->target_count; i++) {
4476 struct dm_target_spec *spec = cur_data;
4480 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
4481 slen = strlen((char*)gspec + spec_size) + 1;
4483 spec->next = sizeof(*spec) + slen;
4484 strcpy((char*)&spec[1], gspec + spec_size);
4486 cur_data += spec->next;
4491 ret = -TARGET_EINVAL;
4492 unlock_user(argptr, guest_data, 0);
4495 unlock_user(argptr, guest_data, 0);
4497 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
/* Outbound payload conversion, per command. */
4498 if (!is_error(ret)) {
4499 guest_data = arg + host_dm->data_start;
4500 guest_data_size = host_dm->data_size - host_dm->data_start;
4501 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
4502 switch (ie->host_cmd) {
4507 case DM_DEV_SUSPEND:
4510 case DM_TABLE_CLEAR:
4512 case DM_DEV_SET_GEOMETRY:
4513 /* no return data */
4515 case DM_LIST_DEVICES:
/* Walk the dm_name_list chain, converting each node + name string and
 * recomputing 'next' offsets for the (differently sized) guest layout. */
4517 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
4518 uint32_t remaining_data = guest_data_size;
4519 void *cur_data = argptr;
4520 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
4521 int nl_size = 12; /* can't use thunk_size due to alignment */
4524 uint32_t next = nl->next;
4526 nl->next = nl_size + (strlen(nl->name) + 1);
4528 if (remaining_data < nl->next) {
4529 host_dm->flags |= DM_BUFFER_FULL_FLAG;
4532 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
4533 strcpy(cur_data + nl_size, nl->name);
4534 cur_data += nl->next;
4535 remaining_data -= nl->next;
4539 nl = (void*)nl + next;
4544 case DM_TABLE_STATUS:
/* Convert the returned dm_target_spec array + status strings. */
4546 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
4547 void *cur_data = argptr;
4548 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4549 int spec_size = thunk_type_size(arg_type, 0);
4552 for (i = 0; i < host_dm->target_count; i++) {
4553 uint32_t next = spec->next;
4554 int slen = strlen((char*)&spec[1]) + 1;
4555 spec->next = (cur_data - argptr) + spec_size + slen;
4556 if (guest_data_size < spec->next) {
4557 host_dm->flags |= DM_BUFFER_FULL_FLAG;
4560 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
4561 strcpy(cur_data + spec_size, (char*)&spec[1]);
4562 cur_data = argptr + spec->next;
4563 spec = (void*)host_dm + host_dm->data_start + next;
/* DM_DEV_WAIT/TABLE_DEPS-style: a count followed by 64-bit values. */
4569 void *hdata = (void*)host_dm + host_dm->data_start;
4570 int count = *(uint32_t*)hdata;
4571 uint64_t *hdev = hdata + 8;
4572 uint64_t *gdev = argptr + 8;
4575 *(uint32_t*)argptr = tswap32(count);
4576 for (i = 0; i < count; i++) {
4577 *gdev = tswap64(*hdev);
4583 case DM_LIST_VERSIONS:
/* Walk the dm_target_versions chain like the name list above. */
4585 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
4586 uint32_t remaining_data = guest_data_size;
4587 void *cur_data = argptr;
4588 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
4589 int vers_size = thunk_type_size(arg_type, 0);
4592 uint32_t next = vers->next;
4594 vers->next = vers_size + (strlen(vers->name) + 1);
4596 if (remaining_data < vers->next) {
4597 host_dm->flags |= DM_BUFFER_FULL_FLAG;
4600 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
4601 strcpy(cur_data + vers_size, vers->name);
4602 cur_data += vers->next;
4603 remaining_data -= vers->next;
4607 vers = (void*)vers + next;
4612 unlock_user(argptr, guest_data, 0);
4613 ret = -TARGET_EINVAL;
4616 unlock_user(argptr, guest_data, guest_data_size);
/* Finally write the (possibly updated) dm_ioctl header back. */
4618 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4620 ret = -TARGET_EFAULT;
4623 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4624 unlock_user(argptr, arg, target_size);
/* Custom handler for BLKPG: struct blkpg_ioctl_arg embeds a pointer to
 * a struct blkpg_partition, so the payload must be fetched separately
 * and the pointer redirected to a host-side copy before calling. */
4631 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4632 int cmd, abi_long arg)
4636 const argtype *arg_type = ie->arg_type;
4637 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
4640 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
4641 struct blkpg_partition host_part;
4643 /* Read and convert blkpg */
4645 target_size = thunk_type_size(arg_type, 0);
4646 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4648 ret = -TARGET_EFAULT;
4651 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4652 unlock_user(argptr, arg, 0);
4654 switch (host_blkpg->op) {
4655 case BLKPG_ADD_PARTITION:
4656 case BLKPG_DEL_PARTITION:
4657 /* payload is struct blkpg_partition */
4660 /* Unknown opcode */
4661 ret = -TARGET_EINVAL;
4665 /* Read and convert blkpg->data */
/* After thunk_convert, host_blkpg->data still holds the guest address. */
4666 arg = (abi_long)(uintptr_t)host_blkpg->data;
4667 target_size = thunk_type_size(part_arg_type, 0);
4668 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4670 ret = -TARGET_EFAULT;
4673 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
4674 unlock_user(argptr, arg, 0);
4676 /* Swizzle the data pointer to our local copy and call! */
4677 host_blkpg->data = &host_part;
4678 ret = get_errno(ioctl(fd, ie->host_cmd, host_blkpg));
/* Custom handler for routing-table ioctls (struct rtentry): the
 * structure contains an rt_dev char* that must be translated from a
 * guest string pointer to a locked host string, while every other
 * field goes through the normal thunk conversion. */
4684 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
4685 int fd, int cmd, abi_long arg)
4687 const argtype *arg_type = ie->arg_type;
4688 const StructEntry *se;
4689 const argtype *field_types;
4690 const int *dst_offsets, *src_offsets;
4693 abi_ulong *target_rt_dev_ptr;
4694 unsigned long *host_rt_dev_ptr;
4698 assert(ie->access == IOC_W);
4699 assert(*arg_type == TYPE_PTR);
4701 assert(*arg_type == TYPE_STRUCT);
4702 target_size = thunk_type_size(arg_type, 0);
4703 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4705 return -TARGET_EFAULT;
4708 assert(*arg_type == (int)STRUCT_rtentry);
4709 se = struct_entries + *arg_type++;
4710 assert(se->convert[0] == NULL);
4711 /* convert struct here to be able to catch rt_dev string */
4712 field_types = se->field_types;
4713 dst_offsets = se->field_offsets[THUNK_HOST];
4714 src_offsets = se->field_offsets[THUNK_TARGET];
4715 for (i = 0; i < se->nb_fields; i++) {
4716 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
4717 assert(*field_types == TYPE_PTRVOID);
4718 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
4719 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
/* Non-NULL rt_dev: lock the guest device-name string into host memory
 * and store the host pointer into the converted struct. */
4720 if (*target_rt_dev_ptr != 0) {
4721 *host_rt_dev_ptr = (unsigned long)lock_user_string(
4722 tswapal(*target_rt_dev_ptr));
4723 if (!*host_rt_dev_ptr) {
4724 unlock_user(argptr, arg, 0);
4725 return -TARGET_EFAULT;
4728 *host_rt_dev_ptr = 0;
/* All other fields: generic per-field thunk conversion. */
4733 field_types = thunk_convert(buf_temp + dst_offsets[i],
4734 argptr + src_offsets[i],
4735 field_types, THUNK_HOST);
4737 unlock_user(argptr, arg, 0);
4739 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
/* Release the locked device-name string, if any. */
4740 if (*host_rt_dev_ptr != 0) {
4741 unlock_user((void *)*host_rt_dev_ptr,
4742 *target_rt_dev_ptr, 0);
/* Custom handler for KDSIGACCEPT: the ioctl argument is a signal
 * number, which must be translated from the target numbering to the
 * host numbering before the host call. */
4747 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
4748 int fd, int cmd, abi_long arg)
4750 int sig = target_to_host_signal(arg);
4751 return get_errno(ioctl(fd, ie->host_cmd, sig));
/* Master ioctl table: IOCTL() registers a generic thunk-converted
 * entry, IOCTL_SPECIAL() one with a custom do_ioctl_fn handler.  The
 * entry list itself (ioctls.h) is elided from this view. */
4754 static IOCTLEntry ioctl_entries[] = {
4755 #define IOCTL(cmd, access, ...) \
4756 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
4757 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
4758 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
4763 /* ??? Implement proper locking for ioctls. */
4764 /* do_ioctl() Must return target values and target errnos. */
/* Generic ioctl dispatcher: look the target command up in
 * ioctl_entries[], delegate to a custom handler if one is registered,
 * otherwise convert the third argument per the entry's argtype
 * description (IOC_R/IOC_W/IOC_RW) around the host ioctl() call. */
4765 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
4767 const IOCTLEntry *ie;
4768 const argtype *arg_type;
4770 uint8_t buf_temp[MAX_STRUCT_SIZE];
/* Table is terminated by a zero target_cmd entry. */
4776 if (ie->target_cmd == 0) {
4777 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
4778 return -TARGET_ENOSYS;
4780 if (ie->target_cmd == cmd)
4784 arg_type = ie->arg_type;
4786 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
/* Entries with a custom handler bypass the generic conversion. */
4789 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
4792 switch(arg_type[0]) {
/* No argument. */
4795 ret = get_errno(ioctl(fd, ie->host_cmd));
/* Scalar argument passed through unchanged. */
4799 ret = get_errno(ioctl(fd, ie->host_cmd, arg));
/* Pointer argument: convert the pointed-to struct per direction. */
4803 target_size = thunk_type_size(arg_type, 0);
4804 switch(ie->access) {
/* Read-only (kernel -> guest): call, then copy result out. */
4806 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
4807 if (!is_error(ret)) {
4808 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4810 return -TARGET_EFAULT;
4811 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4812 unlock_user(argptr, arg, target_size);
/* Write-only (guest -> kernel): copy in, then call. */
4816 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4818 return -TARGET_EFAULT;
4819 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4820 unlock_user(argptr, arg, 0);
4821 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
/* Read-write: copy in, call, copy back out on success. */
4825 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4827 return -TARGET_EFAULT;
4828 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4829 unlock_user(argptr, arg, 0);
4830 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
4831 if (!is_error(ret)) {
4832 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4834 return -TARGET_EFAULT;
4835 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4836 unlock_user(argptr, arg, target_size);
4842 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
4843 (long)cmd, arg_type[0]);
4844 ret = -TARGET_ENOSYS;
/* termios c_iflag translation table: {target mask, target bits,
 * host mask, host bits}.  All input-flag bits map one-to-one. */
4850 static const bitmask_transtbl iflag_tbl[] = {
4851 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
4852 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
4853 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
4854 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
4855 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
4856 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
4857 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
4858 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
4859 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
4860 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
4861 { TARGET_IXON, TARGET_IXON, IXON, IXON },
4862 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
4863 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
4864 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
/* termios c_oflag translation table.  The delay fields (NLDLY, CRDLY,
 * TABDLY, ...) are multi-bit: the same mask appears once per value. */
4868 static const bitmask_transtbl oflag_tbl[] = {
4869 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
4870 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
4871 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
4872 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
4873 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
4874 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
4875 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
4876 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
4877 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
4878 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
4879 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
4880 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
4881 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
4882 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
4883 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
4884 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
4885 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
4886 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
4887 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
4888 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
4889 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
4890 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
4891 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
4892 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
/* termios c_cflag translation table: baud rates (CBAUD field), char
 * size (CSIZE field), and the single-bit control flags. */
4896 static const bitmask_transtbl cflag_tbl[] = {
4897 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
4898 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
4899 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
4900 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
4901 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
4902 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
4903 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
4904 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
4905 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
4906 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
4907 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
4908 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
4909 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
4910 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
4911 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
4912 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
4913 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
4914 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
4915 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
4916 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
4917 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
4918 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
4919 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
4920 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
4921 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
4922 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
4923 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
4924 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
4925 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
4926 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
4927 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
/* termios c_lflag translation table: local-mode single-bit flags. */
4931 static const bitmask_transtbl lflag_tbl[] = {
4932 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
4933 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
4934 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
4935 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
4936 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
4937 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
4938 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
4939 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
4940 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
4941 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
4942 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
4943 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
4944 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
4945 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
4946 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
/* Thunk conversion callback: translate a guest struct target_termios
 * (src) into a host_termios (dst) -- flag words via the bitmask tables
 * above, control characters individually by index. */
4950 static void target_to_host_termios (void *dst, const void *src)
4952 struct host_termios *host = dst;
4953 const struct target_termios *target = src;
4956 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
4958 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
4960 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
4962 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
4963 host->c_line = target->c_line;
/* Zero first: target and host c_cc arrays may differ in length. */
4965 memset(host->c_cc, 0, sizeof(host->c_cc));
4966 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
4967 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
4968 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
4969 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
4970 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
4971 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
4972 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
4973 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
4974 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
4975 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
4976 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
4977 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
4978 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
4979 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
4980 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
4981 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
4982 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
/* Inverse of target_to_host_termios: translate a host_termios (src)
 * into the guest target_termios layout (dst). */
4985 static void host_to_target_termios (void *dst, const void *src)
4987 struct target_termios *target = dst;
4988 const struct host_termios *host = src;
4991 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
4993 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
4995 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
4997 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
4998 target->c_line = host->c_line;
/* Zero first: target and host c_cc arrays may differ in length. */
5000 memset(target->c_cc, 0, sizeof(target->c_cc));
5001 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5002 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5003 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5004 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5005 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5006 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5007 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5008 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5009 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5010 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5011 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5012 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5013 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5014 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5015 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5016 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5017 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
/* Thunk StructEntry for termios: custom converters instead of the
 * field-by-field description, since c_cc index layout differs. */
5020 static const StructEntry struct_termios_def = {
5021 .convert = { host_to_target_termios, target_to_host_termios },
5022 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5023 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
/* mmap(2) flag translation table (target MAP_* <-> host MAP_*). */
5026 static bitmask_transtbl mmap_flags_tbl[] = {
5027 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5028 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5029 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5030 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
5031 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
5032 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
5033 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
5034 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5035 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE, MAP_NORESERVE,
5040 #if defined(TARGET_I386)
5042 /* NOTE: there is really one LDT for all the threads */
5043 static uint8_t *ldt_table;
/* modify_ldt(0): copy the emulated LDT (capped at the table size and at
 * 'bytecount') out to the guest buffer at 'ptr'. */
5045 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5052 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5053 if (size > bytecount)
5055 p = lock_user(VERIFY_WRITE, ptr, size, 0);
5057 return -TARGET_EFAULT;
5058 /* ??? Should this by byteswapped? */
5059 memcpy(p, ldt_table, size);
5060 unlock_user(p, ptr, size);
5064 /* XXX: add locking support */
/* modify_ldt(1/0x11): install one LDT descriptor from the guest's
 * target_modify_ldt_ldt_s at 'ptr'.  Mirrors the kernel's own
 * write_ldt(): unpack the flags, build the two 32-bit descriptor
 * words, and store them in the emulated ldt_table.  'oldmode' selects
 * the legacy flag interpretation.  NOTE(review): several branches
 * (empty-descriptor handling, lm bits) are elided in this view. */
5065 static abi_long write_ldt(CPUX86State *env,
5066 abi_ulong ptr, unsigned long bytecount, int oldmode)
5068 struct target_modify_ldt_ldt_s ldt_info;
5069 struct target_modify_ldt_ldt_s *target_ldt_info;
5070 int seg_32bit, contents, read_exec_only, limit_in_pages;
5071 int seg_not_present, useable, lm;
5072 uint32_t *lp, entry_1, entry_2;
5074 if (bytecount != sizeof(ldt_info))
5075 return -TARGET_EINVAL;
5076 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5077 return -TARGET_EFAULT;
5078 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5079 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5080 ldt_info.limit = tswap32(target_ldt_info->limit);
5081 ldt_info.flags = tswap32(target_ldt_info->flags);
5082 unlock_user_struct(target_ldt_info, ptr, 0);
5084 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5085 return -TARGET_EINVAL;
/* Unpack the packed flags word (same bit layout as the kernel's
 * struct user_desc). */
5086 seg_32bit = ldt_info.flags & 1;
5087 contents = (ldt_info.flags >> 1) & 3;
5088 read_exec_only = (ldt_info.flags >> 3) & 1;
5089 limit_in_pages = (ldt_info.flags >> 4) & 1;
5090 seg_not_present = (ldt_info.flags >> 5) & 1;
5091 useable = (ldt_info.flags >> 6) & 1;
5095 lm = (ldt_info.flags >> 7) & 1;
5097 if (contents == 3) {
5099 return -TARGET_EINVAL;
5100 if (seg_not_present == 0)
5101 return -TARGET_EINVAL;
5103 /* allocate the LDT */
/* Lazily allocate guest memory for the (process-wide) LDT. */
5105 env->ldt.base = target_mmap(0,
5106 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
5107 PROT_READ|PROT_WRITE,
5108 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
5109 if (env->ldt.base == -1)
5110 return -TARGET_ENOMEM;
5111 memset(g2h(env->ldt.base), 0,
5112 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
5113 env->ldt.limit = 0xffff;
5114 ldt_table = g2h(env->ldt.base);
5117 /* NOTE: same code as Linux kernel */
5118 /* Allow LDTs to be cleared by the user. */
5119 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5122 read_exec_only == 1 &&
5124 limit_in_pages == 0 &&
5125 seg_not_present == 1 &&
/* Assemble the descriptor words (base/limit split across both). */
5133 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5134 (ldt_info.limit & 0x0ffff);
5135 entry_2 = (ldt_info.base_addr & 0xff000000) |
5136 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5137 (ldt_info.limit & 0xf0000) |
5138 ((read_exec_only ^ 1) << 9) |
5140 ((seg_not_present ^ 1) << 15) |
5142 (limit_in_pages << 23) |
5146 entry_2 |= (useable << 20);
5148 /* Install the new entry ... */
5150 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
5151 lp[0] = tswap32(entry_1);
5152 lp[1] = tswap32(entry_2);
5156 /* specific and weird i386 syscalls */
/*
 * do_modify_ldt: dispatch the modify_ldt(2) 'func' selector to
 * read_ldt/write_ldt.  write_ldt is called with oldmode=1 for the
 * legacy write function and oldmode=0 for the extended one; the switch
 * scaffolding (case labels, return) is elided in this excerpt.
 */
5157 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5158                               unsigned long bytecount)
5164         ret = read_ldt(ptr, bytecount);
5167         ret = write_ldt(env, ptr, bytecount, 1);
5170         ret = write_ldt(env, ptr, bytecount, 0);
5173         ret = -TARGET_ENOSYS;
5179 #if defined(TARGET_I386) && defined(TARGET_ABI32)
/*
 * do_set_thread_area: emulate set_thread_area(2) for 32-bit x86 guests.
 * Installs a TLS descriptor directly into the guest GDT slots
 * TARGET_GDT_ENTRY_TLS_MIN..MAX.  If entry_number is -1, the first free
 * GDT slot is picked and written back to the guest struct.  Descriptor
 * encoding mirrors write_ldt() above.  Interior lines (including the
 * success return) are elided in this excerpt.
 */
5180 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
5182     uint64_t *gdt_table = g2h(env->gdt.base);
5183     struct target_modify_ldt_ldt_s ldt_info;
5184     struct target_modify_ldt_ldt_s *target_ldt_info;
5185     int seg_32bit, contents, read_exec_only, limit_in_pages;
5186     int seg_not_present, useable, lm;
5187     uint32_t *lp, entry_1, entry_2;
5190     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5191     if (!target_ldt_info)
5192         return -TARGET_EFAULT;
5193     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5194     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5195     ldt_info.limit = tswap32(target_ldt_info->limit);
5196     ldt_info.flags = tswap32(target_ldt_info->flags);
     /* entry_number == -1 means "allocate a free TLS slot for me". */
5197     if (ldt_info.entry_number == -1) {
5198         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
5199             if (gdt_table[i] == 0) {
5200                 ldt_info.entry_number = i;
5201                 target_ldt_info->entry_number = tswap32(i);
5206     unlock_user_struct(target_ldt_info, ptr, 1);
5208     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
5209         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
5210         return -TARGET_EINVAL;
     /* Same packed-flags layout as Linux's struct user_desc. */
5211     seg_32bit = ldt_info.flags & 1;
5212     contents = (ldt_info.flags >> 1) & 3;
5213     read_exec_only = (ldt_info.flags >> 3) & 1;
5214     limit_in_pages = (ldt_info.flags >> 4) & 1;
5215     seg_not_present = (ldt_info.flags >> 5) & 1;
5216     useable = (ldt_info.flags >> 6) & 1;
5220     lm = (ldt_info.flags >> 7) & 1;
5223     if (contents == 3) {
5224         if (seg_not_present == 0)
5225             return -TARGET_EINVAL;
5228     /* NOTE: same code as Linux kernel */
5229     /* Allow LDTs to be cleared by the user. */
5230     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5231         if ((contents == 0             &&
5232              read_exec_only == 1       &&
5234              limit_in_pages == 0       &&
5235              seg_not_present == 1      &&
     /* Descriptor word encoding; several attribute OR terms elided. */
5243     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5244         (ldt_info.limit & 0x0ffff);
5245     entry_2 = (ldt_info.base_addr & 0xff000000) |
5246         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5247         (ldt_info.limit & 0xf0000) |
5248         ((read_exec_only ^ 1) << 9) |
5250         ((seg_not_present ^ 1) << 15) |
5252         (limit_in_pages << 23) |
5257     /* Install the new entry ...  */
5259     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
5260     lp[0] = tswap32(entry_1);
5261     lp[1] = tswap32(entry_2);
/*
 * do_get_thread_area: emulate get_thread_area(2) for 32-bit x86 guests.
 * Reads the GDT descriptor at the requested TLS slot, decodes the two
 * descriptor words back into base/limit/flags, and writes them to the
 * guest struct (inverse of do_set_thread_area above).  The success
 * return is elided in this excerpt.
 */
5265 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
5267     struct target_modify_ldt_ldt_s *target_ldt_info;
5268     uint64_t *gdt_table = g2h(env->gdt.base);
5269     uint32_t base_addr, limit, flags;
5270     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
5271     int seg_not_present, useable, lm;
5272     uint32_t *lp, entry_1, entry_2;
5274     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5275     if (!target_ldt_info)
5276         return -TARGET_EFAULT;
5277     idx = tswap32(target_ldt_info->entry_number);
5278     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
5279         idx > TARGET_GDT_ENTRY_TLS_MAX) {
5280         unlock_user_struct(target_ldt_info, ptr, 1);
5281         return -TARGET_EINVAL;
5283     lp = (uint32_t *)(gdt_table + idx);
5284     entry_1 = tswap32(lp[0]);
5285     entry_2 = tswap32(lp[1]);
     /* Decode attribute bits; note present/read-only bits are stored
      * inverted in the descriptor, hence the ^ 1. */
5287     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
5288     contents = (entry_2 >> 10) & 3;
5289     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
5290     seg_32bit = (entry_2 >> 22) & 1;
5291     limit_in_pages = (entry_2 >> 23) & 1;
5292     useable = (entry_2 >> 20) & 1;
5296     lm = (entry_2 >> 21) & 1;
5298     flags = (seg_32bit << 0) | (contents << 1) |
5299         (read_exec_only << 3) | (limit_in_pages << 4) |
5300         (seg_not_present << 5) | (useable << 6) | (lm << 7);
5301     limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
5302     base_addr = (entry_1 >> 16) |
5303         (entry_2 & 0xff000000) |
5304         ((entry_2 & 0xff) << 16);
5305     target_ldt_info->base_addr = tswapal(base_addr);
5306     target_ldt_info->limit = tswap32(limit);
5307     target_ldt_info->flags = tswap32(flags);
5308     unlock_user_struct(target_ldt_info, ptr, 1);
5311 #endif /* TARGET_I386 && TARGET_ABI32 */
5313 #ifndef TARGET_ABI32
/*
 * do_arch_prctl: emulate x86-64 arch_prctl(2).  SET_GS/SET_FS load the
 * selected segment and set its base to 'addr'; GET_GS/GET_FS copy the
 * current base back to guest memory at 'addr'.  The idx assignments and
 * switch scaffolding are elided in this excerpt.
 */
5314 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5321     case TARGET_ARCH_SET_GS:
5322     case TARGET_ARCH_SET_FS:
5323         if (code == TARGET_ARCH_SET_GS)
5327         cpu_x86_load_seg(env, idx, 0);
5328         env->segs[idx].base = addr;
5330     case TARGET_ARCH_GET_GS:
5331     case TARGET_ARCH_GET_FS:
5332         if (code == TARGET_ARCH_GET_GS)
5336         val = env->segs[idx].base;
5337         if (put_user(val, addr, abi_ulong))
5338             ret = -TARGET_EFAULT;
5341         ret = -TARGET_EINVAL;
5348 #endif /* defined(TARGET_I386) */
/* Stack size for host threads backing guest CLONE_VM clones. */
5350 #define NEW_STACK_SIZE 0x40000

/* Serializes thread creation so parent-side TLS setup appears atomic
 * to the child (see clone_func / do_fork below). */
5353 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
/* new_thread_info fields (struct header elided in this excerpt):
 * parent/child handshake state passed to the new host thread. */
5356     pthread_mutex_t mutex;
5357     pthread_cond_t cond;
5360     abi_ulong child_tidptr;
5361     abi_ulong parent_tidptr;

/*
 * clone_func: host-thread trampoline for guest clone(CLONE_VM).
 * Publishes the child TID to the requested guest addresses, restores
 * the signal mask, signals the waiting parent via info->cond, then
 * waits on clone_lock until the parent finishes TLS initialization.
 * The tail (entering the guest CPU loop) is elided in this excerpt.
 */
5365 static void *clone_func(void *arg)
5367     new_thread_info *info = arg;
5372     rcu_register_thread();
5374     cpu = ENV_GET_CPU(env);
5376     ts = (TaskState *)cpu->opaque;
5377     info->tid = gettid();
5378     cpu->host_tid = info->tid;
5380     if (info->child_tidptr)
5381         put_user_u32(info->tid, info->child_tidptr);
5382     if (info->parent_tidptr)
5383         put_user_u32(info->tid, info->parent_tidptr);
5384     /* Enable signals.  */
5385     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
5386     /* Signal to the parent that we're ready.  */
5387     pthread_mutex_lock(&info->mutex);
5388     pthread_cond_broadcast(&info->cond);
5389     pthread_mutex_unlock(&info->mutex);
5390     /* Wait until the parent has finshed initializing the tls state.  */
5391     pthread_mutex_lock(&clone_lock);
5392     pthread_mutex_unlock(&clone_lock);
5398 /* do_fork() Must return host values and target errnos (unlike most
5399    do_*() functions). */
/*
 * do_fork: implement clone(2)/fork(2)/vfork(2) for the guest.
 * CLONE_VM requests become a host pthread sharing this process (thread
 * path); anything else falls through to a host fork() (fork path, whose
 * fork() call itself is elided in this excerpt).  Several lines,
 * including the nptl_flags capture and both return statements, are
 * elided; code lines below are verbatim.
 */
5400 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
5401                    abi_ulong parent_tidptr, target_ulong newtls,
5402                    abi_ulong child_tidptr)
5404     CPUState *cpu = ENV_GET_CPU(env);
5408     CPUArchState *new_env;
5409     unsigned int nptl_flags;
5412     /* Emulate vfork() with fork() */
5413     if (flags & CLONE_VFORK)
5414         flags &= ~(CLONE_VFORK | CLONE_VM);
5416     if (flags & CLONE_VM) {
5417         TaskState *parent_ts = (TaskState *)cpu->opaque;
5418         new_thread_info info;
5419         pthread_attr_t attr;
         /* New TaskState + CPU for the child thread. */
5421         ts = g_new0(TaskState, 1);
5422         init_task_state(ts);
5423         /* we create a new CPU instance. */
5424         new_env = cpu_copy(env);
5425         /* Init regs that differ from the parent.  */
5426         cpu_clone_regs(new_env, newsp);
5427         new_cpu = ENV_GET_CPU(new_env);
5428         new_cpu->opaque = ts;
5429         ts->bprm = parent_ts->bprm;
5430         ts->info = parent_ts->info;
5431         ts->signal_mask = parent_ts->signal_mask;
         /* NPTL flags are handled here, not passed to the host. */
5433         flags &= ~CLONE_NPTL_FLAGS2;
5435         if (nptl_flags & CLONE_CHILD_CLEARTID) {
5436             ts->child_tidptr = child_tidptr;
5439         if (nptl_flags & CLONE_SETTLS)
5440             cpu_set_tls (new_env, newtls);
5442         /* Grab a mutex so that thread setup appears atomic.  */
5443         pthread_mutex_lock(&clone_lock);
5445         memset(&info, 0, sizeof(info));
5446         pthread_mutex_init(&info.mutex, NULL);
5447         pthread_mutex_lock(&info.mutex);
5448         pthread_cond_init(&info.cond, NULL);
5450         if (nptl_flags & CLONE_CHILD_SETTID)
5451             info.child_tidptr = child_tidptr;
5452         if (nptl_flags & CLONE_PARENT_SETTID)
5453             info.parent_tidptr = parent_tidptr;
5455         ret = pthread_attr_init(&attr);
5456         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
5457         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
5458         /* It is not safe to deliver signals until the child has finished
5459            initializing, so temporarily block all signals.  */
5460         sigfillset(&sigmask);
5461         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
5463         ret = pthread_create(&info.thread, &attr, clone_func, &info);
5464         /* TODO: Free new CPU state if thread creation failed.  */
5466         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
5467         pthread_attr_destroy(&attr);
5469             /* Wait for the child to initialize.  */
5470             pthread_cond_wait(&info.cond, &info.mutex);
5472             if (flags & CLONE_PARENT_SETTID)
5473                 put_user_u32(ret, parent_tidptr);
5477         pthread_mutex_unlock(&info.mutex);
5478         pthread_cond_destroy(&info.cond);
5479         pthread_mutex_destroy(&info.mutex);
5480         pthread_mutex_unlock(&clone_lock);
5482         /* if no CLONE_VM, we consider it is a fork */
5483         if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0) {
5484             return -TARGET_EINVAL;
5487         if (block_signals()) {
5488             return -TARGET_ERESTARTSYS;
5494             /* Child Process.  */
5496             cpu_clone_regs(env, newsp);
5498             /* There is a race condition here.  The parent process could
5499                theoretically read the TID in the child process before the child
5500                tid is set.  This would require using either ptrace
5501                (not implemented) or having *_tidptr to point at a shared memory
5502                mapping.  We can't repeat the spinlock hack used above because
5503                the child process gets its own copy of the lock.  */
5504             if (flags & CLONE_CHILD_SETTID)
5505                 put_user_u32(gettid(), child_tidptr);
5506             if (flags & CLONE_PARENT_SETTID)
5507                 put_user_u32(gettid(), parent_tidptr);
5508             ts = (TaskState *)cpu->opaque;
5509             if (flags & CLONE_SETTLS)
5510                 cpu_set_tls (env, newtls);
5511             if (flags & CLONE_CHILD_CLEARTID)
5512                 ts->child_tidptr = child_tidptr;
5520 /* warning : doesn't handle linux specific flags... */
/*
 * target_to_host_fcntl_cmd: translate a guest fcntl(2) command to the
 * host constant.  Most 'return' lines for individual cases are elided
 * in this excerpt; unknown commands yield -TARGET_EINVAL.  On 32-bit
 * ABIs the *64 lock commands map to the host large-file variants.
 */
5521 static int target_to_host_fcntl_cmd(int cmd)
5524 	case TARGET_F_DUPFD:
5525 	case TARGET_F_GETFD:
5526 	case TARGET_F_SETFD:
5527 	case TARGET_F_GETFL:
5528 	case TARGET_F_SETFL:
5530         case TARGET_F_GETLK:
5532 	case TARGET_F_SETLK:
5534 	case TARGET_F_SETLKW:
5536 	case TARGET_F_GETOWN:
5538 	case TARGET_F_SETOWN:
5540 	case TARGET_F_GETSIG:
5542 	case TARGET_F_SETSIG:
5544 #if TARGET_ABI_BITS == 32
5545         case TARGET_F_GETLK64:
5547 	case TARGET_F_SETLK64:
5549 	case TARGET_F_SETLKW64:
5552         case TARGET_F_SETLEASE:
5554         case TARGET_F_GETLEASE:
5556 #ifdef F_DUPFD_CLOEXEC
5557         case TARGET_F_DUPFD_CLOEXEC:
5558                 return F_DUPFD_CLOEXEC;
5560         case TARGET_F_NOTIFY:
5563         case TARGET_F_GETOWN_EX:
5567         case TARGET_F_SETOWN_EX:
5571                 return -TARGET_EINVAL;
5573 	return -TARGET_EINVAL;
/* Bitmask translation table for struct flock l_type values; -1 masks
 * mean "match any bits" in the bitmask_transtbl machinery.  The table
 * terminator entry is elided in this excerpt. */
5576 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
5577 static const bitmask_transtbl flock_tbl[] = {
5578 	TRANSTBL_CONVERT(F_RDLCK),
5579 	TRANSTBL_CONVERT(F_WRLCK),
5580 	TRANSTBL_CONVERT(F_UNLCK),
5581 	TRANSTBL_CONVERT(F_EXLCK),
5582 	TRANSTBL_CONVERT(F_SHLCK),
/*
 * do_fcntl: emulate fcntl(2).  Translates the command via
 * target_to_host_fcntl_cmd, converts struct flock / flock64 /
 * f_owner_ex arguments between guest and host layouts (byte-swapping
 * every field), and forwards flag-type commands through the
 * fcntl_flags_tbl bitmask tables.  Numerous lines (case labels, break
 * statements, error checks) are elided in this excerpt; code lines
 * below are verbatim.
 */
5586 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
5589     struct target_flock *target_fl;
5590     struct flock64 fl64;
5591     struct target_flock64 *target_fl64;
5593     struct f_owner_ex fox;
5594     struct target_f_owner_ex *target_fox;
5597     int host_cmd = target_to_host_fcntl_cmd(cmd);
5599     if (host_cmd == -TARGET_EINVAL)
5603     case TARGET_F_GETLK:
5604         if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
5605             return -TARGET_EFAULT;
         /* l_type goes through the flock bitmask table; the assignment
          * target on this line is elided in this excerpt. */
5607             target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
5608         fl.l_whence = tswap16(target_fl->l_whence);
5609         fl.l_start = tswapal(target_fl->l_start);
5610         fl.l_len = tswapal(target_fl->l_len);
5611         fl.l_pid = tswap32(target_fl->l_pid);
5612         unlock_user_struct(target_fl, arg, 0);
5613         ret = get_errno(fcntl(fd, host_cmd, &fl));
         /* On success, copy the (possibly updated) lock back to the guest. */
5615             if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
5616                 return -TARGET_EFAULT;
5618                 host_to_target_bitmask(tswap16(fl.l_type), flock_tbl);
5619             target_fl->l_whence = tswap16(fl.l_whence);
5620             target_fl->l_start = tswapal(fl.l_start);
5621             target_fl->l_len = tswapal(fl.l_len);
5622             target_fl->l_pid = tswap32(fl.l_pid);
5623             unlock_user_struct(target_fl, arg, 1);
5627     case TARGET_F_SETLK:
5628     case TARGET_F_SETLKW:
5629         if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
5630             return -TARGET_EFAULT;
5632             target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
5633         fl.l_whence = tswap16(target_fl->l_whence);
5634         fl.l_start = tswapal(target_fl->l_start);
5635         fl.l_len = tswapal(target_fl->l_len);
5636         fl.l_pid = tswap32(target_fl->l_pid);
5637         unlock_user_struct(target_fl, arg, 0);
5638         ret = get_errno(fcntl(fd, host_cmd, &fl));
     /* 64-bit flock variants: same dance with struct flock64.  The >> 1
      * compensates for the bitmask table's bit positions on these paths. */
5641     case TARGET_F_GETLK64:
5642         if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
5643             return -TARGET_EFAULT;
5645             target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
5646         fl64.l_whence = tswap16(target_fl64->l_whence);
5647         fl64.l_start = tswap64(target_fl64->l_start);
5648         fl64.l_len = tswap64(target_fl64->l_len);
5649         fl64.l_pid = tswap32(target_fl64->l_pid);
5650         unlock_user_struct(target_fl64, arg, 0);
5651         ret = get_errno(fcntl(fd, host_cmd, &fl64));
5653             if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
5654                 return -TARGET_EFAULT;
5655             target_fl64->l_type =
5656                 host_to_target_bitmask(tswap16(fl64.l_type), flock_tbl) >> 1;
5657             target_fl64->l_whence = tswap16(fl64.l_whence);
5658             target_fl64->l_start = tswap64(fl64.l_start);
5659             target_fl64->l_len = tswap64(fl64.l_len);
5660             target_fl64->l_pid = tswap32(fl64.l_pid);
5661             unlock_user_struct(target_fl64, arg, 1);
5664     case TARGET_F_SETLK64:
5665     case TARGET_F_SETLKW64:
5666         if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
5667             return -TARGET_EFAULT;
5669             target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
5670         fl64.l_whence = tswap16(target_fl64->l_whence);
5671         fl64.l_start = tswap64(target_fl64->l_start);
5672         fl64.l_len = tswap64(target_fl64->l_len);
5673         fl64.l_pid = tswap32(target_fl64->l_pid);
5674         unlock_user_struct(target_fl64, arg, 0);
5675         ret = get_errno(fcntl(fd, host_cmd, &fl64));
     /* Flag-word commands translate through fcntl_flags_tbl both ways. */
5678     case TARGET_F_GETFL:
5679         ret = get_errno(fcntl(fd, host_cmd, arg));
5681             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
5685     case TARGET_F_SETFL:
5686         ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
5690     case TARGET_F_GETOWN_EX:
5691         ret = get_errno(fcntl(fd, host_cmd, &fox));
5693             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
5694                 return -TARGET_EFAULT;
5695             target_fox->type = tswap32(fox.type);
5696             target_fox->pid = tswap32(fox.pid);
5697             unlock_user_struct(target_fox, arg, 1);
5703     case TARGET_F_SETOWN_EX:
5704         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
5705             return -TARGET_EFAULT;
5706         fox.type = tswap32(target_fox->type);
5707         fox.pid = tswap32(target_fox->pid);
5708         unlock_user_struct(target_fox, arg, 0);
5709         ret = get_errno(fcntl(fd, host_cmd, &fox));
     /* Simple integer-argument commands pass 'arg' straight through. */
5713     case TARGET_F_SETOWN:
5714     case TARGET_F_GETOWN:
5715     case TARGET_F_SETSIG:
5716     case TARGET_F_GETSIG:
5717     case TARGET_F_SETLEASE:
5718     case TARGET_F_GETLEASE:
5719         ret = get_errno(fcntl(fd, host_cmd, arg));
     /* Default: forward the untranslated guest command. */
5723         ret = get_errno(fcntl(fd, cmd, arg));
/* UID/GID width conversion helpers.  The USE_UID16 branch narrows
 * 32-bit host IDs to the guest's 16-bit ABI (bodies largely elided in
 * this excerpt); the !USE_UID16 branch is identity.  -1 is preserved
 * as the "unchanged" sentinel across widths. */
5731 static inline int high2lowuid(int uid)
5739 static inline int high2lowgid(int gid)
5747 static inline int low2highuid(int uid)
5749     if ((int16_t)uid == -1)
5755 static inline int low2highgid(int gid)
5757     if ((int16_t)gid == -1)
5762 static inline int tswapid(int id)
5767 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
5769 #else /* !USE_UID16 */
5770 static inline int high2lowuid(int uid)
5774 static inline int high2lowgid(int gid)
5778 static inline int low2highuid(int uid)
5782 static inline int low2highgid(int gid)
5786 static inline int tswapid(int id)
5791 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
5793 #endif /* USE_UID16 */
5795 /* We must do direct syscalls for setting UID/GID, because we want to
5796  * implement the Linux system call semantics of "change only for this thread",
5797  * not the libc/POSIX semantics of "change for all threads in process".
5798  * (See http://ewontfix.com/17/ for more details.)
5799  * We use the 32-bit version of the syscalls if present; if it is not
5800  * then either the host architecture supports 32-bit UIDs natively with
5801  * the standard syscall, or the 16-bit UID is the best we can do.
 */
/* Pick the 32-bit syscall number when the host defines one. */
5803 #ifdef __NR_setuid32
5804 #define __NR_sys_setuid __NR_setuid32
5806 #define __NR_sys_setuid __NR_setuid
5808 #ifdef __NR_setgid32
5809 #define __NR_sys_setgid __NR_setgid32
5811 #define __NR_sys_setgid __NR_setgid
5813 #ifdef __NR_setresuid32
5814 #define __NR_sys_setresuid __NR_setresuid32
5816 #define __NR_sys_setresuid __NR_setresuid
5818 #ifdef __NR_setresgid32
5819 #define __NR_sys_setresgid __NR_setresgid32
5821 #define __NR_sys_setresgid __NR_setresgid
/* Raw syscall wrappers bypassing the libc process-wide versions. */
5824 _syscall1(int, sys_setuid, uid_t, uid)
5825 _syscall1(int, sys_setgid, gid_t, gid)
5826 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
5827 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
/*
 * syscall_init: one-time setup.  Registers all thunk struct layouts
 * from syscall_types.h, builds the target->host errno table as the
 * inverse of host_to_target_errno_table[], and patches ioctl numbers
 * whose size field is the all-ones placeholder with the real thunk
 * type size.  The ioctl table cursor initialization and loop increment
 * are elided in this excerpt.
 */
5829 void syscall_init(void)
5832     const argtype *arg_type;
5836     thunk_init(STRUCT_MAX);
5838 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
5839 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
5840 #include "syscall_types.h"
5842 #undef STRUCT_SPECIAL
5844     /* Build target_to_host_errno_table[] table from
5845      * host_to_target_errno_table[]. */
5846     for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
5847         target_to_host_errno_table[host_to_target_errno_table[i]] = i;
5850     /* we patch the ioctl size if necessary. We rely on the fact that
5851        no ioctl has all the bits at '1' in the size field */
5853     while (ie->target_cmd != 0) {
5854         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
5855             TARGET_IOC_SIZEMASK) {
5856             arg_type = ie->arg_type;
5857             if (arg_type[0] != TYPE_PTR) {
5858                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
5863             size = thunk_type_size(arg_type, 0);
5864             ie->target_cmd = (ie->target_cmd &
5865                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
5866                 (size << TARGET_IOC_SIZESHIFT);
5869         /* automatic consistency check if same arch */
5870 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
5871     (defined(__x86_64__) && defined(TARGET_X86_64))
5872         if (unlikely(ie->target_cmd != ie->host_cmd)) {
5873             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
5874                     ie->name, ie->target_cmd, ie->host_cmd);
5881 #if TARGET_ABI_BITS == 32
/* Combine two 32-bit syscall register halves into a 64-bit offset,
 * respecting guest endianness; on 64-bit ABIs word0 already holds the
 * whole value (return elided in this excerpt). */
5882 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
5884 #ifdef TARGET_WORDS_BIGENDIAN
5885     return ((uint64_t)word0 << 32) | word1;
5887     return ((uint64_t)word1 << 32) | word0;
5890 #else /* TARGET_ABI_BITS == 32 */
5891 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
5895 #endif /* TARGET_ABI_BITS != 32 */

5897 #ifdef TARGET_NR_truncate64
/* truncate64: some ABIs align the 64-bit offset register pair, shifting
 * the argument slots (the shift itself is elided in this excerpt). */
5898 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
5903     if (regpairs_aligned(cpu_env)) {
5907     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));

5911 #ifdef TARGET_NR_ftruncate64
/* ftruncate64: same register-pair alignment handling as truncate64. */
5912 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
5917     if (regpairs_aligned(cpu_env)) {
5921     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
/* Copy a struct timespec from guest memory into *host_ts, swapping
 * each field (success return elided in this excerpt). */
5925 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
5926                                                abi_ulong target_addr)
5928     struct target_timespec *target_ts;
5930     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
5931         return -TARGET_EFAULT;
5932     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
5933     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
5934     unlock_user_struct(target_ts, target_addr, 0);

/* Inverse direction: write *host_ts out to guest memory. */
5938 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
5939                                                struct timespec *host_ts)
5941     struct target_timespec *target_ts;
5943     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
5944         return -TARGET_EFAULT;
5945     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
5946     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
5947     unlock_user_struct(target_ts, target_addr, 1);
/* Read a struct itimerspec (interval + value timespec pair) from guest
 * memory, byte-swapping each field (success return elided). */
5951 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
5952                                                  abi_ulong target_addr)
5954     struct target_itimerspec *target_itspec;
5956     if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
5957         return -TARGET_EFAULT;
5960     host_itspec->it_interval.tv_sec =
5961         tswapal(target_itspec->it_interval.tv_sec);
5962     host_itspec->it_interval.tv_nsec =
5963         tswapal(target_itspec->it_interval.tv_nsec);
5964     host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
5965     host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
5967     unlock_user_struct(target_itspec, target_addr, 1);

/* Inverse direction: write *host_its out to guest memory. */
5971 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
5972                                                  struct itimerspec *host_its)
5974     struct target_itimerspec *target_itspec;
5976     if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
5977         return -TARGET_EFAULT;
5980     target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
5981     target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
5983     target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
5984     target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
5986     unlock_user_struct(target_itspec, target_addr, 0);
/* Convert a guest struct sigevent into host form: swap the sival union
 * as a pointer-width value (matching signal.c's sigval handling),
 * translate the signal number, and copy notify mode and tid. */
5990 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
5991                                                abi_ulong target_addr)
5993     struct target_sigevent *target_sevp;
5995     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
5996         return -TARGET_EFAULT;
5999     /* This union is awkward on 64 bit systems because it has a 32 bit
6000      * integer and a pointer in it; we follow the conversion approach
6001      * used for handling sigval types in signal.c so the guest should get
6002      * the correct value back even if we did a 64 bit byteswap and it's
6003      * using the 32 bit integer.
     */
6005     host_sevp->sigev_value.sival_ptr =
6006         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6007     host_sevp->sigev_signo =
6008         target_to_host_signal(tswap32(target_sevp->sigev_signo));
6009     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6010     host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6012     unlock_user_struct(target_sevp, target_addr, 1);
6016 #if defined(TARGET_NR_mlockall)
/* Translate guest mlockall(2) flag bits to host MCL_* constants
 * (result init and return elided in this excerpt). */
6017 static inline int target_to_host_mlockall_arg(int arg)
6021     if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
6022         result |= MCL_CURRENT;
6024     if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
6025         result |= MCL_FUTURE;
/*
 * host_to_target_stat64: copy a host struct stat into the guest's
 * stat64 layout.  ARM EABI 32-bit guests use their own
 * target_eabi_stat64 layout (first branch); everything else uses
 * target_stat64 or target_stat depending on
 * TARGET_HAS_STRUCT_STAT64.  Both paths zero the destination first so
 * padding is deterministic.  Returns on success are elided in this
 * excerpt.
 */
6031 static inline abi_long host_to_target_stat64(void *cpu_env,
6032                                              abi_ulong target_addr,
6033                                              struct stat *host_st)
6035 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6036     if (((CPUARMState *)cpu_env)->eabi) {
6037         struct target_eabi_stat64 *target_st;
6039         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6040             return -TARGET_EFAULT;
6041         memset(target_st, 0, sizeof(struct target_eabi_stat64));
6042         __put_user(host_st->st_dev, &target_st->st_dev);
6043         __put_user(host_st->st_ino, &target_st->st_ino);
6044 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
         /* Some targets need the inode duplicated into __st_ino. */
6045         __put_user(host_st->st_ino, &target_st->__st_ino);
6047         __put_user(host_st->st_mode, &target_st->st_mode);
6048         __put_user(host_st->st_nlink, &target_st->st_nlink);
6049         __put_user(host_st->st_uid, &target_st->st_uid);
6050         __put_user(host_st->st_gid, &target_st->st_gid);
6051         __put_user(host_st->st_rdev, &target_st->st_rdev);
6052         __put_user(host_st->st_size, &target_st->st_size);
6053         __put_user(host_st->st_blksize, &target_st->st_blksize);
6054         __put_user(host_st->st_blocks, &target_st->st_blocks);
6055         __put_user(host_st->st_atime, &target_st->target_st_atime);
6056         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6057         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6058         unlock_user_struct(target_st, target_addr, 1);
     /* Generic path: stat64 layout when the target defines one. */
6062 #if defined(TARGET_HAS_STRUCT_STAT64)
6063         struct target_stat64 *target_st;
6065         struct target_stat *target_st;
6068         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6069             return -TARGET_EFAULT;
6070         memset(target_st, 0, sizeof(*target_st));
6071         __put_user(host_st->st_dev, &target_st->st_dev);
6072         __put_user(host_st->st_ino, &target_st->st_ino);
6073 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6074         __put_user(host_st->st_ino, &target_st->__st_ino);
6076         __put_user(host_st->st_mode, &target_st->st_mode);
6077         __put_user(host_st->st_nlink, &target_st->st_nlink);
6078         __put_user(host_st->st_uid, &target_st->st_uid);
6079         __put_user(host_st->st_gid, &target_st->st_gid);
6080         __put_user(host_st->st_rdev, &target_st->st_rdev);
6081         /* XXX: better use of kernel struct */
6082         __put_user(host_st->st_size, &target_st->st_size);
6083         __put_user(host_st->st_blksize, &target_st->st_blksize);
6084         __put_user(host_st->st_blocks, &target_st->st_blocks);
6085         __put_user(host_st->st_atime, &target_st->target_st_atime);
6086         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6087         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6088         unlock_user_struct(target_st, target_addr, 1);
6094 /* ??? Using host futex calls even when target atomic operations
6095    are not really atomic probably breaks things.  However implementing
6096    futexes locally would make futexes shared between multiple processes
6097    tricky.  However they're probably useless because guest atomic
6098    operations won't work either.  */
/*
 * do_futex: forward guest futex(2) to the host via safe_futex on the
 * g2h-translated address.  WAIT/WAIT_BITSET convert the guest timeout;
 * REQUEUE-family ops pass 'timeout' through as an opaque uint32_t.
 * Case labels and several branches are elided in this excerpt.
 */
6099 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
6100                     target_ulong uaddr2, int val3)
6102     struct timespec ts, *pts;
6105     /* ??? We assume FUTEX_* constants are the same on both host
     */
6107 #ifdef FUTEX_CMD_MASK
6108     base_op = op & FUTEX_CMD_MASK;
6114     case FUTEX_WAIT_BITSET:
6117             target_to_host_timespec(pts, timeout);
         /* val must be byte-swapped for WAIT: it is compared against
          * guest memory by the kernel. */
6121         return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
6124         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6126         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6128     case FUTEX_CMP_REQUEUE:
6130         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6131            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6132            But the prototype takes a `struct timespec *'; insert casts
6133            to satisfy the compiler.  We do not need to tswap TIMEOUT
6134            since it's not compared to guest memory.  */
6135         pts = (struct timespec *)(uintptr_t) timeout;
6136         return get_errno(safe_futex(g2h(uaddr), op, val, pts,
6138                                     (base_op == FUTEX_CMP_REQUEUE
6142         return -TARGET_ENOSYS;
6145 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * do_name_to_handle_at: emulate name_to_handle_at(2).  Reads the guest
 * handle_bytes size, calls the host syscall into a temporary host
 * file_handle, then copies the opaque handle back to the guest with the
 * two header fields byte-swapped plus the mount id.  The g_free of 'fh'
 * and the final return are elided in this excerpt.
 */
6146 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
6147                                      abi_long handle, abi_long mount_id,
6150     struct file_handle *target_fh;
6151     struct file_handle *fh;
6155     unsigned int size, total_size;
6157     if (get_user_s32(size, handle)) {
6158         return -TARGET_EFAULT;
6161     name = lock_user_string(pathname);
6163         return -TARGET_EFAULT;
6166     total_size = sizeof(struct file_handle) + size;
6167     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
6169         unlock_user(name, pathname, 0);
6170         return -TARGET_EFAULT;
6173     fh = g_malloc0(total_size);
6174     fh->handle_bytes = size;
6176     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
6177     unlock_user(name, pathname, 0);
6179     /* man name_to_handle_at(2):
6180      * Other than the use of the handle_bytes field, the caller should treat
6181      * the file_handle structure as an opaque data type
     */
6184         memcpy(target_fh, fh, total_size);
6185         target_fh->handle_bytes = tswap32(fh->handle_bytes);
6186         target_fh->handle_type = tswap32(fh->handle_type);
6188         unlock_user(target_fh, handle, total_size);
6190         if (put_user_s32(mid, mount_id)) {
6191             return -TARGET_EFAULT;
6199 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * do_open_by_handle_at: emulate open_by_handle_at(2).  Duplicates the
 * guest file_handle into host memory (fixing up the two byte-swapped
 * header fields), translates the open flags through fcntl_flags_tbl,
 * and forwards to the host.  The g_free of 'fh' and the return are
 * elided in this excerpt.
 */
6200 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
6203     struct file_handle *target_fh;
6204     struct file_handle *fh;
6205     unsigned int size, total_size;
6208     if (get_user_s32(size, handle)) {
6209         return -TARGET_EFAULT;
6212     total_size = sizeof(struct file_handle) + size;
6213     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
6215         return -TARGET_EFAULT;
6218     fh = g_memdup(target_fh, total_size);
6219     fh->handle_bytes = size;
6220     fh->handle_type = tswap32(target_fh->handle_type);
6222     ret = get_errno(open_by_handle_at(mount_fd, fh,
6223                                       target_to_host_bitmask(flags, fcntl_flags_tbl)));
6227     unlock_user(target_fh, handle, total_size);
6233 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)

6235 /* signalfd siginfo conversion */
/*
 * host_to_target_signalfd_siginfo: byte-swap a struct signalfd_siginfo
 * read from a host signalfd into guest byte order and translate the
 * signal number.  NOTE(review): the sole visible caller
 * (host_to_target_data_signalfd) passes the same pointer for tinfo and
 * info, i.e. in-place conversion; two lines below read from tinfo
 * (the destination) and are only correct under that aliasing — see
 * notes inline.
 */
6238 host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
6239                                 const struct signalfd_siginfo *info)
6241     int sig = host_to_target_signal(info->ssi_signo);
6243     /* linux/signalfd.h defines a ssi_addr_lsb
6244      * not defined in sys/signalfd.h but used by some kernels
     */
6247 #ifdef BUS_MCEERR_AO
     /* NOTE(review): tests tinfo->ssi_signo/ssi_code BEFORE they are
      * swapped below — valid only because tinfo aliases info here. */
6248     if (tinfo->ssi_signo == SIGBUS &&
6249         (tinfo->ssi_code == BUS_MCEERR_AR ||
6250          tinfo->ssi_code == BUS_MCEERR_AO)) {
6251         uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
6252         uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
6253         *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
6257     tinfo->ssi_signo = tswap32(sig);
     /* NOTE(review): reads tinfo->ssi_errno rather than info->ssi_errno;
      * harmless only while tinfo == info — confirm if new callers appear. */
6258     tinfo->ssi_errno = tswap32(tinfo->ssi_errno);
6259     tinfo->ssi_code = tswap32(info->ssi_code);
6260     tinfo->ssi_pid = tswap32(info->ssi_pid);
6261     tinfo->ssi_uid = tswap32(info->ssi_uid);
6262     tinfo->ssi_fd = tswap32(info->ssi_fd);
6263     tinfo->ssi_tid = tswap32(info->ssi_tid);
6264     tinfo->ssi_band = tswap32(info->ssi_band);
6265     tinfo->ssi_overrun = tswap32(info->ssi_overrun);
6266     tinfo->ssi_trapno = tswap32(info->ssi_trapno);
6267     tinfo->ssi_status = tswap32(info->ssi_status);
6268     tinfo->ssi_int = tswap32(info->ssi_int);
6269     tinfo->ssi_ptr = tswap64(info->ssi_ptr);
6270     tinfo->ssi_utime = tswap64(info->ssi_utime);
6271     tinfo->ssi_stime = tswap64(info->ssi_stime);
6272     tinfo->ssi_addr = tswap64(info->ssi_addr);
/* fd-read filter: convert each signalfd_siginfo record in the buffer
 * in place (tinfo == info aliasing is relied on by the converter). */
6275 static abi_long host_to_target_data_signalfd(void *buf, size_t len)
6279     for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) {
6280         host_to_target_signalfd_siginfo(buf + i, buf + i);

/* Registers the conversion above on fds returned by signalfd(). */
6286 static TargetFdTrans target_signalfd_trans = {
6287     .host_to_target_data = host_to_target_data_signalfd,

/*
 * do_signalfd4: emulate signalfd4(2).  Validates flags, converts the
 * guest sigset and flag bits to host form, calls signalfd(), and
 * registers the fd translator so reads are byte-swapped.  The final
 * return is elided in this excerpt.
 */
6290 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
6293     target_sigset_t *target_mask;
6297     if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
6298         return -TARGET_EINVAL;
6300     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
6301         return -TARGET_EFAULT;
6304     target_to_host_sigset(&host_mask, target_mask);
6306     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
6308     ret = get_errno(signalfd(fd, &host_mask, host_flags));
6310         fd_trans_register(ret, &target_signalfd_trans);
6313     unlock_user_struct(target_mask, mask, 0);
6319 /* Map host to target signal numbers for the wait family of syscalls.
6320    Assume all other status bits are the same.  */
/* Only the signal number (low bits for termination, byte 1 for stop) is
 * translated; the remaining status bits pass through unchanged. */
6321 int host_to_target_waitstatus(int status)
6323     if (WIFSIGNALED(status)) {
6324         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
6326     if (WIFSTOPPED(status)) {
6327         return (host_to_target_signal(WSTOPSIG(status)) << 8)
/*
 * open_self_cmdline: emulate reads of /proc/self/cmdline by copying the
 * host file into 'fd', skipping the first NUL-terminated string (the
 * qemu binary path) so the guest sees its own argv[0] first.  Loop
 * structure and error paths are partially elided in this excerpt.
 */
6333 static int open_self_cmdline(void *cpu_env, int fd)
6336     bool word_skipped = false;
6338     fd_orig = open("/proc/self/cmdline", O_RDONLY);
6348         nb_read = read(fd_orig, buf, sizeof(buf));
6351             fd_orig = close(fd_orig);
6354         } else if (nb_read == 0) {
6358         if (!word_skipped) {
6359             /* Skip the first string, which is the path to qemu-*-static
6360                instead of the actual command. */
6361             cp_buf = memchr(buf, 0, sizeof(buf));
6363                 /* Null byte found, skip one string */
6365                 nb_read -= cp_buf - buf;
6366                 word_skipped = true;
6371             if (write(fd, cp_buf, nb_read) != nb_read) {
6380     return close(fd_orig);
/* Emulate /proc/self/maps: parse the host maps file line by line and
 * re-emit each mapping with guest (g2h/h2g translated) addresses,
 * keeping only ranges that correspond to valid guest pages.  The
 * guest stack mapping is labelled " [stack]". */
6383 static int open_self_maps(void *cpu_env, int fd)
6385     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6386     TaskState *ts = cpu->opaque;
6392     fp = fopen("/proc/self/maps", "r");
6397     while ((read = getline(&line, &len, fp)) != -1) {
6398         int fields, dev_maj, dev_min, inode;
6399         uint64_t min, max, offset;
6400         char flag_r, flag_w, flag_x, flag_p;
6401         char path[512] = "";
        /* NOTE(review): the " %512s" conversion allows 512 characters
         * plus a terminating NUL -- one byte more than path[] holds.
         * The width should be 511 for a 512-byte buffer; confirm and
         * fix separately. */
6402         fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
6403                         " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
6404                         &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
        /* A maps line has 10 fields plus an optional pathname. */
6406         if ((fields < 10) || (fields > 11)) {
6409         if (h2g_valid(min)) {
6410             int flags = page_get_flags(h2g(min));
            /* Clamp the top of the range to the last valid guest address. */
6411             max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX);
6412             if (page_check_range(h2g(min), max - min, flags) == -1) {
6415             if (h2g(min) == ts->info->stack_limit) {
6416                 pstrcpy(path, sizeof(path), " [stack]");
6418             dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
6419                     " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
6420                     h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
6421                     flag_x, flag_p, offset, dev_maj, dev_min, inode,
6422                     path[0] ? " " : "", path);
/* Emulate /proc/self/stat: emit the 44 space-separated fields, filling
 * in only the ones that matter to guests -- field 0 (pid, presumably;
 * the value's origin is outside this view -- confirm), field 1 (comm,
 * taken from the guest argv[0]) and field 27 (start of stack, from the
 * guest image info).  Every other field is written as a literal 0. */
6432 static int open_self_stat(void *cpu_env, int fd)
6434     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6435     TaskState *ts = cpu->opaque;
6436     abi_ulong start_stack = ts->info->start_stack;
6439     for (i = 0; i < 44; i++) {
6447             snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6448         } else if (i == 1) {
            /* comm: the guest program name, parenthesised as in procfs. */
6450             snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
6451         } else if (i == 27) {
6454             snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6456             /* for the rest, there is MasterCard */
6457             snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
6461         if (write(fd, buf, len) != len) {
/* Emulate /proc/self/auxv: copy the guest's saved auxiliary vector
 * (stored on the target process stack at exec time) out of guest
 * memory into `fd`, then rewind the fd so the caller reads from the
 * start. */
6469 static int open_self_auxv(void *cpu_env, int fd)
6471     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6472     TaskState *ts = cpu->opaque;
6473     abi_ulong auxv = ts->info->saved_auxv;
6474     abi_ulong len = ts->info->auxv_len;
6478      * Auxiliary vector is stored in target process stack.
6479      * read in whole auxv vector and copy it to file
6481     ptr = lock_user(VERIFY_READ, auxv, len, 0);
6485         r = write(fd, ptr, len);
6492     lseek(fd, 0, SEEK_SET);
6493     unlock_user(ptr, auxv, len);
/* Return nonzero if `filename` names our own proc entry `entry`, i.e.
 * "/proc/self/<entry>" or "/proc/<our-pid>/<entry>".  Used to decide
 * when an open/readlink must be faked for the guest. */
6499 static int is_proc_myself(const char *filename, const char *entry)
6501     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
6502         filename += strlen("/proc/");
6503         if (!strncmp(filename, "self/", strlen("self/"))) {
6504             filename += strlen("self/");
        /* Numeric pid directory: compare against our own getpid(). */
6505         } else if (*filename >= '1' && *filename <= '9') {
6507             snprintf(myself, sizeof(myself), "%d/", getpid());
6508             if (!strncmp(filename, myself, strlen(myself))) {
6509                 filename += strlen(myself);
6516         if (!strcmp(filename, entry)) {
6523 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-path comparator for the fake_open table; only compiled when
 * host and target byte order differ (the case where /proc data must
 * be byte-swapped for the guest). */
6524 static int is_proc(const char *filename, const char *entry)
6526     return strcmp(filename, entry) == 0;
/* Emulate /proc/net/route for cross-endian guests: copy the header
 * line verbatim, then re-emit each route entry with the destination,
 * gateway and netmask words byte-swapped into target order.  The
 * remaining numeric columns are endian-neutral text and pass through. */
6529 static int open_net_route(void *cpu_env, int fd)
6536     fp = fopen("/proc/net/route", "r");
    /* First line is the column-header row; forward it unchanged. */
6543     read = getline(&line, &len, fp);
6544     dprintf(fd, "%s", line);
6548     while ((read = getline(&line, &len, fp)) != -1) {
6550         uint32_t dest, gw, mask;
6551         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
6552         sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6553                iface, &dest, &gw, &flags, &refcnt, &use, &metric,
6554                &mask, &mtu, &window, &irtt);
6555         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6556                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
6557                 metric, tswap32(mask), mtu, window, irtt);
/* Guest openat(): intercept paths that must be faked for the guest.
 * - "/proc/self/exe" (or "/proc/<pid>/exe") resolves to the exec fd
 *   or the recorded exec_path rather than the qemu binary.
 * - Entries in fakes[] (maps/stat/auxv/cmdline, plus /proc/net/route
 *   on cross-endian builds) are synthesised into an unlinked temp file
 *   by their fill() callback, and that fd is returned.
 * Everything else falls through to a real safe_openat(). */
6567 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
6570     const char *filename;
6571     int (*fill)(void *cpu_env, int fd);
6572     int (*cmp)(const char *s1, const char *s2);
6574     const struct fake_open *fake_open;
6575     static const struct fake_open fakes[] = {
6576         { "maps", open_self_maps, is_proc_myself },
6577         { "stat", open_self_stat, is_proc_myself },
6578         { "auxv", open_self_auxv, is_proc_myself },
6579         { "cmdline", open_self_cmdline, is_proc_myself },
6580 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
6581         { "/proc/net/route", open_net_route, is_proc },
6583         { NULL, NULL, NULL }
6586     if (is_proc_myself(pathname, "exe")) {
6587         int execfd = qemu_getauxval(AT_EXECFD);
6588         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
6591     for (fake_open = fakes; fake_open->filename; fake_open++) {
6592         if (fake_open->cmp(pathname, fake_open->filename)) {
6597     if (fake_open->filename) {
6599         char filename[PATH_MAX];
6602         /* create temporary file to map stat to */
6603         tmpdir = getenv("TMPDIR");
        /* presumably tmpdir falls back to "/tmp" when unset -- that
         * assignment is outside this view; confirm before relying on it. */
6606         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
6607         fd = mkstemp(filename);
6613         if ((r = fake_open->fill(cpu_env, fd))) {
        /* Rewind so the guest reads the synthesised content from 0. */
6619         lseek(fd, 0, SEEK_SET);
6624     return safe_openat(dirfd, path(pathname), flags, mode);
/* Guest-visible POSIX timer IDs are the internal g_posix_timers index
 * tagged with TIMER_MAGIC in the upper 16 bits, so stale or forged IDs
 * can be rejected. */
6627 #define TIMER_MAGIC 0x0caf0000
6628 #define TIMER_MAGIC_MASK 0xffff0000
6630 /* Convert QEMU provided timer ID back to internal 16bit index format */
6631 static target_timer_t get_timer_id(abi_long arg)
6633     target_timer_t timerid = arg;
    /* Reject anything not carrying our magic tag in the high bits. */
6635     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
6636         return -TARGET_EINVAL;
    /* NOTE(review): stripping of the magic bits before this bound check
     * is not visible in this excerpt -- confirm timerid is masked down
     * to the low 16 bits before the comparison below. */
6641     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
6642         return -TARGET_EINVAL;
6648 /* do_syscall() should always have a single exit point at the end so
6649 that actions, such as logging of syscall results, can be performed.
6650 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
6651 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
6652 abi_long arg2, abi_long arg3, abi_long arg4,
6653 abi_long arg5, abi_long arg6, abi_long arg7,
6656 CPUState *cpu = ENV_GET_CPU(cpu_env);
6662 #if defined(DEBUG_ERESTARTSYS)
6663 /* Debug-only code for exercising the syscall-restart code paths
6664 * in the per-architecture cpu main loops: restart every syscall
6665 * the guest makes once before letting it through.
6672 return -TARGET_ERESTARTSYS;
6678 gemu_log("syscall %d", num);
6681 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
6684 case TARGET_NR_exit:
6685 /* In old applications this may be used to implement _exit(2).
6686 However in threaded applictions it is used for thread termination,
6687 and _exit_group is used for application termination.
6688 Do thread termination if we have more then one thread. */
6690 if (block_signals()) {
6691 ret = -TARGET_ERESTARTSYS;
6695 if (CPU_NEXT(first_cpu)) {
6699 /* Remove the CPU from the list. */
6700 QTAILQ_REMOVE(&cpus, cpu, node);
6703 if (ts->child_tidptr) {
6704 put_user_u32(0, ts->child_tidptr);
6705 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
6709 object_unref(OBJECT(cpu));
6711 rcu_unregister_thread();
6717 gdb_exit(cpu_env, arg1);
6719 ret = 0; /* avoid warning */
6721 case TARGET_NR_read:
6725 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
6727 ret = get_errno(safe_read(arg1, p, arg3));
6729 fd_trans_host_to_target_data(arg1)) {
6730 ret = fd_trans_host_to_target_data(arg1)(p, ret);
6732 unlock_user(p, arg2, ret);
6735 case TARGET_NR_write:
6736 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
6738 ret = get_errno(safe_write(arg1, p, arg3));
6739 unlock_user(p, arg2, 0);
6741 #ifdef TARGET_NR_open
6742 case TARGET_NR_open:
6743 if (!(p = lock_user_string(arg1)))
6745 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
6746 target_to_host_bitmask(arg2, fcntl_flags_tbl),
6748 fd_trans_unregister(ret);
6749 unlock_user(p, arg1, 0);
6752 case TARGET_NR_openat:
6753 if (!(p = lock_user_string(arg2)))
6755 ret = get_errno(do_openat(cpu_env, arg1, p,
6756 target_to_host_bitmask(arg3, fcntl_flags_tbl),
6758 fd_trans_unregister(ret);
6759 unlock_user(p, arg2, 0);
6761 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6762 case TARGET_NR_name_to_handle_at:
6763 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
6766 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6767 case TARGET_NR_open_by_handle_at:
6768 ret = do_open_by_handle_at(arg1, arg2, arg3);
6769 fd_trans_unregister(ret);
6772 case TARGET_NR_close:
6773 fd_trans_unregister(arg1);
6774 ret = get_errno(close(arg1));
6779 #ifdef TARGET_NR_fork
6780 case TARGET_NR_fork:
6781 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
6784 #ifdef TARGET_NR_waitpid
6785 case TARGET_NR_waitpid:
6788 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
6789 if (!is_error(ret) && arg2 && ret
6790 && put_user_s32(host_to_target_waitstatus(status), arg2))
6795 #ifdef TARGET_NR_waitid
6796 case TARGET_NR_waitid:
6800 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
6801 if (!is_error(ret) && arg3 && info.si_pid != 0) {
6802 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
6804 host_to_target_siginfo(p, &info);
6805 unlock_user(p, arg3, sizeof(target_siginfo_t));
6810 #ifdef TARGET_NR_creat /* not on alpha */
6811 case TARGET_NR_creat:
6812 if (!(p = lock_user_string(arg1)))
6814 ret = get_errno(creat(p, arg2));
6815 fd_trans_unregister(ret);
6816 unlock_user(p, arg1, 0);
6819 #ifdef TARGET_NR_link
6820 case TARGET_NR_link:
6823 p = lock_user_string(arg1);
6824 p2 = lock_user_string(arg2);
6826 ret = -TARGET_EFAULT;
6828 ret = get_errno(link(p, p2));
6829 unlock_user(p2, arg2, 0);
6830 unlock_user(p, arg1, 0);
6834 #if defined(TARGET_NR_linkat)
6835 case TARGET_NR_linkat:
6840 p = lock_user_string(arg2);
6841 p2 = lock_user_string(arg4);
6843 ret = -TARGET_EFAULT;
6845 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
6846 unlock_user(p, arg2, 0);
6847 unlock_user(p2, arg4, 0);
6851 #ifdef TARGET_NR_unlink
6852 case TARGET_NR_unlink:
6853 if (!(p = lock_user_string(arg1)))
6855 ret = get_errno(unlink(p));
6856 unlock_user(p, arg1, 0);
6859 #if defined(TARGET_NR_unlinkat)
6860 case TARGET_NR_unlinkat:
6861 if (!(p = lock_user_string(arg2)))
6863 ret = get_errno(unlinkat(arg1, p, arg3));
6864 unlock_user(p, arg2, 0);
6867 case TARGET_NR_execve:
6869 char **argp, **envp;
6872 abi_ulong guest_argp;
6873 abi_ulong guest_envp;
6880 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
6881 if (get_user_ual(addr, gp))
6889 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
6890 if (get_user_ual(addr, gp))
6897 argp = alloca((argc + 1) * sizeof(void *));
6898 envp = alloca((envc + 1) * sizeof(void *));
6900 for (gp = guest_argp, q = argp; gp;
6901 gp += sizeof(abi_ulong), q++) {
6902 if (get_user_ual(addr, gp))
6906 if (!(*q = lock_user_string(addr)))
6908 total_size += strlen(*q) + 1;
6912 for (gp = guest_envp, q = envp; gp;
6913 gp += sizeof(abi_ulong), q++) {
6914 if (get_user_ual(addr, gp))
6918 if (!(*q = lock_user_string(addr)))
6920 total_size += strlen(*q) + 1;
6924 if (!(p = lock_user_string(arg1)))
6926 /* Although execve() is not an interruptible syscall it is
6927 * a special case where we must use the safe_syscall wrapper:
6928 * if we allow a signal to happen before we make the host
6929 * syscall then we will 'lose' it, because at the point of
6930 * execve the process leaves QEMU's control. So we use the
6931 * safe syscall wrapper to ensure that we either take the
6932 * signal as a guest signal, or else it does not happen
6933 * before the execve completes and makes it the other
6934 * program's problem.
6936 ret = get_errno(safe_execve(p, argp, envp));
6937 unlock_user(p, arg1, 0);
6942 ret = -TARGET_EFAULT;
6945 for (gp = guest_argp, q = argp; *q;
6946 gp += sizeof(abi_ulong), q++) {
6947 if (get_user_ual(addr, gp)
6950 unlock_user(*q, addr, 0);
6952 for (gp = guest_envp, q = envp; *q;
6953 gp += sizeof(abi_ulong), q++) {
6954 if (get_user_ual(addr, gp)
6957 unlock_user(*q, addr, 0);
6961 case TARGET_NR_chdir:
6962 if (!(p = lock_user_string(arg1)))
6964 ret = get_errno(chdir(p));
6965 unlock_user(p, arg1, 0);
6967 #ifdef TARGET_NR_time
6968 case TARGET_NR_time:
6971 ret = get_errno(time(&host_time));
6974 && put_user_sal(host_time, arg1))
6979 #ifdef TARGET_NR_mknod
6980 case TARGET_NR_mknod:
6981 if (!(p = lock_user_string(arg1)))
6983 ret = get_errno(mknod(p, arg2, arg3));
6984 unlock_user(p, arg1, 0);
6987 #if defined(TARGET_NR_mknodat)
6988 case TARGET_NR_mknodat:
6989 if (!(p = lock_user_string(arg2)))
6991 ret = get_errno(mknodat(arg1, p, arg3, arg4));
6992 unlock_user(p, arg2, 0);
6995 #ifdef TARGET_NR_chmod
6996 case TARGET_NR_chmod:
6997 if (!(p = lock_user_string(arg1)))
6999 ret = get_errno(chmod(p, arg2));
7000 unlock_user(p, arg1, 0);
7003 #ifdef TARGET_NR_break
7004 case TARGET_NR_break:
7007 #ifdef TARGET_NR_oldstat
7008 case TARGET_NR_oldstat:
7011 case TARGET_NR_lseek:
7012 ret = get_errno(lseek(arg1, arg2, arg3));
7014 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7015 /* Alpha specific */
7016 case TARGET_NR_getxpid:
7017 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7018 ret = get_errno(getpid());
7021 #ifdef TARGET_NR_getpid
7022 case TARGET_NR_getpid:
7023 ret = get_errno(getpid());
7026 case TARGET_NR_mount:
7028 /* need to look at the data field */
7032 p = lock_user_string(arg1);
7040 p2 = lock_user_string(arg2);
7043 unlock_user(p, arg1, 0);
7049 p3 = lock_user_string(arg3);
7052 unlock_user(p, arg1, 0);
7054 unlock_user(p2, arg2, 0);
7061 /* FIXME - arg5 should be locked, but it isn't clear how to
7062 * do that since it's not guaranteed to be a NULL-terminated
7066 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7068 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7070 ret = get_errno(ret);
7073 unlock_user(p, arg1, 0);
7075 unlock_user(p2, arg2, 0);
7077 unlock_user(p3, arg3, 0);
7081 #ifdef TARGET_NR_umount
7082 case TARGET_NR_umount:
7083 if (!(p = lock_user_string(arg1)))
7085 ret = get_errno(umount(p));
7086 unlock_user(p, arg1, 0);
7089 #ifdef TARGET_NR_stime /* not on alpha */
7090 case TARGET_NR_stime:
7093 if (get_user_sal(host_time, arg1))
7095 ret = get_errno(stime(&host_time));
7099 case TARGET_NR_ptrace:
7101 #ifdef TARGET_NR_alarm /* not on alpha */
7102 case TARGET_NR_alarm:
7106 #ifdef TARGET_NR_oldfstat
7107 case TARGET_NR_oldfstat:
7110 #ifdef TARGET_NR_pause /* not on alpha */
7111 case TARGET_NR_pause:
7112 if (!block_signals()) {
7113 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7115 ret = -TARGET_EINTR;
7118 #ifdef TARGET_NR_utime
7119 case TARGET_NR_utime:
7121 struct utimbuf tbuf, *host_tbuf;
7122 struct target_utimbuf *target_tbuf;
7124 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
7126 tbuf.actime = tswapal(target_tbuf->actime);
7127 tbuf.modtime = tswapal(target_tbuf->modtime);
7128 unlock_user_struct(target_tbuf, arg2, 0);
7133 if (!(p = lock_user_string(arg1)))
7135 ret = get_errno(utime(p, host_tbuf));
7136 unlock_user(p, arg1, 0);
7140 #ifdef TARGET_NR_utimes
7141 case TARGET_NR_utimes:
7143 struct timeval *tvp, tv[2];
7145 if (copy_from_user_timeval(&tv[0], arg2)
7146 || copy_from_user_timeval(&tv[1],
7147 arg2 + sizeof(struct target_timeval)))
7153 if (!(p = lock_user_string(arg1)))
7155 ret = get_errno(utimes(p, tvp));
7156 unlock_user(p, arg1, 0);
7160 #if defined(TARGET_NR_futimesat)
7161 case TARGET_NR_futimesat:
7163 struct timeval *tvp, tv[2];
7165 if (copy_from_user_timeval(&tv[0], arg3)
7166 || copy_from_user_timeval(&tv[1],
7167 arg3 + sizeof(struct target_timeval)))
7173 if (!(p = lock_user_string(arg2)))
7175 ret = get_errno(futimesat(arg1, path(p), tvp));
7176 unlock_user(p, arg2, 0);
7180 #ifdef TARGET_NR_stty
7181 case TARGET_NR_stty:
7184 #ifdef TARGET_NR_gtty
7185 case TARGET_NR_gtty:
7188 #ifdef TARGET_NR_access
7189 case TARGET_NR_access:
7190 if (!(p = lock_user_string(arg1)))
7192 ret = get_errno(access(path(p), arg2));
7193 unlock_user(p, arg1, 0);
7196 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7197 case TARGET_NR_faccessat:
7198 if (!(p = lock_user_string(arg2)))
7200 ret = get_errno(faccessat(arg1, p, arg3, 0));
7201 unlock_user(p, arg2, 0);
7204 #ifdef TARGET_NR_nice /* not on alpha */
7205 case TARGET_NR_nice:
7206 ret = get_errno(nice(arg1));
7209 #ifdef TARGET_NR_ftime
7210 case TARGET_NR_ftime:
7213 case TARGET_NR_sync:
7217 case TARGET_NR_kill:
7218 ret = get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
7220 #ifdef TARGET_NR_rename
7221 case TARGET_NR_rename:
7224 p = lock_user_string(arg1);
7225 p2 = lock_user_string(arg2);
7227 ret = -TARGET_EFAULT;
7229 ret = get_errno(rename(p, p2));
7230 unlock_user(p2, arg2, 0);
7231 unlock_user(p, arg1, 0);
7235 #if defined(TARGET_NR_renameat)
7236 case TARGET_NR_renameat:
7239 p = lock_user_string(arg2);
7240 p2 = lock_user_string(arg4);
7242 ret = -TARGET_EFAULT;
7244 ret = get_errno(renameat(arg1, p, arg3, p2));
7245 unlock_user(p2, arg4, 0);
7246 unlock_user(p, arg2, 0);
7250 #ifdef TARGET_NR_mkdir
7251 case TARGET_NR_mkdir:
7252 if (!(p = lock_user_string(arg1)))
7254 ret = get_errno(mkdir(p, arg2));
7255 unlock_user(p, arg1, 0);
7258 #if defined(TARGET_NR_mkdirat)
7259 case TARGET_NR_mkdirat:
7260 if (!(p = lock_user_string(arg2)))
7262 ret = get_errno(mkdirat(arg1, p, arg3));
7263 unlock_user(p, arg2, 0);
7266 #ifdef TARGET_NR_rmdir
7267 case TARGET_NR_rmdir:
7268 if (!(p = lock_user_string(arg1)))
7270 ret = get_errno(rmdir(p));
7271 unlock_user(p, arg1, 0);
7275 ret = get_errno(dup(arg1));
7277 fd_trans_dup(arg1, ret);
7280 #ifdef TARGET_NR_pipe
7281 case TARGET_NR_pipe:
7282 ret = do_pipe(cpu_env, arg1, 0, 0);
7285 #ifdef TARGET_NR_pipe2
7286 case TARGET_NR_pipe2:
7287 ret = do_pipe(cpu_env, arg1,
7288 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
7291 case TARGET_NR_times:
7293 struct target_tms *tmsp;
7295 ret = get_errno(times(&tms));
7297 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
7300 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
7301 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
7302 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
7303 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
7306 ret = host_to_target_clock_t(ret);
7309 #ifdef TARGET_NR_prof
7310 case TARGET_NR_prof:
7313 #ifdef TARGET_NR_signal
7314 case TARGET_NR_signal:
7317 case TARGET_NR_acct:
7319 ret = get_errno(acct(NULL));
7321 if (!(p = lock_user_string(arg1)))
7323 ret = get_errno(acct(path(p)));
7324 unlock_user(p, arg1, 0);
7327 #ifdef TARGET_NR_umount2
7328 case TARGET_NR_umount2:
7329 if (!(p = lock_user_string(arg1)))
7331 ret = get_errno(umount2(p, arg2));
7332 unlock_user(p, arg1, 0);
7335 #ifdef TARGET_NR_lock
7336 case TARGET_NR_lock:
7339 case TARGET_NR_ioctl:
7340 ret = do_ioctl(arg1, arg2, arg3);
7342 case TARGET_NR_fcntl:
7343 ret = do_fcntl(arg1, arg2, arg3);
7345 #ifdef TARGET_NR_mpx
7349 case TARGET_NR_setpgid:
7350 ret = get_errno(setpgid(arg1, arg2));
7352 #ifdef TARGET_NR_ulimit
7353 case TARGET_NR_ulimit:
7356 #ifdef TARGET_NR_oldolduname
7357 case TARGET_NR_oldolduname:
7360 case TARGET_NR_umask:
7361 ret = get_errno(umask(arg1));
7363 case TARGET_NR_chroot:
7364 if (!(p = lock_user_string(arg1)))
7366 ret = get_errno(chroot(p));
7367 unlock_user(p, arg1, 0);
7369 #ifdef TARGET_NR_ustat
7370 case TARGET_NR_ustat:
7373 #ifdef TARGET_NR_dup2
7374 case TARGET_NR_dup2:
7375 ret = get_errno(dup2(arg1, arg2));
7377 fd_trans_dup(arg1, arg2);
7381 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
7382 case TARGET_NR_dup3:
7383 ret = get_errno(dup3(arg1, arg2, arg3));
7385 fd_trans_dup(arg1, arg2);
7389 #ifdef TARGET_NR_getppid /* not on alpha */
7390 case TARGET_NR_getppid:
7391 ret = get_errno(getppid());
7394 #ifdef TARGET_NR_getpgrp
7395 case TARGET_NR_getpgrp:
7396 ret = get_errno(getpgrp());
7399 case TARGET_NR_setsid:
7400 ret = get_errno(setsid());
7402 #ifdef TARGET_NR_sigaction
7403 case TARGET_NR_sigaction:
7405 #if defined(TARGET_ALPHA)
7406 struct target_sigaction act, oact, *pact = 0;
7407 struct target_old_sigaction *old_act;
7409 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7411 act._sa_handler = old_act->_sa_handler;
7412 target_siginitset(&act.sa_mask, old_act->sa_mask);
7413 act.sa_flags = old_act->sa_flags;
7414 act.sa_restorer = 0;
7415 unlock_user_struct(old_act, arg2, 0);
7418 ret = get_errno(do_sigaction(arg1, pact, &oact));
7419 if (!is_error(ret) && arg3) {
7420 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7422 old_act->_sa_handler = oact._sa_handler;
7423 old_act->sa_mask = oact.sa_mask.sig[0];
7424 old_act->sa_flags = oact.sa_flags;
7425 unlock_user_struct(old_act, arg3, 1);
7427 #elif defined(TARGET_MIPS)
7428 struct target_sigaction act, oact, *pact, *old_act;
7431 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7433 act._sa_handler = old_act->_sa_handler;
7434 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
7435 act.sa_flags = old_act->sa_flags;
7436 unlock_user_struct(old_act, arg2, 0);
7442 ret = get_errno(do_sigaction(arg1, pact, &oact));
7444 if (!is_error(ret) && arg3) {
7445 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7447 old_act->_sa_handler = oact._sa_handler;
7448 old_act->sa_flags = oact.sa_flags;
7449 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
7450 old_act->sa_mask.sig[1] = 0;
7451 old_act->sa_mask.sig[2] = 0;
7452 old_act->sa_mask.sig[3] = 0;
7453 unlock_user_struct(old_act, arg3, 1);
7456 struct target_old_sigaction *old_act;
7457 struct target_sigaction act, oact, *pact;
7459 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7461 act._sa_handler = old_act->_sa_handler;
7462 target_siginitset(&act.sa_mask, old_act->sa_mask);
7463 act.sa_flags = old_act->sa_flags;
7464 act.sa_restorer = old_act->sa_restorer;
7465 unlock_user_struct(old_act, arg2, 0);
7470 ret = get_errno(do_sigaction(arg1, pact, &oact));
7471 if (!is_error(ret) && arg3) {
7472 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7474 old_act->_sa_handler = oact._sa_handler;
7475 old_act->sa_mask = oact.sa_mask.sig[0];
7476 old_act->sa_flags = oact.sa_flags;
7477 old_act->sa_restorer = oact.sa_restorer;
7478 unlock_user_struct(old_act, arg3, 1);
7484 case TARGET_NR_rt_sigaction:
7486 #if defined(TARGET_ALPHA)
7487 struct target_sigaction act, oact, *pact = 0;
7488 struct target_rt_sigaction *rt_act;
7489 /* ??? arg4 == sizeof(sigset_t). */
7491 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
7493 act._sa_handler = rt_act->_sa_handler;
7494 act.sa_mask = rt_act->sa_mask;
7495 act.sa_flags = rt_act->sa_flags;
7496 act.sa_restorer = arg5;
7497 unlock_user_struct(rt_act, arg2, 0);
7500 ret = get_errno(do_sigaction(arg1, pact, &oact));
7501 if (!is_error(ret) && arg3) {
7502 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
7504 rt_act->_sa_handler = oact._sa_handler;
7505 rt_act->sa_mask = oact.sa_mask;
7506 rt_act->sa_flags = oact.sa_flags;
7507 unlock_user_struct(rt_act, arg3, 1);
7510 struct target_sigaction *act;
7511 struct target_sigaction *oact;
7514 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
7519 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
7520 ret = -TARGET_EFAULT;
7521 goto rt_sigaction_fail;
7525 ret = get_errno(do_sigaction(arg1, act, oact));
7528 unlock_user_struct(act, arg2, 0);
7530 unlock_user_struct(oact, arg3, 1);
7534 #ifdef TARGET_NR_sgetmask /* not on alpha */
7535 case TARGET_NR_sgetmask:
7538 abi_ulong target_set;
7539 ret = do_sigprocmask(0, NULL, &cur_set);
7541 host_to_target_old_sigset(&target_set, &cur_set);
7547 #ifdef TARGET_NR_ssetmask /* not on alpha */
7548 case TARGET_NR_ssetmask:
7550 sigset_t set, oset, cur_set;
7551 abi_ulong target_set = arg1;
7552 /* We only have one word of the new mask so we must read
7553 * the rest of it with do_sigprocmask() and OR in this word.
7554 * We are guaranteed that a do_sigprocmask() that only queries
7555 * the signal mask will not fail.
7557 ret = do_sigprocmask(0, NULL, &cur_set);
7559 target_to_host_old_sigset(&set, &target_set);
7560 sigorset(&set, &set, &cur_set);
7561 ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
7563 host_to_target_old_sigset(&target_set, &oset);
7569 #ifdef TARGET_NR_sigprocmask
7570 case TARGET_NR_sigprocmask:
7572 #if defined(TARGET_ALPHA)
7573 sigset_t set, oldset;
7578 case TARGET_SIG_BLOCK:
7581 case TARGET_SIG_UNBLOCK:
7584 case TARGET_SIG_SETMASK:
7588 ret = -TARGET_EINVAL;
7592 target_to_host_old_sigset(&set, &mask);
7594 ret = do_sigprocmask(how, &set, &oldset);
7595 if (!is_error(ret)) {
7596 host_to_target_old_sigset(&mask, &oldset);
7598 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
7601 sigset_t set, oldset, *set_ptr;
7606 case TARGET_SIG_BLOCK:
7609 case TARGET_SIG_UNBLOCK:
7612 case TARGET_SIG_SETMASK:
7616 ret = -TARGET_EINVAL;
7619 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
7621 target_to_host_old_sigset(&set, p);
7622 unlock_user(p, arg2, 0);
7628 ret = do_sigprocmask(how, set_ptr, &oldset);
7629 if (!is_error(ret) && arg3) {
7630 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
7632 host_to_target_old_sigset(p, &oldset);
7633 unlock_user(p, arg3, sizeof(target_sigset_t));
7639 case TARGET_NR_rt_sigprocmask:
7642 sigset_t set, oldset, *set_ptr;
7646 case TARGET_SIG_BLOCK:
7649 case TARGET_SIG_UNBLOCK:
7652 case TARGET_SIG_SETMASK:
7656 ret = -TARGET_EINVAL;
7659 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
7661 target_to_host_sigset(&set, p);
7662 unlock_user(p, arg2, 0);
7668 ret = do_sigprocmask(how, set_ptr, &oldset);
7669 if (!is_error(ret) && arg3) {
7670 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
7672 host_to_target_sigset(p, &oldset);
7673 unlock_user(p, arg3, sizeof(target_sigset_t));
7677 #ifdef TARGET_NR_sigpending
7678 case TARGET_NR_sigpending:
7681 ret = get_errno(sigpending(&set));
7682 if (!is_error(ret)) {
7683 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
7685 host_to_target_old_sigset(p, &set);
7686 unlock_user(p, arg1, sizeof(target_sigset_t));
7691 case TARGET_NR_rt_sigpending:
7694 ret = get_errno(sigpending(&set));
7695 if (!is_error(ret)) {
7696 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
7698 host_to_target_sigset(p, &set);
7699 unlock_user(p, arg1, sizeof(target_sigset_t));
7703 #ifdef TARGET_NR_sigsuspend
7704 case TARGET_NR_sigsuspend:
7706 TaskState *ts = cpu->opaque;
7707 #if defined(TARGET_ALPHA)
7708 abi_ulong mask = arg1;
7709 target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
7711 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
7713 target_to_host_old_sigset(&ts->sigsuspend_mask, p);
7714 unlock_user(p, arg1, 0);
7716 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
7718 if (ret != -TARGET_ERESTARTSYS) {
7719 ts->in_sigsuspend = 1;
7724 case TARGET_NR_rt_sigsuspend:
7726 TaskState *ts = cpu->opaque;
7727 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
7729 target_to_host_sigset(&ts->sigsuspend_mask, p);
7730 unlock_user(p, arg1, 0);
7731 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
7733 if (ret != -TARGET_ERESTARTSYS) {
7734 ts->in_sigsuspend = 1;
7738 case TARGET_NR_rt_sigtimedwait:
7741 struct timespec uts, *puts;
7744 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
7746 target_to_host_sigset(&set, p);
7747 unlock_user(p, arg1, 0);
7750 target_to_host_timespec(puts, arg3);
7754 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
7755 if (!is_error(ret)) {
7757 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
7762 host_to_target_siginfo(p, &uinfo);
7763 unlock_user(p, arg2, sizeof(target_siginfo_t));
7765 ret = host_to_target_signal(ret);
7769 case TARGET_NR_rt_sigqueueinfo:
7772 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1)))
7774 target_to_host_siginfo(&uinfo, p);
7775 unlock_user(p, arg1, 0);
7776 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
7779 #ifdef TARGET_NR_sigreturn
7780 case TARGET_NR_sigreturn:
7781 if (block_signals()) {
7782 ret = -TARGET_ERESTARTSYS;
7784 ret = do_sigreturn(cpu_env);
7788 case TARGET_NR_rt_sigreturn:
7789 if (block_signals()) {
7790 ret = -TARGET_ERESTARTSYS;
7792 ret = do_rt_sigreturn(cpu_env);
7795 case TARGET_NR_sethostname:
7796 if (!(p = lock_user_string(arg1)))
7798 ret = get_errno(sethostname(p, arg2));
7799 unlock_user(p, arg1, 0);
7801 case TARGET_NR_setrlimit:
7803 int resource = target_to_host_resource(arg1);
7804 struct target_rlimit *target_rlim;
7806 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
7808 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
7809 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
7810 unlock_user_struct(target_rlim, arg2, 0);
7811 ret = get_errno(setrlimit(resource, &rlim));
7814 case TARGET_NR_getrlimit:
7816 int resource = target_to_host_resource(arg1);
7817 struct target_rlimit *target_rlim;
7820 ret = get_errno(getrlimit(resource, &rlim));
7821 if (!is_error(ret)) {
7822 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
7824 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
7825 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
7826 unlock_user_struct(target_rlim, arg2, 1);
7830 case TARGET_NR_getrusage:
7832 struct rusage rusage;
7833 ret = get_errno(getrusage(arg1, &rusage));
7834 if (!is_error(ret)) {
7835 ret = host_to_target_rusage(arg2, &rusage);
7839 case TARGET_NR_gettimeofday:
7842 ret = get_errno(gettimeofday(&tv, NULL));
7843 if (!is_error(ret)) {
7844 if (copy_to_user_timeval(arg1, &tv))
7849 case TARGET_NR_settimeofday:
7851 struct timeval tv, *ptv = NULL;
7852 struct timezone tz, *ptz = NULL;
7855 if (copy_from_user_timeval(&tv, arg1)) {
7862 if (copy_from_user_timezone(&tz, arg2)) {
7868 ret = get_errno(settimeofday(ptv, ptz));
7871 #if defined(TARGET_NR_select)
7872 case TARGET_NR_select:
7873 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
7874 ret = do_select(arg1, arg2, arg3, arg4, arg5);
7877 struct target_sel_arg_struct *sel;
7878 abi_ulong inp, outp, exp, tvp;
7881 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
7883 nsel = tswapal(sel->n);
7884 inp = tswapal(sel->inp);
7885 outp = tswapal(sel->outp);
7886 exp = tswapal(sel->exp);
7887 tvp = tswapal(sel->tvp);
7888 unlock_user_struct(sel, arg1, 0);
7889 ret = do_select(nsel, inp, outp, exp, tvp);
7894 #ifdef TARGET_NR_pselect6
7895 case TARGET_NR_pselect6:
7897 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
7898 fd_set rfds, wfds, efds;
7899 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
7900 struct timespec ts, *ts_ptr;
7903 * The 6th arg is actually two args smashed together,
7904 * so we cannot use the C library.
7912 abi_ulong arg_sigset, arg_sigsize, *arg7;
7913 target_sigset_t *target_sigset;
7921 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
7925 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
7929 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
7935 * This takes a timespec, and not a timeval, so we cannot
7936 * use the do_select() helper ...
7939 if (target_to_host_timespec(&ts, ts_addr)) {
7947 /* Extract the two packed args for the sigset */
7950 sig.size = SIGSET_T_SIZE;
7952 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
7956 arg_sigset = tswapal(arg7[0]);
7957 arg_sigsize = tswapal(arg7[1]);
7958 unlock_user(arg7, arg6, 0);
7962 if (arg_sigsize != sizeof(*target_sigset)) {
7963 /* Like the kernel, we enforce correct size sigsets */
7964 ret = -TARGET_EINVAL;
7967 target_sigset = lock_user(VERIFY_READ, arg_sigset,
7968 sizeof(*target_sigset), 1);
7969 if (!target_sigset) {
7972 target_to_host_sigset(&set, target_sigset);
7973 unlock_user(target_sigset, arg_sigset, 0);
7981 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
7984 if (!is_error(ret)) {
7985 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
7987 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
7989 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
7992 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
7998 #ifdef TARGET_NR_symlink
7999 case TARGET_NR_symlink:
8002 p = lock_user_string(arg1);
8003 p2 = lock_user_string(arg2);
8005 ret = -TARGET_EFAULT;
8007 ret = get_errno(symlink(p, p2));
8008 unlock_user(p2, arg2, 0);
8009 unlock_user(p, arg1, 0);
8013 #if defined(TARGET_NR_symlinkat)
8014 case TARGET_NR_symlinkat:
8017 p = lock_user_string(arg1);
8018 p2 = lock_user_string(arg3);
8020 ret = -TARGET_EFAULT;
8022 ret = get_errno(symlinkat(p, arg2, p2));
8023 unlock_user(p2, arg3, 0);
8024 unlock_user(p, arg1, 0);
8028 #ifdef TARGET_NR_oldlstat
8029 case TARGET_NR_oldlstat:
8032 #ifdef TARGET_NR_readlink
8033 case TARGET_NR_readlink:
8036 p = lock_user_string(arg1);
8037 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8039 ret = -TARGET_EFAULT;
8041 /* Short circuit this for the magic exe check. */
8042 ret = -TARGET_EINVAL;
8043 } else if (is_proc_myself((const char *)p, "exe")) {
8044 char real[PATH_MAX], *temp;
8045 temp = realpath(exec_path, real);
8046 /* Return value is # of bytes that we wrote to the buffer. */
8048 ret = get_errno(-1);
8050 /* Don't worry about sign mismatch as earlier mapping
8051 * logic would have thrown a bad address error. */
8052 ret = MIN(strlen(real), arg3);
8053 /* We cannot NUL terminate the string. */
8054 memcpy(p2, real, ret);
8057 ret = get_errno(readlink(path(p), p2, arg3));
8059 unlock_user(p2, arg2, ret);
8060 unlock_user(p, arg1, 0);
8064 #if defined(TARGET_NR_readlinkat)
8065 case TARGET_NR_readlinkat:
8068 p = lock_user_string(arg2);
8069 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8071 ret = -TARGET_EFAULT;
8072 } else if (is_proc_myself((const char *)p, "exe")) {
8073 char real[PATH_MAX], *temp;
8074 temp = realpath(exec_path, real);
8075 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
8076 snprintf((char *)p2, arg4, "%s", real);
8078 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
8080 unlock_user(p2, arg3, ret);
8081 unlock_user(p, arg2, 0);
8085 #ifdef TARGET_NR_uselib
8086 case TARGET_NR_uselib:
8089 #ifdef TARGET_NR_swapon
8090 case TARGET_NR_swapon:
8091 if (!(p = lock_user_string(arg1)))
8093 ret = get_errno(swapon(p, arg2));
8094 unlock_user(p, arg1, 0);
8097 case TARGET_NR_reboot:
8098 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
8099 /* arg4 must be ignored in all other cases */
8100 p = lock_user_string(arg4);
8104 ret = get_errno(reboot(arg1, arg2, arg3, p));
8105 unlock_user(p, arg4, 0);
8107 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
8110 #ifdef TARGET_NR_readdir
8111 case TARGET_NR_readdir:
8114 #ifdef TARGET_NR_mmap
8115 case TARGET_NR_mmap:
8116 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8117 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8118 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8119 || defined(TARGET_S390X)
8122 abi_ulong v1, v2, v3, v4, v5, v6;
8123 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
8131 unlock_user(v, arg1, 0);
8132 ret = get_errno(target_mmap(v1, v2, v3,
8133 target_to_host_bitmask(v4, mmap_flags_tbl),
8137 ret = get_errno(target_mmap(arg1, arg2, arg3,
8138 target_to_host_bitmask(arg4, mmap_flags_tbl),
8144 #ifdef TARGET_NR_mmap2
8145 case TARGET_NR_mmap2:
8147 #define MMAP_SHIFT 12
8149 ret = get_errno(target_mmap(arg1, arg2, arg3,
8150 target_to_host_bitmask(arg4, mmap_flags_tbl),
8152 arg6 << MMAP_SHIFT));
8155 case TARGET_NR_munmap:
8156 ret = get_errno(target_munmap(arg1, arg2));
8158 case TARGET_NR_mprotect:
8160 TaskState *ts = cpu->opaque;
8161 /* Special hack to detect libc making the stack executable. */
8162 if ((arg3 & PROT_GROWSDOWN)
8163 && arg1 >= ts->info->stack_limit
8164 && arg1 <= ts->info->start_stack) {
8165 arg3 &= ~PROT_GROWSDOWN;
8166 arg2 = arg2 + arg1 - ts->info->stack_limit;
8167 arg1 = ts->info->stack_limit;
8170 ret = get_errno(target_mprotect(arg1, arg2, arg3));
8172 #ifdef TARGET_NR_mremap
8173 case TARGET_NR_mremap:
8174 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
8177 /* ??? msync/mlock/munlock are broken for softmmu. */
8178 #ifdef TARGET_NR_msync
8179 case TARGET_NR_msync:
8180 ret = get_errno(msync(g2h(arg1), arg2, arg3));
8183 #ifdef TARGET_NR_mlock
8184 case TARGET_NR_mlock:
8185 ret = get_errno(mlock(g2h(arg1), arg2));
8188 #ifdef TARGET_NR_munlock
8189 case TARGET_NR_munlock:
8190 ret = get_errno(munlock(g2h(arg1), arg2));
8193 #ifdef TARGET_NR_mlockall
8194 case TARGET_NR_mlockall:
8195 ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
8198 #ifdef TARGET_NR_munlockall
8199 case TARGET_NR_munlockall:
8200 ret = get_errno(munlockall());
8203 case TARGET_NR_truncate:
8204 if (!(p = lock_user_string(arg1)))
8206 ret = get_errno(truncate(p, arg2));
8207 unlock_user(p, arg1, 0);
8209 case TARGET_NR_ftruncate:
8210 ret = get_errno(ftruncate(arg1, arg2));
8212 case TARGET_NR_fchmod:
8213 ret = get_errno(fchmod(arg1, arg2));
8215 #if defined(TARGET_NR_fchmodat)
8216 case TARGET_NR_fchmodat:
8217 if (!(p = lock_user_string(arg2)))
8219 ret = get_errno(fchmodat(arg1, p, arg3, 0));
8220 unlock_user(p, arg2, 0);
8223 case TARGET_NR_getpriority:
8224 /* Note that negative values are valid for getpriority, so we must
8225 differentiate based on errno settings. */
8227 ret = getpriority(arg1, arg2);
8228 if (ret == -1 && errno != 0) {
8229 ret = -host_to_target_errno(errno);
8233 /* Return value is the unbiased priority. Signal no error. */
8234 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
8236 /* Return value is a biased priority to avoid negative numbers. */
8240 case TARGET_NR_setpriority:
8241 ret = get_errno(setpriority(arg1, arg2, arg3));
8243 #ifdef TARGET_NR_profil
8244 case TARGET_NR_profil:
8247 case TARGET_NR_statfs:
8248 if (!(p = lock_user_string(arg1)))
8250 ret = get_errno(statfs(path(p), &stfs));
8251 unlock_user(p, arg1, 0);
8253 if (!is_error(ret)) {
8254 struct target_statfs *target_stfs;
8256 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
8258 __put_user(stfs.f_type, &target_stfs->f_type);
8259 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8260 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8261 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8262 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8263 __put_user(stfs.f_files, &target_stfs->f_files);
8264 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8265 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8266 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8267 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8268 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8269 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8270 unlock_user_struct(target_stfs, arg2, 1);
8273 case TARGET_NR_fstatfs:
8274 ret = get_errno(fstatfs(arg1, &stfs));
8275 goto convert_statfs;
8276 #ifdef TARGET_NR_statfs64
8277 case TARGET_NR_statfs64:
8278 if (!(p = lock_user_string(arg1)))
8280 ret = get_errno(statfs(path(p), &stfs));
8281 unlock_user(p, arg1, 0);
8283 if (!is_error(ret)) {
8284 struct target_statfs64 *target_stfs;
8286 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
8288 __put_user(stfs.f_type, &target_stfs->f_type);
8289 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8290 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8291 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8292 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8293 __put_user(stfs.f_files, &target_stfs->f_files);
8294 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8295 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8296 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8297 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8298 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8299 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8300 unlock_user_struct(target_stfs, arg3, 1);
8303 case TARGET_NR_fstatfs64:
8304 ret = get_errno(fstatfs(arg1, &stfs));
8305 goto convert_statfs64;
8307 #ifdef TARGET_NR_ioperm
8308 case TARGET_NR_ioperm:
8311 #ifdef TARGET_NR_socketcall
8312 case TARGET_NR_socketcall:
8313 ret = do_socketcall(arg1, arg2);
8316 #ifdef TARGET_NR_accept
8317 case TARGET_NR_accept:
8318 ret = do_accept4(arg1, arg2, arg3, 0);
8321 #ifdef TARGET_NR_accept4
8322 case TARGET_NR_accept4:
8323 #ifdef CONFIG_ACCEPT4
8324 ret = do_accept4(arg1, arg2, arg3, arg4);
8330 #ifdef TARGET_NR_bind
8331 case TARGET_NR_bind:
8332 ret = do_bind(arg1, arg2, arg3);
8335 #ifdef TARGET_NR_connect
8336 case TARGET_NR_connect:
8337 ret = do_connect(arg1, arg2, arg3);
8340 #ifdef TARGET_NR_getpeername
8341 case TARGET_NR_getpeername:
8342 ret = do_getpeername(arg1, arg2, arg3);
8345 #ifdef TARGET_NR_getsockname
8346 case TARGET_NR_getsockname:
8347 ret = do_getsockname(arg1, arg2, arg3);
8350 #ifdef TARGET_NR_getsockopt
8351 case TARGET_NR_getsockopt:
8352 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
8355 #ifdef TARGET_NR_listen
8356 case TARGET_NR_listen:
8357 ret = get_errno(listen(arg1, arg2));
8360 #ifdef TARGET_NR_recv
8361 case TARGET_NR_recv:
8362 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
8365 #ifdef TARGET_NR_recvfrom
8366 case TARGET_NR_recvfrom:
8367 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
8370 #ifdef TARGET_NR_recvmsg
8371 case TARGET_NR_recvmsg:
8372 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
8375 #ifdef TARGET_NR_send
8376 case TARGET_NR_send:
8377 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
8380 #ifdef TARGET_NR_sendmsg
8381 case TARGET_NR_sendmsg:
8382 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
8385 #ifdef TARGET_NR_sendmmsg
8386 case TARGET_NR_sendmmsg:
8387 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
8389 case TARGET_NR_recvmmsg:
8390 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
8393 #ifdef TARGET_NR_sendto
8394 case TARGET_NR_sendto:
8395 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
8398 #ifdef TARGET_NR_shutdown
8399 case TARGET_NR_shutdown:
8400 ret = get_errno(shutdown(arg1, arg2));
8403 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
8404 case TARGET_NR_getrandom:
8405 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
8409 ret = get_errno(getrandom(p, arg2, arg3));
8410 unlock_user(p, arg1, ret);
8413 #ifdef TARGET_NR_socket
8414 case TARGET_NR_socket:
8415 ret = do_socket(arg1, arg2, arg3);
8416 fd_trans_unregister(ret);
8419 #ifdef TARGET_NR_socketpair
8420 case TARGET_NR_socketpair:
8421 ret = do_socketpair(arg1, arg2, arg3, arg4);
8424 #ifdef TARGET_NR_setsockopt
8425 case TARGET_NR_setsockopt:
8426 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
8430 case TARGET_NR_syslog:
8431 if (!(p = lock_user_string(arg2)))
8433 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
8434 unlock_user(p, arg2, 0);
8437 case TARGET_NR_setitimer:
8439 struct itimerval value, ovalue, *pvalue;
8443 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
8444 || copy_from_user_timeval(&pvalue->it_value,
8445 arg2 + sizeof(struct target_timeval)))
8450 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
8451 if (!is_error(ret) && arg3) {
8452 if (copy_to_user_timeval(arg3,
8453 &ovalue.it_interval)
8454 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
8460 case TARGET_NR_getitimer:
8462 struct itimerval value;
8464 ret = get_errno(getitimer(arg1, &value));
8465 if (!is_error(ret) && arg2) {
8466 if (copy_to_user_timeval(arg2,
8468 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
8474 #ifdef TARGET_NR_stat
8475 case TARGET_NR_stat:
8476 if (!(p = lock_user_string(arg1)))
8478 ret = get_errno(stat(path(p), &st));
8479 unlock_user(p, arg1, 0);
8482 #ifdef TARGET_NR_lstat
8483 case TARGET_NR_lstat:
8484 if (!(p = lock_user_string(arg1)))
8486 ret = get_errno(lstat(path(p), &st));
8487 unlock_user(p, arg1, 0);
8490 case TARGET_NR_fstat:
8492 ret = get_errno(fstat(arg1, &st));
8493 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
8496 if (!is_error(ret)) {
8497 struct target_stat *target_st;
8499 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
8501 memset(target_st, 0, sizeof(*target_st));
8502 __put_user(st.st_dev, &target_st->st_dev);
8503 __put_user(st.st_ino, &target_st->st_ino);
8504 __put_user(st.st_mode, &target_st->st_mode);
8505 __put_user(st.st_uid, &target_st->st_uid);
8506 __put_user(st.st_gid, &target_st->st_gid);
8507 __put_user(st.st_nlink, &target_st->st_nlink);
8508 __put_user(st.st_rdev, &target_st->st_rdev);
8509 __put_user(st.st_size, &target_st->st_size);
8510 __put_user(st.st_blksize, &target_st->st_blksize);
8511 __put_user(st.st_blocks, &target_st->st_blocks);
8512 __put_user(st.st_atime, &target_st->target_st_atime);
8513 __put_user(st.st_mtime, &target_st->target_st_mtime);
8514 __put_user(st.st_ctime, &target_st->target_st_ctime);
8515 unlock_user_struct(target_st, arg2, 1);
8519 #ifdef TARGET_NR_olduname
8520 case TARGET_NR_olduname:
8523 #ifdef TARGET_NR_iopl
8524 case TARGET_NR_iopl:
8527 case TARGET_NR_vhangup:
8528 ret = get_errno(vhangup());
8530 #ifdef TARGET_NR_idle
8531 case TARGET_NR_idle:
8534 #ifdef TARGET_NR_syscall
8535 case TARGET_NR_syscall:
8536 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
8537 arg6, arg7, arg8, 0);
8540 case TARGET_NR_wait4:
8543 abi_long status_ptr = arg2;
8544 struct rusage rusage, *rusage_ptr;
8545 abi_ulong target_rusage = arg4;
8546 abi_long rusage_err;
8548 rusage_ptr = &rusage;
8551 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
8552 if (!is_error(ret)) {
8553 if (status_ptr && ret) {
8554 status = host_to_target_waitstatus(status);
8555 if (put_user_s32(status, status_ptr))
8558 if (target_rusage) {
8559 rusage_err = host_to_target_rusage(target_rusage, &rusage);
8567 #ifdef TARGET_NR_swapoff
8568 case TARGET_NR_swapoff:
8569 if (!(p = lock_user_string(arg1)))
8571 ret = get_errno(swapoff(p));
8572 unlock_user(p, arg1, 0);
8575 case TARGET_NR_sysinfo:
8577 struct target_sysinfo *target_value;
8578 struct sysinfo value;
8579 ret = get_errno(sysinfo(&value));
8580 if (!is_error(ret) && arg1)
8582 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
8584 __put_user(value.uptime, &target_value->uptime);
8585 __put_user(value.loads[0], &target_value->loads[0]);
8586 __put_user(value.loads[1], &target_value->loads[1]);
8587 __put_user(value.loads[2], &target_value->loads[2]);
8588 __put_user(value.totalram, &target_value->totalram);
8589 __put_user(value.freeram, &target_value->freeram);
8590 __put_user(value.sharedram, &target_value->sharedram);
8591 __put_user(value.bufferram, &target_value->bufferram);
8592 __put_user(value.totalswap, &target_value->totalswap);
8593 __put_user(value.freeswap, &target_value->freeswap);
8594 __put_user(value.procs, &target_value->procs);
8595 __put_user(value.totalhigh, &target_value->totalhigh);
8596 __put_user(value.freehigh, &target_value->freehigh);
8597 __put_user(value.mem_unit, &target_value->mem_unit);
8598 unlock_user_struct(target_value, arg1, 1);
8602 #ifdef TARGET_NR_ipc
8604 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
8607 #ifdef TARGET_NR_semget
8608 case TARGET_NR_semget:
8609 ret = get_errno(semget(arg1, arg2, arg3));
8612 #ifdef TARGET_NR_semop
8613 case TARGET_NR_semop:
8614 ret = do_semop(arg1, arg2, arg3);
8617 #ifdef TARGET_NR_semctl
8618 case TARGET_NR_semctl:
8619 ret = do_semctl(arg1, arg2, arg3, arg4);
8622 #ifdef TARGET_NR_msgctl
8623 case TARGET_NR_msgctl:
8624 ret = do_msgctl(arg1, arg2, arg3);
8627 #ifdef TARGET_NR_msgget
8628 case TARGET_NR_msgget:
8629 ret = get_errno(msgget(arg1, arg2));
8632 #ifdef TARGET_NR_msgrcv
8633 case TARGET_NR_msgrcv:
8634 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
8637 #ifdef TARGET_NR_msgsnd
8638 case TARGET_NR_msgsnd:
8639 ret = do_msgsnd(arg1, arg2, arg3, arg4);
8642 #ifdef TARGET_NR_shmget
8643 case TARGET_NR_shmget:
8644 ret = get_errno(shmget(arg1, arg2, arg3));
8647 #ifdef TARGET_NR_shmctl
8648 case TARGET_NR_shmctl:
8649 ret = do_shmctl(arg1, arg2, arg3);
8652 #ifdef TARGET_NR_shmat
8653 case TARGET_NR_shmat:
8654 ret = do_shmat(arg1, arg2, arg3);
8657 #ifdef TARGET_NR_shmdt
8658 case TARGET_NR_shmdt:
8659 ret = do_shmdt(arg1);
8662 case TARGET_NR_fsync:
8663 ret = get_errno(fsync(arg1));
8665 case TARGET_NR_clone:
8666 /* Linux manages to have three different orderings for its
8667 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
8668 * match the kernel's CONFIG_CLONE_* settings.
8669 * Microblaze is further special in that it uses a sixth
8670 * implicit argument to clone for the TLS pointer.
8672 #if defined(TARGET_MICROBLAZE)
8673 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
8674 #elif defined(TARGET_CLONE_BACKWARDS)
8675 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
8676 #elif defined(TARGET_CLONE_BACKWARDS2)
8677 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
8679 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
8682 #ifdef __NR_exit_group
8683 /* new thread calls */
8684 case TARGET_NR_exit_group:
8688 gdb_exit(cpu_env, arg1);
8689 ret = get_errno(exit_group(arg1));
8692 case TARGET_NR_setdomainname:
8693 if (!(p = lock_user_string(arg1)))
8695 ret = get_errno(setdomainname(p, arg2));
8696 unlock_user(p, arg1, 0);
8698 case TARGET_NR_uname:
8699 /* no need to transcode because we use the linux syscall */
8701 struct new_utsname * buf;
8703 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
8705 ret = get_errno(sys_uname(buf));
8706 if (!is_error(ret)) {
8707 /* Overrite the native machine name with whatever is being
8709 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
8710 /* Allow the user to override the reported release. */
8711 if (qemu_uname_release && *qemu_uname_release)
8712 strcpy (buf->release, qemu_uname_release);
8714 unlock_user_struct(buf, arg1, 1);
8718 case TARGET_NR_modify_ldt:
8719 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
8721 #if !defined(TARGET_X86_64)
8722 case TARGET_NR_vm86old:
8724 case TARGET_NR_vm86:
8725 ret = do_vm86(cpu_env, arg1, arg2);
8729 case TARGET_NR_adjtimex:
8731 #ifdef TARGET_NR_create_module
8732 case TARGET_NR_create_module:
8734 case TARGET_NR_init_module:
8735 case TARGET_NR_delete_module:
8736 #ifdef TARGET_NR_get_kernel_syms
8737 case TARGET_NR_get_kernel_syms:
8740 case TARGET_NR_quotactl:
8742 case TARGET_NR_getpgid:
8743 ret = get_errno(getpgid(arg1));
8745 case TARGET_NR_fchdir:
8746 ret = get_errno(fchdir(arg1));
8748 #ifdef TARGET_NR_bdflush /* not on x86_64 */
8749 case TARGET_NR_bdflush:
8752 #ifdef TARGET_NR_sysfs
8753 case TARGET_NR_sysfs:
8756 case TARGET_NR_personality:
8757 ret = get_errno(personality(arg1));
8759 #ifdef TARGET_NR_afs_syscall
8760 case TARGET_NR_afs_syscall:
8763 #ifdef TARGET_NR__llseek /* Not on alpha */
8764 case TARGET_NR__llseek:
8767 #if !defined(__NR_llseek)
8768 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
8770 ret = get_errno(res);
8775 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
8777 if ((ret == 0) && put_user_s64(res, arg4)) {
8783 #ifdef TARGET_NR_getdents
8784 case TARGET_NR_getdents:
8785 #ifdef __NR_getdents
8786 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
8788 struct target_dirent *target_dirp;
8789 struct linux_dirent *dirp;
8790 abi_long count = arg3;
8792 dirp = g_try_malloc(count);
8794 ret = -TARGET_ENOMEM;
8798 ret = get_errno(sys_getdents(arg1, dirp, count));
8799 if (!is_error(ret)) {
8800 struct linux_dirent *de;
8801 struct target_dirent *tde;
8803 int reclen, treclen;
8804 int count1, tnamelen;
8808 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
8812 reclen = de->d_reclen;
8813 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
8814 assert(tnamelen >= 0);
8815 treclen = tnamelen + offsetof(struct target_dirent, d_name);
8816 assert(count1 + treclen <= count);
8817 tde->d_reclen = tswap16(treclen);
8818 tde->d_ino = tswapal(de->d_ino);
8819 tde->d_off = tswapal(de->d_off);
8820 memcpy(tde->d_name, de->d_name, tnamelen);
8821 de = (struct linux_dirent *)((char *)de + reclen);
8823 tde = (struct target_dirent *)((char *)tde + treclen);
8827 unlock_user(target_dirp, arg2, ret);
8833 struct linux_dirent *dirp;
8834 abi_long count = arg3;
8836 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
8838 ret = get_errno(sys_getdents(arg1, dirp, count));
8839 if (!is_error(ret)) {
8840 struct linux_dirent *de;
8845 reclen = de->d_reclen;
8848 de->d_reclen = tswap16(reclen);
8849 tswapls(&de->d_ino);
8850 tswapls(&de->d_off);
8851 de = (struct linux_dirent *)((char *)de + reclen);
8855 unlock_user(dirp, arg2, ret);
8859 /* Implement getdents in terms of getdents64 */
8861 struct linux_dirent64 *dirp;
8862 abi_long count = arg3;
8864 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8868 ret = get_errno(sys_getdents64(arg1, dirp, count));
8869 if (!is_error(ret)) {
8870 /* Convert the dirent64 structs to target dirent. We do this
8871 * in-place, since we can guarantee that a target_dirent is no
8872 * larger than a dirent64; however this means we have to be
8873 * careful to read everything before writing in the new format.
8875 struct linux_dirent64 *de;
8876 struct target_dirent *tde;
8881 tde = (struct target_dirent *)dirp;
8883 int namelen, treclen;
8884 int reclen = de->d_reclen;
8885 uint64_t ino = de->d_ino;
8886 int64_t off = de->d_off;
8887 uint8_t type = de->d_type;
8889 namelen = strlen(de->d_name);
8890 treclen = offsetof(struct target_dirent, d_name)
8892 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
8894 memmove(tde->d_name, de->d_name, namelen + 1);
8895 tde->d_ino = tswapal(ino);
8896 tde->d_off = tswapal(off);
8897 tde->d_reclen = tswap16(treclen);
8898 /* The target_dirent type is in what was formerly a padding
8899 * byte at the end of the structure:
8901 *(((char *)tde) + treclen - 1) = type;
8903 de = (struct linux_dirent64 *)((char *)de + reclen);
8904 tde = (struct target_dirent *)((char *)tde + treclen);
8910 unlock_user(dirp, arg2, ret);
8914 #endif /* TARGET_NR_getdents */
8915 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8916 case TARGET_NR_getdents64:
8918 struct linux_dirent64 *dirp;
8919 abi_long count = arg3;
8920 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
8922 ret = get_errno(sys_getdents64(arg1, dirp, count));
8923 if (!is_error(ret)) {
8924 struct linux_dirent64 *de;
8929 reclen = de->d_reclen;
8932 de->d_reclen = tswap16(reclen);
8933 tswap64s((uint64_t *)&de->d_ino);
8934 tswap64s((uint64_t *)&de->d_off);
8935 de = (struct linux_dirent64 *)((char *)de + reclen);
8939 unlock_user(dirp, arg2, ret);
8942 #endif /* TARGET_NR_getdents64 */
8943 #if defined(TARGET_NR__newselect)
8944 case TARGET_NR__newselect:
8945 ret = do_select(arg1, arg2, arg3, arg4, arg5);
8948 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
8949 # ifdef TARGET_NR_poll
8950 case TARGET_NR_poll:
8952 # ifdef TARGET_NR_ppoll
8953 case TARGET_NR_ppoll:
8956 struct target_pollfd *target_pfd;
8957 unsigned int nfds = arg2;
8965 target_pfd = lock_user(VERIFY_WRITE, arg1,
8966 sizeof(struct target_pollfd) * nfds, 1);
8971 pfd = alloca(sizeof(struct pollfd) * nfds);
8972 for (i = 0; i < nfds; i++) {
8973 pfd[i].fd = tswap32(target_pfd[i].fd);
8974 pfd[i].events = tswap16(target_pfd[i].events);
8978 # ifdef TARGET_NR_ppoll
8979 if (num == TARGET_NR_ppoll) {
8980 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
8981 target_sigset_t *target_set;
8982 sigset_t _set, *set = &_set;
8985 if (target_to_host_timespec(timeout_ts, arg3)) {
8986 unlock_user(target_pfd, arg1, 0);
8994 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
8996 unlock_user(target_pfd, arg1, 0);
8999 target_to_host_sigset(set, target_set);
9004 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts,
9005 set, SIGSET_T_SIZE));
9007 if (!is_error(ret) && arg3) {
9008 host_to_target_timespec(arg3, timeout_ts);
9011 unlock_user(target_set, arg4, 0);
9015 ret = get_errno(poll(pfd, nfds, timeout));
9017 if (!is_error(ret)) {
9018 for(i = 0; i < nfds; i++) {
9019 target_pfd[i].revents = tswap16(pfd[i].revents);
9022 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
9026 case TARGET_NR_flock:
9027 /* NOTE: the flock constant seems to be the same for every
9029 ret = get_errno(safe_flock(arg1, arg2));
9031 case TARGET_NR_readv:
9033 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9035 ret = get_errno(safe_readv(arg1, vec, arg3));
9036 unlock_iovec(vec, arg2, arg3, 1);
9038 ret = -host_to_target_errno(errno);
9042 case TARGET_NR_writev:
9044 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9046 ret = get_errno(safe_writev(arg1, vec, arg3));
9047 unlock_iovec(vec, arg2, arg3, 0);
9049 ret = -host_to_target_errno(errno);
9053 case TARGET_NR_getsid:
9054 ret = get_errno(getsid(arg1));
9056 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9057 case TARGET_NR_fdatasync:
9058 ret = get_errno(fdatasync(arg1));
9061 #ifdef TARGET_NR__sysctl
9062 case TARGET_NR__sysctl:
9063 /* We don't implement this, but ENOTDIR is always a safe
9065 ret = -TARGET_ENOTDIR;
9068 case TARGET_NR_sched_getaffinity:
9070 unsigned int mask_size;
9071 unsigned long *mask;
9074 * sched_getaffinity needs multiples of ulong, so need to take
9075 * care of mismatches between target ulong and host ulong sizes.
9077 if (arg2 & (sizeof(abi_ulong) - 1)) {
9078 ret = -TARGET_EINVAL;
9081 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9083 mask = alloca(mask_size);
9084 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
9086 if (!is_error(ret)) {
9088 /* More data returned than the caller's buffer will fit.
9089 * This only happens if sizeof(abi_long) < sizeof(long)
9090 * and the caller passed us a buffer holding an odd number
9091 * of abi_longs. If the host kernel is actually using the
9092 * extra 4 bytes then fail EINVAL; otherwise we can just
9093 * ignore them and only copy the interesting part.
9095 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
9096 if (numcpus > arg2 * 8) {
9097 ret = -TARGET_EINVAL;
9103 if (copy_to_user(arg3, mask, ret)) {
9109 case TARGET_NR_sched_setaffinity:
9111 unsigned int mask_size;
9112 unsigned long *mask;
9115 * sched_setaffinity needs multiples of ulong, so need to take
9116 * care of mismatches between target ulong and host ulong sizes.
9118 if (arg2 & (sizeof(abi_ulong) - 1)) {
9119 ret = -TARGET_EINVAL;
9122 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9124 mask = alloca(mask_size);
9125 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
9128 memcpy(mask, p, arg2);
9129 unlock_user_struct(p, arg2, 0);
9131 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
9134 case TARGET_NR_sched_setparam:
9136 struct sched_param *target_schp;
9137 struct sched_param schp;
9140 return -TARGET_EINVAL;
9142 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
9144 schp.sched_priority = tswap32(target_schp->sched_priority);
9145 unlock_user_struct(target_schp, arg2, 0);
9146 ret = get_errno(sched_setparam(arg1, &schp));
9149 case TARGET_NR_sched_getparam:
9151 struct sched_param *target_schp;
9152 struct sched_param schp;
9155 return -TARGET_EINVAL;
9157 ret = get_errno(sched_getparam(arg1, &schp));
9158 if (!is_error(ret)) {
9159 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
9161 target_schp->sched_priority = tswap32(schp.sched_priority);
9162 unlock_user_struct(target_schp, arg2, 1);
9166 case TARGET_NR_sched_setscheduler:
9168 struct sched_param *target_schp;
9169 struct sched_param schp;
9171 return -TARGET_EINVAL;
9173 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
9175 schp.sched_priority = tswap32(target_schp->sched_priority);
9176 unlock_user_struct(target_schp, arg3, 0);
9177 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
9180 case TARGET_NR_sched_getscheduler:
9181 ret = get_errno(sched_getscheduler(arg1));
9183 case TARGET_NR_sched_yield:
9184 ret = get_errno(sched_yield());
9186 case TARGET_NR_sched_get_priority_max:
9187 ret = get_errno(sched_get_priority_max(arg1));
9189 case TARGET_NR_sched_get_priority_min:
9190 ret = get_errno(sched_get_priority_min(arg1));
9192 case TARGET_NR_sched_rr_get_interval:
9195 ret = get_errno(sched_rr_get_interval(arg1, &ts));
9196 if (!is_error(ret)) {
9197 ret = host_to_target_timespec(arg2, &ts);
9201 case TARGET_NR_nanosleep:
9203 struct timespec req, rem;
9204 target_to_host_timespec(&req, arg1);
9205 ret = get_errno(nanosleep(&req, &rem));
9206 if (is_error(ret) && arg2) {
9207 host_to_target_timespec(arg2, &rem);
9211 #ifdef TARGET_NR_query_module
9212 case TARGET_NR_query_module:
9215 #ifdef TARGET_NR_nfsservctl
9216 case TARGET_NR_nfsservctl:
9219 case TARGET_NR_prctl:
9221 case PR_GET_PDEATHSIG:
9224 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
9225 if (!is_error(ret) && arg2
9226 && put_user_ual(deathsig, arg2)) {
9234 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
9238 ret = get_errno(prctl(arg1, (unsigned long)name,
9240 unlock_user(name, arg2, 16);
9245 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
9249 ret = get_errno(prctl(arg1, (unsigned long)name,
9251 unlock_user(name, arg2, 0);
9256 /* Most prctl options have no pointer arguments */
9257 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
9261 #ifdef TARGET_NR_arch_prctl
9262 case TARGET_NR_arch_prctl:
9263 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
9264 ret = do_arch_prctl(cpu_env, arg1, arg2);
9270 #ifdef TARGET_NR_pread64
9271 case TARGET_NR_pread64:
9272 if (regpairs_aligned(cpu_env)) {
9276 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
9278 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
9279 unlock_user(p, arg2, ret);
9281 case TARGET_NR_pwrite64:
9282 if (regpairs_aligned(cpu_env)) {
9286 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
9288 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
9289 unlock_user(p, arg2, 0);
9292 case TARGET_NR_getcwd:
9293 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
9295 ret = get_errno(sys_getcwd1(p, arg2));
9296 unlock_user(p, arg1, ret);
9298 case TARGET_NR_capget:
9299 case TARGET_NR_capset:
9301 struct target_user_cap_header *target_header;
9302 struct target_user_cap_data *target_data = NULL;
9303 struct __user_cap_header_struct header;
9304 struct __user_cap_data_struct data[2];
9305 struct __user_cap_data_struct *dataptr = NULL;
9306 int i, target_datalen;
9309 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
9312 header.version = tswap32(target_header->version);
9313 header.pid = tswap32(target_header->pid);
9315 if (header.version != _LINUX_CAPABILITY_VERSION) {
9316 /* Version 2 and up takes pointer to two user_data structs */
9320 target_datalen = sizeof(*target_data) * data_items;
9323 if (num == TARGET_NR_capget) {
9324 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
9326 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
9329 unlock_user_struct(target_header, arg1, 0);
9333 if (num == TARGET_NR_capset) {
9334 for (i = 0; i < data_items; i++) {
9335 data[i].effective = tswap32(target_data[i].effective);
9336 data[i].permitted = tswap32(target_data[i].permitted);
9337 data[i].inheritable = tswap32(target_data[i].inheritable);
9344 if (num == TARGET_NR_capget) {
9345 ret = get_errno(capget(&header, dataptr));
9347 ret = get_errno(capset(&header, dataptr));
9350 /* The kernel always updates version for both capget and capset */
9351 target_header->version = tswap32(header.version);
9352 unlock_user_struct(target_header, arg1, 1);
9355 if (num == TARGET_NR_capget) {
9356 for (i = 0; i < data_items; i++) {
9357 target_data[i].effective = tswap32(data[i].effective);
9358 target_data[i].permitted = tswap32(data[i].permitted);
9359 target_data[i].inheritable = tswap32(data[i].inheritable);
9361 unlock_user(target_data, arg2, target_datalen);
9363 unlock_user(target_data, arg2, 0);
9368 case TARGET_NR_sigaltstack:
9369 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
9372 #ifdef CONFIG_SENDFILE
9373 case TARGET_NR_sendfile:
9378 ret = get_user_sal(off, arg3);
9379 if (is_error(ret)) {
9384 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
9385 if (!is_error(ret) && arg3) {
9386 abi_long ret2 = put_user_sal(off, arg3);
9387 if (is_error(ret2)) {
9393 #ifdef TARGET_NR_sendfile64
9394 case TARGET_NR_sendfile64:
9399 ret = get_user_s64(off, arg3);
9400 if (is_error(ret)) {
9405 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
9406 if (!is_error(ret) && arg3) {
9407 abi_long ret2 = put_user_s64(off, arg3);
9408 if (is_error(ret2)) {
9416 case TARGET_NR_sendfile:
9417 #ifdef TARGET_NR_sendfile64
9418 case TARGET_NR_sendfile64:
9423 #ifdef TARGET_NR_getpmsg
9424 case TARGET_NR_getpmsg:
9427 #ifdef TARGET_NR_putpmsg
9428 case TARGET_NR_putpmsg:
9431 #ifdef TARGET_NR_vfork
9432 case TARGET_NR_vfork:
9433 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
9437 #ifdef TARGET_NR_ugetrlimit
9438 case TARGET_NR_ugetrlimit:
9441 int resource = target_to_host_resource(arg1);
9442 ret = get_errno(getrlimit(resource, &rlim));
9443 if (!is_error(ret)) {
9444 struct target_rlimit *target_rlim;
9445 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9447 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9448 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9449 unlock_user_struct(target_rlim, arg2, 1);
9454 #ifdef TARGET_NR_truncate64
9455 case TARGET_NR_truncate64:
9456 if (!(p = lock_user_string(arg1)))
9458 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
9459 unlock_user(p, arg1, 0);
9462 #ifdef TARGET_NR_ftruncate64
9463 case TARGET_NR_ftruncate64:
9464 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
9467 #ifdef TARGET_NR_stat64
9468 case TARGET_NR_stat64:
9469 if (!(p = lock_user_string(arg1)))
9471 ret = get_errno(stat(path(p), &st));
9472 unlock_user(p, arg1, 0);
9474 ret = host_to_target_stat64(cpu_env, arg2, &st);
9477 #ifdef TARGET_NR_lstat64
9478 case TARGET_NR_lstat64:
9479 if (!(p = lock_user_string(arg1)))
9481 ret = get_errno(lstat(path(p), &st));
9482 unlock_user(p, arg1, 0);
9484 ret = host_to_target_stat64(cpu_env, arg2, &st);
9487 #ifdef TARGET_NR_fstat64
9488 case TARGET_NR_fstat64:
9489 ret = get_errno(fstat(arg1, &st));
9491 ret = host_to_target_stat64(cpu_env, arg2, &st);
9494 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
9495 #ifdef TARGET_NR_fstatat64
9496 case TARGET_NR_fstatat64:
9498 #ifdef TARGET_NR_newfstatat
9499 case TARGET_NR_newfstatat:
9501 if (!(p = lock_user_string(arg2)))
9503 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
9505 ret = host_to_target_stat64(cpu_env, arg3, &st);
9508 #ifdef TARGET_NR_lchown
9509 case TARGET_NR_lchown:
9510 if (!(p = lock_user_string(arg1)))
9512 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
9513 unlock_user(p, arg1, 0);
9516 #ifdef TARGET_NR_getuid
9517 case TARGET_NR_getuid:
9518 ret = get_errno(high2lowuid(getuid()));
9521 #ifdef TARGET_NR_getgid
9522 case TARGET_NR_getgid:
9523 ret = get_errno(high2lowgid(getgid()));
9526 #ifdef TARGET_NR_geteuid
9527 case TARGET_NR_geteuid:
9528 ret = get_errno(high2lowuid(geteuid()));
9531 #ifdef TARGET_NR_getegid
9532 case TARGET_NR_getegid:
9533 ret = get_errno(high2lowgid(getegid()));
9536 case TARGET_NR_setreuid:
9537 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
9539 case TARGET_NR_setregid:
9540 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
9542 case TARGET_NR_getgroups:
9544 int gidsetsize = arg1;
9545 target_id *target_grouplist;
9549 grouplist = alloca(gidsetsize * sizeof(gid_t));
9550 ret = get_errno(getgroups(gidsetsize, grouplist));
9551 if (gidsetsize == 0)
9553 if (!is_error(ret)) {
9554 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
9555 if (!target_grouplist)
9557 for(i = 0;i < ret; i++)
9558 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
9559 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
9563 case TARGET_NR_setgroups:
9565 int gidsetsize = arg1;
9566 target_id *target_grouplist;
9567 gid_t *grouplist = NULL;
9570 grouplist = alloca(gidsetsize * sizeof(gid_t));
9571 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
9572 if (!target_grouplist) {
9573 ret = -TARGET_EFAULT;
9576 for (i = 0; i < gidsetsize; i++) {
9577 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
9579 unlock_user(target_grouplist, arg2, 0);
9581 ret = get_errno(setgroups(gidsetsize, grouplist));
9584 case TARGET_NR_fchown:
9585 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
9587 #if defined(TARGET_NR_fchownat)
9588 case TARGET_NR_fchownat:
9589 if (!(p = lock_user_string(arg2)))
9591 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
9592 low2highgid(arg4), arg5));
9593 unlock_user(p, arg2, 0);
9596 #ifdef TARGET_NR_setresuid
9597 case TARGET_NR_setresuid:
9598 ret = get_errno(sys_setresuid(low2highuid(arg1),
9600 low2highuid(arg3)));
9603 #ifdef TARGET_NR_getresuid
9604 case TARGET_NR_getresuid:
9606 uid_t ruid, euid, suid;
9607 ret = get_errno(getresuid(&ruid, &euid, &suid));
9608 if (!is_error(ret)) {
9609 if (put_user_id(high2lowuid(ruid), arg1)
9610 || put_user_id(high2lowuid(euid), arg2)
9611 || put_user_id(high2lowuid(suid), arg3))
9617 #ifdef TARGET_NR_getresgid
9618 case TARGET_NR_setresgid:
9619 ret = get_errno(sys_setresgid(low2highgid(arg1),
9621 low2highgid(arg3)));
9624 #ifdef TARGET_NR_getresgid
9625 case TARGET_NR_getresgid:
9627 gid_t rgid, egid, sgid;
9628 ret = get_errno(getresgid(&rgid, &egid, &sgid));
9629 if (!is_error(ret)) {
9630 if (put_user_id(high2lowgid(rgid), arg1)
9631 || put_user_id(high2lowgid(egid), arg2)
9632 || put_user_id(high2lowgid(sgid), arg3))
9638 #ifdef TARGET_NR_chown
9639 case TARGET_NR_chown:
9640 if (!(p = lock_user_string(arg1)))
9642 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
9643 unlock_user(p, arg1, 0);
9646 case TARGET_NR_setuid:
9647 ret = get_errno(sys_setuid(low2highuid(arg1)));
9649 case TARGET_NR_setgid:
9650 ret = get_errno(sys_setgid(low2highgid(arg1)));
9652 case TARGET_NR_setfsuid:
9653 ret = get_errno(setfsuid(arg1));
9655 case TARGET_NR_setfsgid:
9656 ret = get_errno(setfsgid(arg1));
9659 #ifdef TARGET_NR_lchown32
9660 case TARGET_NR_lchown32:
9661 if (!(p = lock_user_string(arg1)))
9663 ret = get_errno(lchown(p, arg2, arg3));
9664 unlock_user(p, arg1, 0);
9667 #ifdef TARGET_NR_getuid32
9668 case TARGET_NR_getuid32:
9669 ret = get_errno(getuid());
9673 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
9674 /* Alpha specific */
9675 case TARGET_NR_getxuid:
9679 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
9681 ret = get_errno(getuid());
9684 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
9685 /* Alpha specific */
9686 case TARGET_NR_getxgid:
9690 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
9692 ret = get_errno(getgid());
9695 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
9696 /* Alpha specific */
9697 case TARGET_NR_osf_getsysinfo:
9698 ret = -TARGET_EOPNOTSUPP;
9700 case TARGET_GSI_IEEE_FP_CONTROL:
9702 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
9704 /* Copied from linux ieee_fpcr_to_swcr. */
9705 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
9706 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
9707 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
9708 | SWCR_TRAP_ENABLE_DZE
9709 | SWCR_TRAP_ENABLE_OVF);
9710 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
9711 | SWCR_TRAP_ENABLE_INE);
9712 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
9713 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
9715 if (put_user_u64 (swcr, arg2))
9721 /* case GSI_IEEE_STATE_AT_SIGNAL:
9722 -- Not implemented in linux kernel.
9724 -- Retrieves current unaligned access state; not much used.
9726 -- Retrieves implver information; surely not used.
9728 -- Grabs a copy of the HWRPB; surely not used.
9733 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
9734 /* Alpha specific */
9735 case TARGET_NR_osf_setsysinfo:
9736 ret = -TARGET_EOPNOTSUPP;
9738 case TARGET_SSI_IEEE_FP_CONTROL:
9740 uint64_t swcr, fpcr, orig_fpcr;
9742 if (get_user_u64 (swcr, arg2)) {
9745 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
9746 fpcr = orig_fpcr & FPCR_DYN_MASK;
9748 /* Copied from linux ieee_swcr_to_fpcr. */
9749 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
9750 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
9751 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
9752 | SWCR_TRAP_ENABLE_DZE
9753 | SWCR_TRAP_ENABLE_OVF)) << 48;
9754 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
9755 | SWCR_TRAP_ENABLE_INE)) << 57;
9756 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
9757 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
9759 cpu_alpha_store_fpcr(cpu_env, fpcr);
9764 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
9766 uint64_t exc, fpcr, orig_fpcr;
9769 if (get_user_u64(exc, arg2)) {
9773 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
9775 /* We only add to the exception status here. */
9776 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
9778 cpu_alpha_store_fpcr(cpu_env, fpcr);
9781 /* Old exceptions are not signaled. */
9782 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
9784 /* If any exceptions set by this call,
9785 and are unmasked, send a signal. */
9787 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
9788 si_code = TARGET_FPE_FLTRES;
9790 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
9791 si_code = TARGET_FPE_FLTUND;
9793 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
9794 si_code = TARGET_FPE_FLTOVF;
9796 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
9797 si_code = TARGET_FPE_FLTDIV;
9799 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
9800 si_code = TARGET_FPE_FLTINV;
9803 target_siginfo_t info;
9804 info.si_signo = SIGFPE;
9806 info.si_code = si_code;
9807 info._sifields._sigfault._addr
9808 = ((CPUArchState *)cpu_env)->pc;
9809 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
9814 /* case SSI_NVPAIRS:
9815 -- Used with SSIN_UACPROC to enable unaligned accesses.
9816 case SSI_IEEE_STATE_AT_SIGNAL:
9817 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
9818 -- Not implemented in linux kernel
9823 #ifdef TARGET_NR_osf_sigprocmask
9824 /* Alpha specific. */
9825 case TARGET_NR_osf_sigprocmask:
9829 sigset_t set, oldset;
9832 case TARGET_SIG_BLOCK:
9835 case TARGET_SIG_UNBLOCK:
9838 case TARGET_SIG_SETMASK:
9842 ret = -TARGET_EINVAL;
9846 target_to_host_old_sigset(&set, &mask);
9847 ret = do_sigprocmask(how, &set, &oldset);
9849 host_to_target_old_sigset(&mask, &oldset);
9856 #ifdef TARGET_NR_getgid32
9857 case TARGET_NR_getgid32:
9858 ret = get_errno(getgid());
9861 #ifdef TARGET_NR_geteuid32
9862 case TARGET_NR_geteuid32:
9863 ret = get_errno(geteuid());
9866 #ifdef TARGET_NR_getegid32
9867 case TARGET_NR_getegid32:
9868 ret = get_errno(getegid());
9871 #ifdef TARGET_NR_setreuid32
9872 case TARGET_NR_setreuid32:
9873 ret = get_errno(setreuid(arg1, arg2));
9876 #ifdef TARGET_NR_setregid32
9877 case TARGET_NR_setregid32:
9878 ret = get_errno(setregid(arg1, arg2));
9881 #ifdef TARGET_NR_getgroups32
9882 case TARGET_NR_getgroups32:
9884 int gidsetsize = arg1;
9885 uint32_t *target_grouplist;
9889 grouplist = alloca(gidsetsize * sizeof(gid_t));
9890 ret = get_errno(getgroups(gidsetsize, grouplist));
9891 if (gidsetsize == 0)
9893 if (!is_error(ret)) {
9894 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
9895 if (!target_grouplist) {
9896 ret = -TARGET_EFAULT;
9899 for(i = 0;i < ret; i++)
9900 target_grouplist[i] = tswap32(grouplist[i]);
9901 unlock_user(target_grouplist, arg2, gidsetsize * 4);
9906 #ifdef TARGET_NR_setgroups32
9907 case TARGET_NR_setgroups32:
9909 int gidsetsize = arg1;
9910 uint32_t *target_grouplist;
9914 grouplist = alloca(gidsetsize * sizeof(gid_t));
9915 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
9916 if (!target_grouplist) {
9917 ret = -TARGET_EFAULT;
9920 for(i = 0;i < gidsetsize; i++)
9921 grouplist[i] = tswap32(target_grouplist[i]);
9922 unlock_user(target_grouplist, arg2, 0);
9923 ret = get_errno(setgroups(gidsetsize, grouplist));
9927 #ifdef TARGET_NR_fchown32
9928 case TARGET_NR_fchown32:
9929 ret = get_errno(fchown(arg1, arg2, arg3));
9932 #ifdef TARGET_NR_setresuid32
9933 case TARGET_NR_setresuid32:
9934 ret = get_errno(sys_setresuid(arg1, arg2, arg3));
9937 #ifdef TARGET_NR_getresuid32
9938 case TARGET_NR_getresuid32:
9940 uid_t ruid, euid, suid;
9941 ret = get_errno(getresuid(&ruid, &euid, &suid));
9942 if (!is_error(ret)) {
9943 if (put_user_u32(ruid, arg1)
9944 || put_user_u32(euid, arg2)
9945 || put_user_u32(suid, arg3))
9951 #ifdef TARGET_NR_setresgid32
9952 case TARGET_NR_setresgid32:
9953 ret = get_errno(sys_setresgid(arg1, arg2, arg3));
9956 #ifdef TARGET_NR_getresgid32
9957 case TARGET_NR_getresgid32:
9959 gid_t rgid, egid, sgid;
9960 ret = get_errno(getresgid(&rgid, &egid, &sgid));
9961 if (!is_error(ret)) {
9962 if (put_user_u32(rgid, arg1)
9963 || put_user_u32(egid, arg2)
9964 || put_user_u32(sgid, arg3))
9970 #ifdef TARGET_NR_chown32
9971 case TARGET_NR_chown32:
9972 if (!(p = lock_user_string(arg1)))
9974 ret = get_errno(chown(p, arg2, arg3));
9975 unlock_user(p, arg1, 0);
9978 #ifdef TARGET_NR_setuid32
9979 case TARGET_NR_setuid32:
9980 ret = get_errno(sys_setuid(arg1));
9983 #ifdef TARGET_NR_setgid32
9984 case TARGET_NR_setgid32:
9985 ret = get_errno(sys_setgid(arg1));
9988 #ifdef TARGET_NR_setfsuid32
9989 case TARGET_NR_setfsuid32:
9990 ret = get_errno(setfsuid(arg1));
9993 #ifdef TARGET_NR_setfsgid32
9994 case TARGET_NR_setfsgid32:
9995 ret = get_errno(setfsgid(arg1));
9999 case TARGET_NR_pivot_root:
10000 goto unimplemented;
10001 #ifdef TARGET_NR_mincore
10002 case TARGET_NR_mincore:
10005 ret = -TARGET_EFAULT;
10006 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
10008 if (!(p = lock_user_string(arg3)))
10010 ret = get_errno(mincore(a, arg2, p));
10011 unlock_user(p, arg3, ret);
10013 unlock_user(a, arg1, 0);
10017 #ifdef TARGET_NR_arm_fadvise64_64
10018 case TARGET_NR_arm_fadvise64_64:
10019 /* arm_fadvise64_64 looks like fadvise64_64 but
10020 * with different argument order: fd, advice, offset, len
10021 * rather than the usual fd, offset, len, advice.
10022 * Note that offset and len are both 64-bit so appear as
10023 * pairs of 32-bit registers.
10025 ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
10026 target_offset64(arg5, arg6), arg2);
10027 ret = -host_to_target_errno(ret);
10031 #if TARGET_ABI_BITS == 32
10033 #ifdef TARGET_NR_fadvise64_64
10034 case TARGET_NR_fadvise64_64:
10035 /* 6 args: fd, offset (high, low), len (high, low), advice */
10036 if (regpairs_aligned(cpu_env)) {
10037 /* offset is in (3,4), len in (5,6) and advice in 7 */
10044 ret = -host_to_target_errno(posix_fadvise(arg1,
10045 target_offset64(arg2, arg3),
10046 target_offset64(arg4, arg5),
10051 #ifdef TARGET_NR_fadvise64
10052 case TARGET_NR_fadvise64:
10053 /* 5 args: fd, offset (high, low), len, advice */
10054 if (regpairs_aligned(cpu_env)) {
10055 /* offset is in (3,4), len in 5 and advice in 6 */
10061 ret = -host_to_target_errno(posix_fadvise(arg1,
10062 target_offset64(arg2, arg3),
10067 #else /* not a 32-bit ABI */
10068 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
10069 #ifdef TARGET_NR_fadvise64_64
10070 case TARGET_NR_fadvise64_64:
10072 #ifdef TARGET_NR_fadvise64
10073 case TARGET_NR_fadvise64:
10075 #ifdef TARGET_S390X
10077 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
10078 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
10079 case 6: arg4 = POSIX_FADV_DONTNEED; break;
10080 case 7: arg4 = POSIX_FADV_NOREUSE; break;
10084 ret = -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
10087 #endif /* end of 64-bit ABI fadvise handling */
10089 #ifdef TARGET_NR_madvise
10090 case TARGET_NR_madvise:
10091 /* A straight passthrough may not be safe because qemu sometimes
10092 turns private file-backed mappings into anonymous mappings.
10093 This will break MADV_DONTNEED.
10094 This is a hint, so ignoring and returning success is ok. */
10095 ret = get_errno(0);
10098 #if TARGET_ABI_BITS == 32
10099 case TARGET_NR_fcntl64:
10103 struct target_flock64 *target_fl;
10105 struct target_eabi_flock64 *target_efl;
10108 cmd = target_to_host_fcntl_cmd(arg2);
10109 if (cmd == -TARGET_EINVAL) {
10115 case TARGET_F_GETLK64:
10117 if (((CPUARMState *)cpu_env)->eabi) {
10118 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
10120 fl.l_type = tswap16(target_efl->l_type);
10121 fl.l_whence = tswap16(target_efl->l_whence);
10122 fl.l_start = tswap64(target_efl->l_start);
10123 fl.l_len = tswap64(target_efl->l_len);
10124 fl.l_pid = tswap32(target_efl->l_pid);
10125 unlock_user_struct(target_efl, arg3, 0);
10129 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
10131 fl.l_type = tswap16(target_fl->l_type);
10132 fl.l_whence = tswap16(target_fl->l_whence);
10133 fl.l_start = tswap64(target_fl->l_start);
10134 fl.l_len = tswap64(target_fl->l_len);
10135 fl.l_pid = tswap32(target_fl->l_pid);
10136 unlock_user_struct(target_fl, arg3, 0);
10138 ret = get_errno(fcntl(arg1, cmd, &fl));
10141 if (((CPUARMState *)cpu_env)->eabi) {
10142 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
10144 target_efl->l_type = tswap16(fl.l_type);
10145 target_efl->l_whence = tswap16(fl.l_whence);
10146 target_efl->l_start = tswap64(fl.l_start);
10147 target_efl->l_len = tswap64(fl.l_len);
10148 target_efl->l_pid = tswap32(fl.l_pid);
10149 unlock_user_struct(target_efl, arg3, 1);
10153 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
10155 target_fl->l_type = tswap16(fl.l_type);
10156 target_fl->l_whence = tswap16(fl.l_whence);
10157 target_fl->l_start = tswap64(fl.l_start);
10158 target_fl->l_len = tswap64(fl.l_len);
10159 target_fl->l_pid = tswap32(fl.l_pid);
10160 unlock_user_struct(target_fl, arg3, 1);
10165 case TARGET_F_SETLK64:
10166 case TARGET_F_SETLKW64:
10168 if (((CPUARMState *)cpu_env)->eabi) {
10169 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
10171 fl.l_type = tswap16(target_efl->l_type);
10172 fl.l_whence = tswap16(target_efl->l_whence);
10173 fl.l_start = tswap64(target_efl->l_start);
10174 fl.l_len = tswap64(target_efl->l_len);
10175 fl.l_pid = tswap32(target_efl->l_pid);
10176 unlock_user_struct(target_efl, arg3, 0);
10180 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
10182 fl.l_type = tswap16(target_fl->l_type);
10183 fl.l_whence = tswap16(target_fl->l_whence);
10184 fl.l_start = tswap64(target_fl->l_start);
10185 fl.l_len = tswap64(target_fl->l_len);
10186 fl.l_pid = tswap32(target_fl->l_pid);
10187 unlock_user_struct(target_fl, arg3, 0);
10189 ret = get_errno(fcntl(arg1, cmd, &fl));
10192 ret = do_fcntl(arg1, arg2, arg3);
10198 #ifdef TARGET_NR_cacheflush
10199 case TARGET_NR_cacheflush:
10200 /* self-modifying code is handled automatically, so nothing needed */
10204 #ifdef TARGET_NR_security
10205 case TARGET_NR_security:
10206 goto unimplemented;
10208 #ifdef TARGET_NR_getpagesize
10209 case TARGET_NR_getpagesize:
10210 ret = TARGET_PAGE_SIZE;
10213 case TARGET_NR_gettid:
10214 ret = get_errno(gettid());
10216 #ifdef TARGET_NR_readahead
10217 case TARGET_NR_readahead:
10218 #if TARGET_ABI_BITS == 32
10219 if (regpairs_aligned(cpu_env)) {
10224 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
10226 ret = get_errno(readahead(arg1, arg2, arg3));
10231 #ifdef TARGET_NR_setxattr
10232 case TARGET_NR_listxattr:
10233 case TARGET_NR_llistxattr:
10237 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10239 ret = -TARGET_EFAULT;
10243 p = lock_user_string(arg1);
10245 if (num == TARGET_NR_listxattr) {
10246 ret = get_errno(listxattr(p, b, arg3));
10248 ret = get_errno(llistxattr(p, b, arg3));
10251 ret = -TARGET_EFAULT;
10253 unlock_user(p, arg1, 0);
10254 unlock_user(b, arg2, arg3);
10257 case TARGET_NR_flistxattr:
10261 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10263 ret = -TARGET_EFAULT;
10267 ret = get_errno(flistxattr(arg1, b, arg3));
10268 unlock_user(b, arg2, arg3);
10271 case TARGET_NR_setxattr:
10272 case TARGET_NR_lsetxattr:
10274 void *p, *n, *v = 0;
10276 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10278 ret = -TARGET_EFAULT;
10282 p = lock_user_string(arg1);
10283 n = lock_user_string(arg2);
10285 if (num == TARGET_NR_setxattr) {
10286 ret = get_errno(setxattr(p, n, v, arg4, arg5));
10288 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
10291 ret = -TARGET_EFAULT;
10293 unlock_user(p, arg1, 0);
10294 unlock_user(n, arg2, 0);
10295 unlock_user(v, arg3, 0);
10298 case TARGET_NR_fsetxattr:
10302 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10304 ret = -TARGET_EFAULT;
10308 n = lock_user_string(arg2);
10310 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
10312 ret = -TARGET_EFAULT;
10314 unlock_user(n, arg2, 0);
10315 unlock_user(v, arg3, 0);
10318 case TARGET_NR_getxattr:
10319 case TARGET_NR_lgetxattr:
10321 void *p, *n, *v = 0;
10323 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10325 ret = -TARGET_EFAULT;
10329 p = lock_user_string(arg1);
10330 n = lock_user_string(arg2);
10332 if (num == TARGET_NR_getxattr) {
10333 ret = get_errno(getxattr(p, n, v, arg4));
10335 ret = get_errno(lgetxattr(p, n, v, arg4));
10338 ret = -TARGET_EFAULT;
10340 unlock_user(p, arg1, 0);
10341 unlock_user(n, arg2, 0);
10342 unlock_user(v, arg3, arg4);
10345 case TARGET_NR_fgetxattr:
10349 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10351 ret = -TARGET_EFAULT;
10355 n = lock_user_string(arg2);
10357 ret = get_errno(fgetxattr(arg1, n, v, arg4));
10359 ret = -TARGET_EFAULT;
10361 unlock_user(n, arg2, 0);
10362 unlock_user(v, arg3, arg4);
10365 case TARGET_NR_removexattr:
10366 case TARGET_NR_lremovexattr:
10369 p = lock_user_string(arg1);
10370 n = lock_user_string(arg2);
10372 if (num == TARGET_NR_removexattr) {
10373 ret = get_errno(removexattr(p, n));
10375 ret = get_errno(lremovexattr(p, n));
10378 ret = -TARGET_EFAULT;
10380 unlock_user(p, arg1, 0);
10381 unlock_user(n, arg2, 0);
10384 case TARGET_NR_fremovexattr:
10387 n = lock_user_string(arg2);
10389 ret = get_errno(fremovexattr(arg1, n));
10391 ret = -TARGET_EFAULT;
10393 unlock_user(n, arg2, 0);
10397 #endif /* CONFIG_ATTR */
10398 #ifdef TARGET_NR_set_thread_area
10399 case TARGET_NR_set_thread_area:
10400 #if defined(TARGET_MIPS)
10401 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
10404 #elif defined(TARGET_CRIS)
10406 ret = -TARGET_EINVAL;
10408 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
10412 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
10413 ret = do_set_thread_area(cpu_env, arg1);
10415 #elif defined(TARGET_M68K)
10417 TaskState *ts = cpu->opaque;
10418 ts->tp_value = arg1;
10423 goto unimplemented_nowarn;
10426 #ifdef TARGET_NR_get_thread_area
10427 case TARGET_NR_get_thread_area:
10428 #if defined(TARGET_I386) && defined(TARGET_ABI32)
10429 ret = do_get_thread_area(cpu_env, arg1);
10431 #elif defined(TARGET_M68K)
10433 TaskState *ts = cpu->opaque;
10434 ret = ts->tp_value;
10438 goto unimplemented_nowarn;
10441 #ifdef TARGET_NR_getdomainname
10442 case TARGET_NR_getdomainname:
10443 goto unimplemented_nowarn;
10446 #ifdef TARGET_NR_clock_gettime
10447 case TARGET_NR_clock_gettime:
10449 struct timespec ts;
10450 ret = get_errno(clock_gettime(arg1, &ts));
10451 if (!is_error(ret)) {
10452 host_to_target_timespec(arg2, &ts);
10457 #ifdef TARGET_NR_clock_getres
10458 case TARGET_NR_clock_getres:
10460 struct timespec ts;
10461 ret = get_errno(clock_getres(arg1, &ts));
10462 if (!is_error(ret)) {
10463 host_to_target_timespec(arg2, &ts);
10468 #ifdef TARGET_NR_clock_nanosleep
10469 case TARGET_NR_clock_nanosleep:
10471 struct timespec ts;
10472 target_to_host_timespec(&ts, arg3);
10473 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
10475 host_to_target_timespec(arg4, &ts);
10477 #if defined(TARGET_PPC)
10478 /* clock_nanosleep is odd in that it returns positive errno values.
10479 * On PPC, CR0 bit 3 should be set in such a situation. */
10481 ((CPUPPCState *)cpu_env)->crf[0] |= 1;
10488 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
10489 case TARGET_NR_set_tid_address:
10490 ret = get_errno(set_tid_address((int *)g2h(arg1)));
10494 case TARGET_NR_tkill:
10495 ret = get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
10498 case TARGET_NR_tgkill:
10499 ret = get_errno(safe_tgkill((int)arg1, (int)arg2,
10500 target_to_host_signal(arg3)));
10503 #ifdef TARGET_NR_set_robust_list
10504 case TARGET_NR_set_robust_list:
10505 case TARGET_NR_get_robust_list:
10506 /* The ABI for supporting robust futexes has userspace pass
10507 * the kernel a pointer to a linked list which is updated by
10508 * userspace after the syscall; the list is walked by the kernel
10509 * when the thread exits. Since the linked list in QEMU guest
10510 * memory isn't a valid linked list for the host and we have
10511 * no way to reliably intercept the thread-death event, we can't
10512 * support these. Silently return ENOSYS so that guest userspace
10513 * falls back to a non-robust futex implementation (which should
10514 * be OK except in the corner case of the guest crashing while
10515 * holding a mutex that is shared with another process via
10518 goto unimplemented_nowarn;
10521 #if defined(TARGET_NR_utimensat)
10522 case TARGET_NR_utimensat:
10524 struct timespec *tsp, ts[2];
10528 target_to_host_timespec(ts, arg3);
10529 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
10533 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
10535 if (!(p = lock_user_string(arg2))) {
10536 ret = -TARGET_EFAULT;
10539 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
10540 unlock_user(p, arg2, 0);
10545 case TARGET_NR_futex:
10546 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
10548 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
10549 case TARGET_NR_inotify_init:
10550 ret = get_errno(sys_inotify_init());
10553 #ifdef CONFIG_INOTIFY1
10554 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
10555 case TARGET_NR_inotify_init1:
10556 ret = get_errno(sys_inotify_init1(arg1));
10560 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
10561 case TARGET_NR_inotify_add_watch:
10562 p = lock_user_string(arg2);
10563 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
10564 unlock_user(p, arg2, 0);
10567 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
10568 case TARGET_NR_inotify_rm_watch:
10569 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
10573 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
10574 case TARGET_NR_mq_open:
10576 struct mq_attr posix_mq_attr, *attrp;
10578 p = lock_user_string(arg1 - 1);
10580 copy_from_user_mq_attr (&posix_mq_attr, arg4);
10581 attrp = &posix_mq_attr;
10585 ret = get_errno(mq_open(p, arg2, arg3, attrp));
10586 unlock_user (p, arg1, 0);
10590 case TARGET_NR_mq_unlink:
10591 p = lock_user_string(arg1 - 1);
10592 ret = get_errno(mq_unlink(p));
10593 unlock_user (p, arg1, 0);
10596 case TARGET_NR_mq_timedsend:
10598 struct timespec ts;
10600 p = lock_user (VERIFY_READ, arg2, arg3, 1);
10602 target_to_host_timespec(&ts, arg5);
10603 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
10604 host_to_target_timespec(arg5, &ts);
10606 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
10608 unlock_user (p, arg2, arg3);
10612 case TARGET_NR_mq_timedreceive:
10614 struct timespec ts;
10617 p = lock_user (VERIFY_READ, arg2, arg3, 1);
10619 target_to_host_timespec(&ts, arg5);
10620 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
10622 host_to_target_timespec(arg5, &ts);
10624 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
10627 unlock_user (p, arg2, arg3);
10629 put_user_u32(prio, arg4);
10633 /* Not implemented for now... */
10634 /* case TARGET_NR_mq_notify: */
10637 case TARGET_NR_mq_getsetattr:
10639 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
10642 ret = mq_getattr(arg1, &posix_mq_attr_out);
10643 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
10646 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
10647 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
10654 #ifdef CONFIG_SPLICE
10655 #ifdef TARGET_NR_tee
10656 case TARGET_NR_tee:
10658 ret = get_errno(tee(arg1,arg2,arg3,arg4));
10662 #ifdef TARGET_NR_splice
10663 case TARGET_NR_splice:
10665 loff_t loff_in, loff_out;
10666 loff_t *ploff_in = NULL, *ploff_out = NULL;
10668 if (get_user_u64(loff_in, arg2)) {
10671 ploff_in = &loff_in;
10674 if (get_user_u64(loff_out, arg4)) {
10677 ploff_out = &loff_out;
10679 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
10681 if (put_user_u64(loff_in, arg2)) {
10686 if (put_user_u64(loff_out, arg4)) {
10693 #ifdef TARGET_NR_vmsplice
10694 case TARGET_NR_vmsplice:
10696 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10698 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
10699 unlock_iovec(vec, arg2, arg3, 0);
10701 ret = -host_to_target_errno(errno);
10706 #endif /* CONFIG_SPLICE */
10707 #ifdef CONFIG_EVENTFD
10708 #if defined(TARGET_NR_eventfd)
10709 case TARGET_NR_eventfd:
10710 ret = get_errno(eventfd(arg1, 0));
10711 fd_trans_unregister(ret);
10714 #if defined(TARGET_NR_eventfd2)
10715 case TARGET_NR_eventfd2:
10717 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
10718 if (arg2 & TARGET_O_NONBLOCK) {
10719 host_flags |= O_NONBLOCK;
10721 if (arg2 & TARGET_O_CLOEXEC) {
10722 host_flags |= O_CLOEXEC;
10724 ret = get_errno(eventfd(arg1, host_flags));
10725 fd_trans_unregister(ret);
10729 #endif /* CONFIG_EVENTFD */
10730 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
10731 case TARGET_NR_fallocate:
10732 #if TARGET_ABI_BITS == 32
10733 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
10734 target_offset64(arg5, arg6)));
10736 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
10740 #if defined(CONFIG_SYNC_FILE_RANGE)
10741 #if defined(TARGET_NR_sync_file_range)
10742 case TARGET_NR_sync_file_range:
10743 #if TARGET_ABI_BITS == 32
10744 #if defined(TARGET_MIPS)
10745 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
10746 target_offset64(arg5, arg6), arg7));
10748 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
10749 target_offset64(arg4, arg5), arg6));
10750 #endif /* !TARGET_MIPS */
10752 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
10756 #if defined(TARGET_NR_sync_file_range2)
10757 case TARGET_NR_sync_file_range2:
10758 /* This is like sync_file_range but the arguments are reordered */
10759 #if TARGET_ABI_BITS == 32
10760 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
10761 target_offset64(arg5, arg6), arg2));
10763 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
10768 #if defined(TARGET_NR_signalfd4)
10769 case TARGET_NR_signalfd4:
10770 ret = do_signalfd4(arg1, arg2, arg4);
10773 #if defined(TARGET_NR_signalfd)
10774 case TARGET_NR_signalfd:
10775 ret = do_signalfd4(arg1, arg2, 0);
10778 #if defined(CONFIG_EPOLL)
10779 #if defined(TARGET_NR_epoll_create)
10780 case TARGET_NR_epoll_create:
10781 ret = get_errno(epoll_create(arg1));
10784 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
10785 case TARGET_NR_epoll_create1:
10786 ret = get_errno(epoll_create1(arg1));
10789 #if defined(TARGET_NR_epoll_ctl)
10790 case TARGET_NR_epoll_ctl:
10792 struct epoll_event ep;
10793 struct epoll_event *epp = 0;
10795 struct target_epoll_event *target_ep;
10796 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
10799 ep.events = tswap32(target_ep->events);
10800 /* The epoll_data_t union is just opaque data to the kernel,
10801 * so we transfer all 64 bits across and need not worry what
10802 * actual data type it is.
10804 ep.data.u64 = tswap64(target_ep->data.u64);
10805 unlock_user_struct(target_ep, arg4, 0);
10808 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
10813 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
10814 #define IMPLEMENT_EPOLL_PWAIT
10816 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
10817 #if defined(TARGET_NR_epoll_wait)
10818 case TARGET_NR_epoll_wait:
10820 #if defined(IMPLEMENT_EPOLL_PWAIT)
10821 case TARGET_NR_epoll_pwait:
10824 struct target_epoll_event *target_ep;
10825 struct epoll_event *ep;
10827 int maxevents = arg3;
10828 int timeout = arg4;
10830 target_ep = lock_user(VERIFY_WRITE, arg2,
10831 maxevents * sizeof(struct target_epoll_event), 1);
10836 ep = alloca(maxevents * sizeof(struct epoll_event));
10839 #if defined(IMPLEMENT_EPOLL_PWAIT)
10840 case TARGET_NR_epoll_pwait:
10842 target_sigset_t *target_set;
10843 sigset_t _set, *set = &_set;
10846 target_set = lock_user(VERIFY_READ, arg5,
10847 sizeof(target_sigset_t), 1);
10849 unlock_user(target_ep, arg2, 0);
10852 target_to_host_sigset(set, target_set);
10853 unlock_user(target_set, arg5, 0);
10858 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
10862 #if defined(TARGET_NR_epoll_wait)
10863 case TARGET_NR_epoll_wait:
10864 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
10868 ret = -TARGET_ENOSYS;
10870 if (!is_error(ret)) {
10872 for (i = 0; i < ret; i++) {
10873 target_ep[i].events = tswap32(ep[i].events);
10874 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
10877 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
10882 #ifdef TARGET_NR_prlimit64
10883 case TARGET_NR_prlimit64:
10885 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
10886 struct target_rlimit64 *target_rnew, *target_rold;
10887 struct host_rlimit64 rnew, rold, *rnewp = 0;
10888 int resource = target_to_host_resource(arg2);
10890 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
10893 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
10894 rnew.rlim_max = tswap64(target_rnew->rlim_max);
10895 unlock_user_struct(target_rnew, arg3, 0);
10899 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
10900 if (!is_error(ret) && arg4) {
10901 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
10904 target_rold->rlim_cur = tswap64(rold.rlim_cur);
10905 target_rold->rlim_max = tswap64(rold.rlim_max);
10906 unlock_user_struct(target_rold, arg4, 1);
10911 #ifdef TARGET_NR_gethostname
10912 case TARGET_NR_gethostname:
10914 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10916 ret = get_errno(gethostname(name, arg2));
10917 unlock_user(name, arg1, arg2);
10919 ret = -TARGET_EFAULT;
10924 #ifdef TARGET_NR_atomic_cmpxchg_32
10925 case TARGET_NR_atomic_cmpxchg_32:
10927 /* should use start_exclusive from main.c */
10928 abi_ulong mem_value;
10929 if (get_user_u32(mem_value, arg6)) {
10930 target_siginfo_t info;
10931 info.si_signo = SIGSEGV;
10933 info.si_code = TARGET_SEGV_MAPERR;
10934 info._sifields._sigfault._addr = arg6;
10935 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
10939 if (mem_value == arg2)
10940 put_user_u32(arg1, arg6);
10945 #ifdef TARGET_NR_atomic_barrier
10946 case TARGET_NR_atomic_barrier:
10948 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
10954 #ifdef TARGET_NR_timer_create
10955 case TARGET_NR_timer_create:
10957 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
10959 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
10962 int timer_index = next_free_host_timer();
10964 if (timer_index < 0) {
10965 ret = -TARGET_EAGAIN;
10967 timer_t *phtimer = g_posix_timers + timer_index;
10970 phost_sevp = &host_sevp;
10971 ret = target_to_host_sigevent(phost_sevp, arg2);
10977 ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
10981 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
10990 #ifdef TARGET_NR_timer_settime
10991 case TARGET_NR_timer_settime:
10993 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
10994 * struct itimerspec * old_value */
10995 target_timer_t timerid = get_timer_id(arg1);
10999 } else if (arg3 == 0) {
11000 ret = -TARGET_EINVAL;
11002 timer_t htimer = g_posix_timers[timerid];
11003 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
11005 target_to_host_itimerspec(&hspec_new, arg3);
11007 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
11008 host_to_target_itimerspec(arg2, &hspec_old);
11014 #ifdef TARGET_NR_timer_gettime
11015 case TARGET_NR_timer_gettime:
11017 /* args: timer_t timerid, struct itimerspec *curr_value */
11018 target_timer_t timerid = get_timer_id(arg1);
11022 } else if (!arg2) {
11023 ret = -TARGET_EFAULT;
11025 timer_t htimer = g_posix_timers[timerid];
11026 struct itimerspec hspec;
11027 ret = get_errno(timer_gettime(htimer, &hspec));
11029 if (host_to_target_itimerspec(arg2, &hspec)) {
11030 ret = -TARGET_EFAULT;
11037 #ifdef TARGET_NR_timer_getoverrun
11038 case TARGET_NR_timer_getoverrun:
11040 /* args: timer_t timerid */
11041 target_timer_t timerid = get_timer_id(arg1);
11046 timer_t htimer = g_posix_timers[timerid];
11047 ret = get_errno(timer_getoverrun(htimer));
11049 fd_trans_unregister(ret);
11054 #ifdef TARGET_NR_timer_delete
11055 case TARGET_NR_timer_delete:
11057 /* args: timer_t timerid */
11058 target_timer_t timerid = get_timer_id(arg1);
11063 timer_t htimer = g_posix_timers[timerid];
11064 ret = get_errno(timer_delete(htimer));
11065 g_posix_timers[timerid] = 0;
11071 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11072 case TARGET_NR_timerfd_create:
11073 ret = get_errno(timerfd_create(arg1,
11074 target_to_host_bitmask(arg2, fcntl_flags_tbl)));
11078 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11079 case TARGET_NR_timerfd_gettime:
11081 struct itimerspec its_curr;
11083 ret = get_errno(timerfd_gettime(arg1, &its_curr));
11085 if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
11092 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
11093 case TARGET_NR_timerfd_settime:
11095 struct itimerspec its_new, its_old, *p_new;
11098 if (target_to_host_itimerspec(&its_new, arg3)) {
11106 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
11108 if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
11115 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
11116 case TARGET_NR_ioprio_get:
11117 ret = get_errno(ioprio_get(arg1, arg2));
11121 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
11122 case TARGET_NR_ioprio_set:
11123 ret = get_errno(ioprio_set(arg1, arg2, arg3));
11127 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
11128 case TARGET_NR_setns:
11129 ret = get_errno(setns(arg1, arg2));
11132 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
11133 case TARGET_NR_unshare:
11134 ret = get_errno(unshare(arg1));
11140 gemu_log("qemu: Unsupported syscall: %d\n", num);
11141 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
11142 unimplemented_nowarn:
11144 ret = -TARGET_ENOSYS;
11149 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
11152 print_syscall_ret(num, ret);
11155 ret = -TARGET_EFAULT;