4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
29 #include <sys/mount.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
37 #include <linux/capability.h>
40 int __clone2(int (*fn)(void *), void *child_stack_base,
41 size_t stack_size, int flags, void *arg, ...);
43 #include <sys/socket.h>
47 #include <sys/times.h>
50 #include <sys/statfs.h>
52 #include <sys/sysinfo.h>
53 #include <sys/signalfd.h>
54 //#include <sys/user.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <linux/wireless.h>
58 #include <linux/icmp.h>
59 #include "qemu-common.h"
61 #include <sys/timerfd.h>
67 #include <sys/eventfd.h>
70 #include <sys/epoll.h>
73 #include "qemu/xattr.h"
75 #ifdef CONFIG_SENDFILE
76 #include <sys/sendfile.h>
79 #define termios host_termios
80 #define winsize host_winsize
81 #define termio host_termio
82 #define sgttyb host_sgttyb /* same as target */
83 #define tchars host_tchars /* same as target */
84 #define ltchars host_ltchars /* same as target */
86 #include <linux/termios.h>
87 #include <linux/unistd.h>
88 #include <linux/cdrom.h>
89 #include <linux/hdreg.h>
90 #include <linux/soundcard.h>
92 #include <linux/mtio.h>
94 #if defined(CONFIG_FIEMAP)
95 #include <linux/fiemap.h>
99 #include <linux/dm-ioctl.h>
100 #include <linux/reboot.h>
101 #include <linux/route.h>
102 #include <linux/filter.h>
103 #include <linux/blkpg.h>
104 #include "linux_loop.h"
/* NOTE(review): this excerpt is missing many intermediate lines (the
 * embedded original line numbers jump), so macro bodies below appear
 * without their enclosing braces.  Comments describe intent only. */
/* Clone flags QEMU handles itself when emulating NPTL thread creation. */
109 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
110 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
113 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
114 * once. This exercises the codepaths for restart.
116 //#define DEBUG_ERESTARTSYS
118 //#include <linux/msdos_fs.h>
/* VFAT readdir ioctls, redefined locally instead of pulling in msdos_fs.h. */
119 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
120 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
/* _syscallN(type, name, ...): declare a static host-side wrapper <name>
 * taking N typed arguments and forwarding them to syscall(__NR_<name>).
 * Used for host syscalls that glibc does not expose (or exposes with
 * different semantics). */
131 #define _syscall0(type,name) \
132 static type name (void) \
134 return syscall(__NR_##name); \
137 #define _syscall1(type,name,type1,arg1) \
138 static type name (type1 arg1) \
140 return syscall(__NR_##name, arg1); \
143 #define _syscall2(type,name,type1,arg1,type2,arg2) \
144 static type name (type1 arg1,type2 arg2) \
146 return syscall(__NR_##name, arg1, arg2); \
149 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
150 static type name (type1 arg1,type2 arg2,type3 arg3) \
152 return syscall(__NR_##name, arg1, arg2, arg3); \
155 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
156 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
158 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
161 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
163 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
165 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
169 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
170 type5,arg5,type6,arg6) \
171 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
174 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
/* Alias the internal sys_* wrapper names onto the host __NR_* numbers
 * so the _syscallN macros above resolve. */
178 #define __NR_sys_uname __NR_uname
179 #define __NR_sys_getcwd1 __NR_getcwd
180 #define __NR_sys_getdents __NR_getdents
181 #define __NR_sys_getdents64 __NR_getdents64
182 #define __NR_sys_getpriority __NR_getpriority
183 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
184 #define __NR_sys_syslog __NR_syslog
185 #define __NR_sys_tgkill __NR_tgkill
186 #define __NR_sys_tkill __NR_tkill
187 #define __NR_sys_futex __NR_futex
188 #define __NR_sys_inotify_init __NR_inotify_init
189 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
190 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
/* Some 64-bit hosts only have lseek; map _llseek onto it. */
192 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
194 #define __NR__llseek __NR_lseek
197 /* Newer kernel ports have llseek() instead of _llseek() */
198 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
199 #define TARGET_NR__llseek TARGET_NR_llseek
/* Host syscall wrappers.  Each is only compiled when both the guest
 * number (TARGET_NR_*) and the host number (__NR_*) exist; the #if
 * guards' matching #endif lines are missing from this excerpt. */
203 _syscall0(int, gettid)
205 /* This is a replacement for the host gettid() and must return a host
207 static int gettid(void) {
211 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
212 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
214 #if !defined(__NR_getdents) || \
215 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
216 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
218 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
219 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
220 loff_t *, res, uint, wh);
222 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
223 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
224 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
225 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
227 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
228 _syscall2(int,sys_tkill,int,tid,int,sig)
230 #ifdef __NR_exit_group
231 _syscall1(int,exit_group,int,error_code)
233 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
234 _syscall1(int,set_tid_address,int *,tidptr)
236 #if defined(TARGET_NR_futex) && defined(__NR_futex)
237 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
238 const struct timespec *,timeout,int *,uaddr2,int,val3)
/* Raw affinity syscalls: the glibc wrappers hide the kernel's cpumask
 * size handling, so QEMU calls the kernel directly. */
240 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
241 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
242 unsigned long *, user_mask_ptr);
243 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
244 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
245 unsigned long *, user_mask_ptr);
246 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
248 _syscall2(int, capget, struct __user_cap_header_struct *, header,
249 struct __user_cap_data_struct *, data);
250 _syscall2(int, capset, struct __user_cap_header_struct *, header,
251 struct __user_cap_data_struct *, data);
252 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
253 _syscall2(int, ioprio_get, int, which, int, who)
255 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
256 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
258 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
259 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
/* Translation table for open(2)/fcntl(2) flag bits between guest and
 * host.  Each row is { target_mask, target_bits, host_mask, host_bits }:
 * when (value & target_mask) == target_bits, the host bits are set.
 * The closing brace and some #endif lines are missing from this excerpt. */
262 static bitmask_transtbl fcntl_flags_tbl[] = {
263 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
264 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
265 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
266 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
267 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
268 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
269 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
270 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
271 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
272 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
273 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
274 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
275 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
/* Optional flags guarded by host availability. */
276 #if defined(O_DIRECT)
277 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
279 #if defined(O_NOATIME)
280 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
282 #if defined(O_CLOEXEC)
283 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
286 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
288 /* Don't terminate the list prematurely on 64-bit host+guest. */
289 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
290 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
/* Per-fd data/address translators: some fds (e.g. netlink/signalfd)
 * need their payloads converted between guest and host layouts.
 * target_fd_trans[] maps a host fd to its translator set, grown lazily
 * in 64-entry slices.  Bodies below are missing lines (excerpt). */
295 typedef abi_long (*TargetFdDataFunc)(void *, size_t);
296 typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t);
297 typedef struct TargetFdTrans {
298 TargetFdDataFunc host_to_target_data;
299 TargetFdDataFunc target_to_host_data;
300 TargetFdAddrFunc target_to_host_addr;
303 static TargetFdTrans **target_fd_trans;
/* Current capacity of target_fd_trans[]. */
305 static unsigned int target_fd_max;
/* Return the host->target data converter for fd, or (presumably) NULL
 * when none registered — the fallthrough return is not visible here. */
307 static TargetFdDataFunc fd_trans_host_to_target_data(int fd)
309 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
310 return target_fd_trans[fd]->host_to_target_data;
315 static TargetFdAddrFunc fd_trans_target_to_host_addr(int fd)
317 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
318 return target_fd_trans[fd]->target_to_host_addr;
/* Attach a translator to fd, growing the table to the next multiple
 * of 64 and zeroing the newly added slots. */
323 static void fd_trans_register(int fd, TargetFdTrans *trans)
327 if (fd >= target_fd_max) {
328 oldmax = target_fd_max;
329 target_fd_max = ((fd >> 6) + 1) << 6; /* by slice of 64 entries */
330 target_fd_trans = g_renew(TargetFdTrans *,
331 target_fd_trans, target_fd_max);
332 memset((void *)(target_fd_trans + oldmax), 0,
333 (target_fd_max - oldmax) * sizeof(TargetFdTrans *));
335 target_fd_trans[fd] = trans;
338 static void fd_trans_unregister(int fd)
340 if (fd >= 0 && fd < target_fd_max) {
341 target_fd_trans[fd] = NULL;
/* dup()-style semantics: the new fd inherits oldfd's translator. */
345 static void fd_trans_dup(int oldfd, int newfd)
347 fd_trans_unregister(newfd);
348 if (oldfd < target_fd_max && target_fd_trans[oldfd]) {
349 fd_trans_register(newfd, target_fd_trans[oldfd]);
/*
 * sys_getcwd1() - helper backing the guest getcwd syscall emulation.
 *
 * @buf:  destination buffer for the current working directory path
 * @size: size of @buf in bytes
 *
 * The kernel getcwd syscall returns the number of bytes copied
 * (including the trailing NUL), unlike the libc getcwd() which returns
 * a pointer; hence the strlen()+1 on success.
 *
 * Returns strlen(buf)+1 on success, or -1 with errno set by getcwd()
 * on failure (e.g. ERANGE when @size is too small).
 *
 * NOTE(review): the visible excerpt was fragmented (missing braces and
 * the failure-return path); reconstructed here as a complete function.
 */
static int sys_getcwd1(char *buf, size_t size)
{
    if (getcwd(buf, size) == NULL) {
        /* getcwd() sets errno */
        return -1;
    }
    return strlen(buf) + 1;
}
/* utimensat emulation: prefer the libc wrapper (CONFIG_UTIMENSAT),
 * fall back to a raw syscall, else a stub (body not visible here). */
362 #ifdef TARGET_NR_utimensat
363 #ifdef CONFIG_UTIMENSAT
364 static int sys_utimensat(int dirfd, const char *pathname,
365 const struct timespec times[2], int flags)
/* NULL pathname means "operate on dirfd itself" => futimens(). */
367 if (pathname == NULL)
368 return futimens(dirfd, times);
370 return utimensat(dirfd, pathname, times, flags);
372 #elif defined(__NR_utimensat)
373 #define __NR_sys_utimensat __NR_utimensat
374 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
375 const struct timespec *,tsp,int,flags)
377 static int sys_utimensat(int dirfd, const char *pathname,
378 const struct timespec times[2], int flags)
384 #endif /* TARGET_NR_utimensat */
/* inotify wrappers: thin passthroughs to the host libc, compiled only
 * when both guest and host support the call. */
386 #ifdef CONFIG_INOTIFY
387 #include <sys/inotify.h>
389 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
390 static int sys_inotify_init(void)
392 return (inotify_init());
395 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
396 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
398 return (inotify_add_watch(fd, pathname, mask));
401 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
402 static int sys_inotify_rm_watch(int fd, int32_t wd)
404 return (inotify_rm_watch(fd, wd));
407 #ifdef CONFIG_INOTIFY1
408 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
409 static int sys_inotify_init1(int flags)
411 return (inotify_init1(flags));
416 /* Userspace can usually survive runtime without inotify */
417 #undef TARGET_NR_inotify_init
418 #undef TARGET_NR_inotify_init1
419 #undef TARGET_NR_inotify_add_watch
420 #undef TARGET_NR_inotify_rm_watch
421 #endif /* CONFIG_INOTIFY */
/* ppoll/prlimit64 wrappers: __NR_* is defined to -1 when the host
 * lacks the syscall so the wrapper compiles but fails with ENOSYS. */
423 #if defined(TARGET_NR_ppoll)
425 # define __NR_ppoll -1
427 #define __NR_sys_ppoll __NR_ppoll
428 _syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
429 struct timespec *, timeout, const sigset_t *, sigmask,
433 #if defined(TARGET_NR_prlimit64)
434 #ifndef __NR_prlimit64
435 # define __NR_prlimit64 -1
437 #define __NR_sys_prlimit64 __NR_prlimit64
438 /* The glibc rlimit structure may not be that used by the underlying syscall */
439 struct host_rlimit64 {
443 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
444 const struct host_rlimit64 *, new_limit,
445 struct host_rlimit64 *, old_limit)
449 #if defined(TARGET_NR_timer_create)
450 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
451 static timer_t g_posix_timers[32] = { 0, } ;
/* Claim the first free slot in g_posix_timers and return its index;
 * the "not found" return value is not visible in this excerpt. */
453 static inline int next_free_host_timer(void)
456 /* FIXME: Does finding the next free slot require a lock? */
457 for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
458 if (g_posix_timers[k] == 0) {
459 g_posix_timers[k] = (timer_t) 1;
467 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
/* regpairs_aligned(): per-target policy for whether 64-bit syscall
 * arguments must start on an even register. */
469 static inline int regpairs_aligned(void *cpu_env) {
470 return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
472 #elif defined(TARGET_MIPS)
473 static inline int regpairs_aligned(void *cpu_env) { return 1; }
474 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
475 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
476 * of registers which translates to the same as ARM/MIPS, because we start with
478 static inline int regpairs_aligned(void *cpu_env) { return 1; }
480 static inline int regpairs_aligned(void *cpu_env) { return 0; }
/* Guest<->host errno translation.  host_to_target_errno_table[] maps
 * host errno values to guest (TARGET_) values; its inverse is built at
 * startup.  A zero entry means "identity" (see the conversion helpers
 * below, whose fallthrough returns are not visible in this excerpt). */
483 #define ERRNO_TABLE_SIZE 1200
485 /* target_to_host_errno_table[] is initialized from
486 * host_to_target_errno_table[] in syscall_init(). */
487 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
491 * This list is the union of errno values overridden in asm-<arch>/errno.h
492 * minus the errnos that are not actually generic to all archs.
494 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
495 [EAGAIN] = TARGET_EAGAIN,
496 [EIDRM] = TARGET_EIDRM,
497 [ECHRNG] = TARGET_ECHRNG,
498 [EL2NSYNC] = TARGET_EL2NSYNC,
499 [EL3HLT] = TARGET_EL3HLT,
500 [EL3RST] = TARGET_EL3RST,
501 [ELNRNG] = TARGET_ELNRNG,
502 [EUNATCH] = TARGET_EUNATCH,
503 [ENOCSI] = TARGET_ENOCSI,
504 [EL2HLT] = TARGET_EL2HLT,
505 [EDEADLK] = TARGET_EDEADLK,
506 [ENOLCK] = TARGET_ENOLCK,
507 [EBADE] = TARGET_EBADE,
508 [EBADR] = TARGET_EBADR,
509 [EXFULL] = TARGET_EXFULL,
510 [ENOANO] = TARGET_ENOANO,
511 [EBADRQC] = TARGET_EBADRQC,
512 [EBADSLT] = TARGET_EBADSLT,
513 [EBFONT] = TARGET_EBFONT,
514 [ENOSTR] = TARGET_ENOSTR,
515 [ENODATA] = TARGET_ENODATA,
516 [ETIME] = TARGET_ETIME,
517 [ENOSR] = TARGET_ENOSR,
518 [ENONET] = TARGET_ENONET,
519 [ENOPKG] = TARGET_ENOPKG,
520 [EREMOTE] = TARGET_EREMOTE,
521 [ENOLINK] = TARGET_ENOLINK,
522 [EADV] = TARGET_EADV,
523 [ESRMNT] = TARGET_ESRMNT,
524 [ECOMM] = TARGET_ECOMM,
525 [EPROTO] = TARGET_EPROTO,
526 [EDOTDOT] = TARGET_EDOTDOT,
527 [EMULTIHOP] = TARGET_EMULTIHOP,
528 [EBADMSG] = TARGET_EBADMSG,
529 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
530 [EOVERFLOW] = TARGET_EOVERFLOW,
531 [ENOTUNIQ] = TARGET_ENOTUNIQ,
532 [EBADFD] = TARGET_EBADFD,
533 [EREMCHG] = TARGET_EREMCHG,
534 [ELIBACC] = TARGET_ELIBACC,
535 [ELIBBAD] = TARGET_ELIBBAD,
536 [ELIBSCN] = TARGET_ELIBSCN,
537 [ELIBMAX] = TARGET_ELIBMAX,
538 [ELIBEXEC] = TARGET_ELIBEXEC,
539 [EILSEQ] = TARGET_EILSEQ,
540 [ENOSYS] = TARGET_ENOSYS,
541 [ELOOP] = TARGET_ELOOP,
542 [ERESTART] = TARGET_ERESTART,
543 [ESTRPIPE] = TARGET_ESTRPIPE,
544 [ENOTEMPTY] = TARGET_ENOTEMPTY,
545 [EUSERS] = TARGET_EUSERS,
546 [ENOTSOCK] = TARGET_ENOTSOCK,
547 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
548 [EMSGSIZE] = TARGET_EMSGSIZE,
549 [EPROTOTYPE] = TARGET_EPROTOTYPE,
550 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
551 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
552 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
553 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
554 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
555 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
556 [EADDRINUSE] = TARGET_EADDRINUSE,
557 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
558 [ENETDOWN] = TARGET_ENETDOWN,
559 [ENETUNREACH] = TARGET_ENETUNREACH,
560 [ENETRESET] = TARGET_ENETRESET,
561 [ECONNABORTED] = TARGET_ECONNABORTED,
562 [ECONNRESET] = TARGET_ECONNRESET,
563 [ENOBUFS] = TARGET_ENOBUFS,
564 [EISCONN] = TARGET_EISCONN,
565 [ENOTCONN] = TARGET_ENOTCONN,
566 [EUCLEAN] = TARGET_EUCLEAN,
567 [ENOTNAM] = TARGET_ENOTNAM,
568 [ENAVAIL] = TARGET_ENAVAIL,
569 [EISNAM] = TARGET_EISNAM,
570 [EREMOTEIO] = TARGET_EREMOTEIO,
571 [ESHUTDOWN] = TARGET_ESHUTDOWN,
572 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
573 [ETIMEDOUT] = TARGET_ETIMEDOUT,
574 [ECONNREFUSED] = TARGET_ECONNREFUSED,
575 [EHOSTDOWN] = TARGET_EHOSTDOWN,
576 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
577 [EALREADY] = TARGET_EALREADY,
578 [EINPROGRESS] = TARGET_EINPROGRESS,
579 [ESTALE] = TARGET_ESTALE,
580 [ECANCELED] = TARGET_ECANCELED,
581 [ENOMEDIUM] = TARGET_ENOMEDIUM,
582 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
/* Key-management errnos are optional on older hosts (#ifdef guards
 * are partially elided in this excerpt). */
584 [ENOKEY] = TARGET_ENOKEY,
587 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
590 [EKEYREVOKED] = TARGET_EKEYREVOKED,
593 [EKEYREJECTED] = TARGET_EKEYREJECTED,
596 [EOWNERDEAD] = TARGET_EOWNERDEAD,
598 #ifdef ENOTRECOVERABLE
599 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
/* Translate a host errno to the guest value; table hit wins, otherwise
 * (presumably) the value passes through unchanged. */
603 static inline int host_to_target_errno(int err)
605 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
606 host_to_target_errno_table[err]) {
607 return host_to_target_errno_table[err];
612 static inline int target_to_host_errno(int err)
614 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
615 target_to_host_errno_table[err]) {
616 return target_to_host_errno_table[err];
/* On host-call failure, convert errno into the negative guest errno
 * convention used throughout this file. */
621 static inline abi_long get_errno(abi_long ret)
624 return -host_to_target_errno(errno);
/* Linux convention: values in [-4095, -1] encode errors. */
629 static inline int is_error(abi_long ret)
631 return (abi_ulong)ret >= (abi_ulong)(-4096);
634 char *target_strerror(int err)
636 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
639 return strerror(target_to_host_errno(err));
/* safe_syscallN macros: like _syscallN but routed through safe_syscall(),
 * which (per QEMU's design) makes the host call restartable so guest
 * signals are not lost while blocked in the kernel.  Macro bodies are
 * missing their braces in this excerpt. */
642 #define safe_syscall0(type, name) \
643 static type safe_##name(void) \
645 return safe_syscall(__NR_##name); \
648 #define safe_syscall1(type, name, type1, arg1) \
649 static type safe_##name(type1 arg1) \
651 return safe_syscall(__NR_##name, arg1); \
654 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
655 static type safe_##name(type1 arg1, type2 arg2) \
657 return safe_syscall(__NR_##name, arg1, arg2); \
660 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
661 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
663 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
666 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
668 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
670 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
673 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
674 type4, arg4, type5, arg5) \
675 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
678 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
681 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
682 type4, arg4, type5, arg5, type6, arg6) \
683 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
684 type5 arg5, type6 arg6) \
686 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
/* Blocking syscalls that must be interruptible by guest signals. */
689 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
690 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
691 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
692 int, flags, mode_t, mode)
693 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
694 struct rusage *, rusage)
695 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
696 int, options, struct rusage *, rusage)
697 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
698 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
699 fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
700 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
701 const struct timespec *,timeout,int *,uaddr2,int,val3)
/* Convert a host socket type (SOCK_STREAM/SOCK_DGRAM plus the
 * SOCK_CLOEXEC/SOCK_NONBLOCK modifier bits) to the guest encoding.
 * The switch cases and final return are partially elided here. */
703 static inline int host_to_target_sock_type(int host_type)
707 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
709 target_type = TARGET_SOCK_DGRAM;
712 target_type = TARGET_SOCK_STREAM;
/* Unknown base types pass through unchanged. */
715 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
719 #if defined(SOCK_CLOEXEC)
720 if (host_type & SOCK_CLOEXEC) {
721 target_type |= TARGET_SOCK_CLOEXEC;
725 #if defined(SOCK_NONBLOCK)
726 if (host_type & SOCK_NONBLOCK) {
727 target_type |= TARGET_SOCK_NONBLOCK;
/* Emulated program-break state:
 *   target_original_brk - initial break (lower bound for shrinking)
 *   target_brk          - current break as seen by the guest
 *   brk_page            - first host page boundary above the heap
 */
734 static abi_ulong target_brk;
735 static abi_ulong target_original_brk;
736 static abi_ulong brk_page;
738 void target_set_brk(abi_ulong new_brk)
740 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
741 brk_page = HOST_PAGE_ALIGN(target_brk);
744 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
745 #define DEBUGF_BRK(message, args...)
747 /* do_brk() must return target values and target errnos. */
/* Grow/shrink the emulated heap.  Several branches' return statements
 * are elided in this excerpt; the visible logic is: query (!new_brk),
 * reject shrink below original, satisfy in-page growth with memset,
 * otherwise mmap more pages (without MAP_FIXED, see comment below). */
748 abi_long do_brk(abi_ulong new_brk)
750 abi_long mapped_addr;
753 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
756 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
759 if (new_brk < target_original_brk) {
760 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
765 /* If the new brk is less than the highest page reserved to the
766 * target heap allocation, set it and we're almost done... */
767 if (new_brk <= brk_page) {
768 /* Heap contents are initialized to zero, as for anonymous
770 if (new_brk > target_brk) {
771 memset(g2h(target_brk), 0, new_brk - target_brk);
773 target_brk = new_brk;
774 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
778 /* We need to allocate more memory after the brk... Note that
779 * we don't use MAP_FIXED because that will map over the top of
780 * any existing mapping (like the one with the host libc or qemu
781 * itself); instead we treat "mapped but at wrong address" as
782 * a failure and unmap again.
784 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
785 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
786 PROT_READ|PROT_WRITE,
787 MAP_ANON|MAP_PRIVATE, 0, 0));
789 if (mapped_addr == brk_page) {
790 /* Heap contents are initialized to zero, as for anonymous
791 * mapped pages. Technically the new pages are already
792 * initialized to zero since they *are* anonymous mapped
793 * pages, however we have to take care with the contents that
794 * come from the remaining part of the previous page: it may
795 * contains garbage data due to a previous heap usage (grown
797 memset(g2h(target_brk), 0, brk_page - target_brk);
799 target_brk = new_brk;
800 brk_page = HOST_PAGE_ALIGN(target_brk);
801 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
804 } else if (mapped_addr != -1) {
805 /* Mapped but at wrong address, meaning there wasn't actually
806 * enough space for this brk.
808 target_munmap(mapped_addr, new_alloc_size);
810 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
813 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
816 #if defined(TARGET_ALPHA)
817 /* We (partially) emulate OSF/1 on Alpha, which requires we
818 return a proper errno, not an unchanged brk value. */
819 return -TARGET_ENOMEM;
821 /* For everything else, return the previous break. */
/* Copy a guest fd_set (packed abi_ulong words) into a host fd_set.
 * Loop bodies are partially elided in this excerpt. */
825 static inline abi_long copy_from_user_fdset(fd_set *fds,
826 abi_ulong target_fds_addr,
830 abi_ulong b, *target_fds;
832 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
833 if (!(target_fds = lock_user(VERIFY_READ,
835 sizeof(abi_ulong) * nw,
837 return -TARGET_EFAULT;
841 for (i = 0; i < nw; i++) {
842 /* grab the abi_ulong */
843 __get_user(b, &target_fds[i]);
844 for (j = 0; j < TARGET_ABI_BITS; j++) {
845 /* check the bit inside the abi_ulong */
852 unlock_user(target_fds, target_fds_addr, 0);
/* NULL-address variant: sets *fds_ptr to NULL when the guest passed
 * no set (the else branch is not visible here). */
857 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
858 abi_ulong target_fds_addr,
861 if (target_fds_addr) {
862 if (copy_from_user_fdset(fds, target_fds_addr, n))
863 return -TARGET_EFAULT;
/* Inverse direction: host fd_set back out to guest memory. */
871 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
877 abi_ulong *target_fds;
879 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
880 if (!(target_fds = lock_user(VERIFY_WRITE,
882 sizeof(abi_ulong) * nw,
884 return -TARGET_EFAULT;
887 for (i = 0; i < nw; i++) {
889 for (j = 0; j < TARGET_ABI_BITS; j++) {
890 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
893 __put_user(v, &target_fds[i]);
896 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
901 #if defined(__alpha__)
/* Rescale clock ticks when host and guest HZ differ. */
907 static inline abi_long host_to_target_clock_t(long ticks)
909 #if HOST_HZ == TARGET_HZ
912 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
/* Write a host struct rusage into guest memory, byte-swapping each
 * field to the guest ABI (tswapal). */
916 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
917 const struct rusage *rusage)
919 struct target_rusage *target_rusage;
921 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
922 return -TARGET_EFAULT;
923 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
924 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
925 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
926 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
927 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
928 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
929 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
930 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
931 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
932 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
933 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
934 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
935 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
936 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
937 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
938 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
939 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
940 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
941 unlock_user_struct(target_rusage, target_addr, 1);
/* Map a guest rlimit value to the host rlim_t; anything that does not
 * survive the round-trip (overflow) becomes RLIM_INFINITY. */
946 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
948 abi_ulong target_rlim_swap;
951 target_rlim_swap = tswapal(target_rlim);
952 if (target_rlim_swap == TARGET_RLIM_INFINITY)
953 return RLIM_INFINITY;
955 result = target_rlim_swap;
956 if (target_rlim_swap != (rlim_t)result)
957 return RLIM_INFINITY;
/* Inverse mapping: host rlim_t back to the guest encoding. */
962 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
964 abi_ulong target_rlim_swap;
967 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
968 target_rlim_swap = TARGET_RLIM_INFINITY;
970 target_rlim_swap = rlim;
971 result = tswapal(target_rlim_swap);
/* Map guest RLIMIT_* codes to host values (they differ between ABIs).
 * Some return statements and the default case are elided here. */
976 static inline int target_to_host_resource(int code)
979 case TARGET_RLIMIT_AS:
981 case TARGET_RLIMIT_CORE:
983 case TARGET_RLIMIT_CPU:
985 case TARGET_RLIMIT_DATA:
987 case TARGET_RLIMIT_FSIZE:
989 case TARGET_RLIMIT_LOCKS:
991 case TARGET_RLIMIT_MEMLOCK:
992 return RLIMIT_MEMLOCK;
993 case TARGET_RLIMIT_MSGQUEUE:
994 return RLIMIT_MSGQUEUE;
995 case TARGET_RLIMIT_NICE:
997 case TARGET_RLIMIT_NOFILE:
998 return RLIMIT_NOFILE;
999 case TARGET_RLIMIT_NPROC:
1000 return RLIMIT_NPROC;
1001 case TARGET_RLIMIT_RSS:
1003 case TARGET_RLIMIT_RTPRIO:
1004 return RLIMIT_RTPRIO;
1005 case TARGET_RLIMIT_SIGPENDING:
1006 return RLIMIT_SIGPENDING;
1007 case TARGET_RLIMIT_STACK:
1008 return RLIMIT_STACK;
/* Guest<->host copies of small user structs.  Pattern throughout:
 * lock_user_struct() validates and maps the guest address, __get_user/
 * __put_user swap each field, unlock_user_struct() commits/releases. */
1014 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1015 abi_ulong target_tv_addr)
1017 struct target_timeval *target_tv;
1019 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1020 return -TARGET_EFAULT;
1022 __get_user(tv->tv_sec, &target_tv->tv_sec);
1023 __get_user(tv->tv_usec, &target_tv->tv_usec);
1025 unlock_user_struct(target_tv, target_tv_addr, 0);
1030 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1031 const struct timeval *tv)
1033 struct target_timeval *target_tv;
1035 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1036 return -TARGET_EFAULT;
1038 __put_user(tv->tv_sec, &target_tv->tv_sec);
1039 __put_user(tv->tv_usec, &target_tv->tv_usec);
1041 unlock_user_struct(target_tv, target_tv_addr, 1);
1046 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1047 abi_ulong target_tz_addr)
1049 struct target_timezone *target_tz;
1051 if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1052 return -TARGET_EFAULT;
1055 __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1056 __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1058 unlock_user_struct(target_tz, target_tz_addr, 0);
/* POSIX message queue attributes, used by the mq_* syscalls below. */
1063 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1066 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1067 abi_ulong target_mq_attr_addr)
1069 struct target_mq_attr *target_mq_attr;
1071 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1072 target_mq_attr_addr, 1))
1073 return -TARGET_EFAULT;
1075 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1076 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1077 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1078 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1080 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1085 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1086 const struct mq_attr *attr)
1088 struct target_mq_attr *target_mq_attr;
1090 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1091 target_mq_attr_addr, 0))
1092 return -TARGET_EFAULT;
1094 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1095 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1096 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1097 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1099 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1105 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1106 /* do_select() must return target values and target errnos. */
/* Copy the three guest fd_sets and optional timeval in, run the host
 * pselect6 (timeval converted to timespec), copy results and the
 * remaining timeout back out.  Error-check lines after each copy are
 * elided in this excerpt. */
1107 static abi_long do_select(int n,
1108 abi_ulong rfd_addr, abi_ulong wfd_addr,
1109 abi_ulong efd_addr, abi_ulong target_tv_addr)
1111 fd_set rfds, wfds, efds;
1112 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1114 struct timespec ts, *ts_ptr;
1117 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1121 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1125 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1130 if (target_tv_addr) {
1131 if (copy_from_user_timeval(&tv, target_tv_addr))
1132 return -TARGET_EFAULT;
1133 ts.tv_sec = tv.tv_sec;
1134 ts.tv_nsec = tv.tv_usec * 1000;
1140 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1143 if (!is_error(ret)) {
1144 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1145 return -TARGET_EFAULT;
1146 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1147 return -TARGET_EFAULT;
1148 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1149 return -TARGET_EFAULT;
1151 if (target_tv_addr) {
1152 tv.tv_sec = ts.tv_sec;
1153 tv.tv_usec = ts.tv_nsec / 1000;
1154 if (copy_to_user_timeval(target_tv_addr, &tv)) {
1155 return -TARGET_EFAULT;
1164 static abi_long do_pipe2(int host_pipe[], int flags)
1167 return pipe2(host_pipe, flags);
/* Emulate pipe()/pipe2() for the guest.  When flags are given, pipe2 is
 * used; otherwise plain pipe().  Several targets return the second fd in
 * a CPU register rather than via the pipedes array, so cpu_env is needed.
 * NOTE(review): interior lines are elided in this chunk.
 */
1173 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1174 int flags, int is_pipe2)
1178 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1181 return get_errno(ret);
1183 /* Several targets have special calling conventions for the original
1184 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1186 #if defined(TARGET_ALPHA)
/* Second fd goes into a target register; the first is the return value. */
1187 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1188 return host_pipe[0];
1189 #elif defined(TARGET_MIPS)
1190 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1191 return host_pipe[0];
1192 #elif defined(TARGET_SH4)
1193 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1194 return host_pipe[0];
1195 #elif defined(TARGET_SPARC)
1196 ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1197 return host_pipe[0];
/* Generic case: store both fds into the guest's pipedes array. */
1201 if (put_user_s32(host_pipe[0], pipedes)
1202 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1203 return -TARGET_EFAULT;
1204 return get_errno(ret);
/* Convert a guest struct ip_mreq/ip_mreqn (multicast membership request)
 * into the host representation.  The multicast and interface addresses
 * are already in network byte order, so only imr_ifindex needs swapping,
 * and only when the longer ip_mreqn form was supplied (len check). */
1207 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1208 abi_ulong target_addr,
1211 struct target_ip_mreqn *target_smreqn;
1213 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1215 return -TARGET_EFAULT;
1216 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1217 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1218 if (len == sizeof(struct target_ip_mreqn))
1219 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1220 unlock_user(target_smreqn, target_addr, 0);
/* Copy a guest sockaddr into host form: byteswap sa_family, fixup AF_UNIX
 * path lengths, and byteswap AF_PACKET-specific fields.  An fd-specific
 * translation hook (fd_trans) takes precedence if registered for this fd.
 * NOTE(review): interior lines are elided in this chunk.
 */
1225 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1226 abi_ulong target_addr,
1229 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1230 sa_family_t sa_family;
1231 struct target_sockaddr *target_saddr;
/* Per-fd translators (e.g. netlink, packet sockets) override the
 * generic conversion below. */
1233 if (fd_trans_target_to_host_addr(fd)) {
1234 return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1237 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1239 return -TARGET_EFAULT;
1241 sa_family = tswap16(target_saddr->sa_family);
1243 /* Oops. The caller might send a incomplete sun_path; sun_path
1244 * must be terminated by \0 (see the manual page), but
1245 * unfortunately it is quite common to specify sockaddr_un
1246 * length as "strlen(x->sun_path)" while it should be
1247 * "strlen(...) + 1". We'll fix that here if needed.
1248 * Linux kernel has a similar feature.
1251 if (sa_family == AF_UNIX) {
1252 if (len < unix_maxlen && len > 0) {
1253 char *cp = (char*)target_saddr;
/* Last byte non-NUL but the following byte is NUL: grow len by one. */
1255 if ( cp[len-1] && !cp[len] )
1258 if (len > unix_maxlen)
1262 memcpy(addr, target_saddr, len);
1263 addr->sa_family = sa_family;
1264 if (sa_family == AF_PACKET) {
1265 struct target_sockaddr_ll *lladdr;
/* sockaddr_ll carries multi-byte fields needing endian conversion. */
1267 lladdr = (struct target_sockaddr_ll *)addr;
1268 lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1269 lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1271 unlock_user(target_saddr, target_addr, 0);
/* Copy a host sockaddr back into guest memory, swapping only sa_family
 * (address payloads are network byte order already). */
1276 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1277 struct sockaddr *addr,
1280 struct target_sockaddr *target_saddr;
1282 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1284 return -TARGET_EFAULT;
1285 memcpy(target_saddr, addr, len);
1286 target_saddr->sa_family = tswap16(addr->sa_family);
1287 unlock_user(target_saddr, target_addr, len);
/* Convert ancillary (control) data from a guest msghdr into the host
 * msghdr for sendmsg().  Walks the target cmsg chain and the host cmsg
 * chain in lockstep, converting SCM_RIGHTS fd arrays and SCM_CREDENTIALS
 * explicitly; all other payload types are byte-copied with a warning.
 * NOTE(review): interior lines are elided in this chunk.
 */
1292 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1293 struct target_msghdr *target_msgh)
1295 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1296 abi_long msg_controllen;
1297 abi_ulong target_cmsg_addr;
1298 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1299 socklen_t space = 0;
1301 msg_controllen = tswapal(target_msgh->msg_controllen);
1302 if (msg_controllen < sizeof (struct target_cmsghdr))
1304 target_cmsg_addr = tswapal(target_msgh->msg_control);
1305 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1306 target_cmsg_start = target_cmsg;
1308 return -TARGET_EFAULT;
1310 while (cmsg && target_cmsg) {
1311 void *data = CMSG_DATA(cmsg);
1312 void *target_data = TARGET_CMSG_DATA(target_cmsg);
/* Payload length excludes the (target-aligned) cmsg header. */
1314 int len = tswapal(target_cmsg->cmsg_len)
1315 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
1317 space += CMSG_SPACE(len);
1318 if (space > msgh->msg_controllen) {
1319 space -= CMSG_SPACE(len);
1320 /* This is a QEMU bug, since we allocated the payload
1321 * area ourselves (unlike overflow in host-to-target
1322 * conversion, which is just the guest giving us a buffer
1323 * that's too small). It can't happen for the payload types
1324 * we currently support; if it becomes an issue in future
1325 * we would need to improve our allocation strategy to
1326 * something more intelligent than "twice the size of the
1327 * target buffer we're reading from".
1329 gemu_log("Host cmsg overflow\n");
1333 if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1334 cmsg->cmsg_level = SOL_SOCKET;
1336 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1338 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1339 cmsg->cmsg_len = CMSG_LEN(len);
/* SCM_RIGHTS: an array of file descriptors, swapped element-wise. */
1341 if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1342 int *fd = (int *)data;
1343 int *target_fd = (int *)target_data;
1344 int i, numfds = len / sizeof(int);
1346 for (i = 0; i < numfds; i++) {
1347 __get_user(fd[i], target_fd + i);
1349 } else if (cmsg->cmsg_level == SOL_SOCKET
1350 && cmsg->cmsg_type == SCM_CREDENTIALS) {
1351 struct ucred *cred = (struct ucred *)data;
1352 struct target_ucred *target_cred =
1353 (struct target_ucred *)target_data;
1355 __get_user(cred->pid, &target_cred->pid);
1356 __get_user(cred->uid, &target_cred->uid);
1357 __get_user(cred->gid, &target_cred->gid);
/* Unknown payload types: raw byte copy, with a diagnostic. */
1359 gemu_log("Unsupported ancillary data: %d/%d\n",
1360 cmsg->cmsg_level, cmsg->cmsg_type);
1361 memcpy(data, target_data, len);
1364 cmsg = CMSG_NXTHDR(msgh, cmsg);
1365 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1368 unlock_user(target_cmsg, target_cmsg_addr, 0);
1370 msgh->msg_controllen = space;
/* Convert ancillary data received from the host (recvmsg) back into the
 * guest's control buffer.  Mirrors target_to_host_cmsg, but here buffer
 * truncation is a guest-visible condition reported via MSG_CTRUNC rather
 * than a QEMU bug.  Handles SCM_RIGHTS, SCM_TIMESTAMP and SCM_CREDENTIALS
 * explicitly; other types are byte-copied with zero-fill padding.
 * NOTE(review): interior lines are elided in this chunk.
 */
1374 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1375 struct msghdr *msgh)
1377 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1378 abi_long msg_controllen;
1379 abi_ulong target_cmsg_addr;
1380 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1381 socklen_t space = 0;
1383 msg_controllen = tswapal(target_msgh->msg_controllen);
1384 if (msg_controllen < sizeof (struct target_cmsghdr))
1386 target_cmsg_addr = tswapal(target_msgh->msg_control);
1387 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1388 target_cmsg_start = target_cmsg;
1390 return -TARGET_EFAULT;
1392 while (cmsg && target_cmsg) {
1393 void *data = CMSG_DATA(cmsg);
1394 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1396 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
1397 int tgt_len, tgt_space;
1399 /* We never copy a half-header but may copy half-data;
1400 * this is Linux's behaviour in put_cmsg(). Note that
1401 * truncation here is a guest problem (which we report
1402 * to the guest via the CTRUNC bit), unlike truncation
1403 * in target_to_host_cmsg, which is a QEMU bug.
1405 if (msg_controllen < sizeof(struct cmsghdr)) {
1406 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1410 if (cmsg->cmsg_level == SOL_SOCKET) {
1411 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1413 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1415 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1417 tgt_len = TARGET_CMSG_LEN(len);
1419 /* Payload types which need a different size of payload on
1420 * the target must adjust tgt_len here.
1422 switch (cmsg->cmsg_level) {
1424 switch (cmsg->cmsg_type) {
/* Timestamps use the target's timeval layout, not the host's. */
1426 tgt_len = sizeof(struct target_timeval);
1435 if (msg_controllen < tgt_len) {
1436 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1437 tgt_len = msg_controllen;
1440 /* We must now copy-and-convert len bytes of payload
1441 * into tgt_len bytes of destination space. Bear in mind
1442 * that in both source and destination we may be dealing
1443 * with a truncated value!
1445 switch (cmsg->cmsg_level) {
1447 switch (cmsg->cmsg_type) {
1450 int *fd = (int *)data;
1451 int *target_fd = (int *)target_data;
/* Only as many fds as fit in the (possibly truncated) space. */
1452 int i, numfds = tgt_len / sizeof(int);
1454 for (i = 0; i < numfds; i++) {
1455 __put_user(fd[i], target_fd + i);
1461 struct timeval *tv = (struct timeval *)data;
1462 struct target_timeval *target_tv =
1463 (struct target_timeval *)target_data;
1465 if (len != sizeof(struct timeval) ||
1466 tgt_len != sizeof(struct target_timeval)) {
1470 /* copy struct timeval to target */
1471 __put_user(tv->tv_sec, &target_tv->tv_sec);
1472 __put_user(tv->tv_usec, &target_tv->tv_usec);
1475 case SCM_CREDENTIALS:
1477 struct ucred *cred = (struct ucred *)data;
1478 struct target_ucred *target_cred =
1479 (struct target_ucred *)target_data;
1481 __put_user(cred->pid, &target_cred->pid);
1482 __put_user(cred->uid, &target_cred->uid);
1483 __put_user(cred->gid, &target_cred->gid);
1493 gemu_log("Unsupported ancillary data: %d/%d\n",
1494 cmsg->cmsg_level, cmsg->cmsg_type);
/* Raw copy, zero-padding if the target payload is larger. */
1495 memcpy(target_data, data, MIN(len, tgt_len));
1496 if (tgt_len > len) {
1497 memset(target_data + len, 0, tgt_len - len);
1501 target_cmsg->cmsg_len = tswapal(tgt_len);
1502 tgt_space = TARGET_CMSG_SPACE(len);
1503 if (msg_controllen < tgt_space) {
1504 tgt_space = msg_controllen;
1506 msg_controllen -= tgt_space;
1508 cmsg = CMSG_NXTHDR(msgh, cmsg);
1509 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1512 unlock_user(target_cmsg, target_cmsg_addr, space);
1514 target_msgh->msg_controllen = tswapal(space);
1518 /* do_setsockopt() Must return target values and target errnos. */
/* Emulates setsockopt(2): converts the guest option value (whose layout
 * and size depend on level/optname) into host form, then calls the host
 * setsockopt.  Large switch keyed first on level (TCP, IP, IPV6, RAW,
 * SOL_SOCKET), then on optname.
 * NOTE(review): interior lines are elided in this chunk.
 */
1519 static abi_long do_setsockopt(int sockfd, int level, int optname,
1520 abi_ulong optval_addr, socklen_t optlen)
1524 struct ip_mreqn *ip_mreq;
1525 struct ip_mreq_source *ip_mreq_source;
1529 /* TCP options all take an 'int' value. */
1530 if (optlen < sizeof(uint32_t))
1531 return -TARGET_EINVAL;
1533 if (get_user_u32(val, optval_addr))
1534 return -TARGET_EFAULT;
1535 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1542 case IP_ROUTER_ALERT:
1546 case IP_MTU_DISCOVER:
1552 case IP_MULTICAST_TTL:
1553 case IP_MULTICAST_LOOP:
/* These IP options accept either a full int or a single byte. */
1555 if (optlen >= sizeof(uint32_t)) {
1556 if (get_user_u32(val, optval_addr))
1557 return -TARGET_EFAULT;
1558 } else if (optlen >= 1) {
1559 if (get_user_u8(val, optval_addr))
1560 return -TARGET_EFAULT;
1562 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1564 case IP_ADD_MEMBERSHIP:
1565 case IP_DROP_MEMBERSHIP:
/* Accept both the short ip_mreq and the longer ip_mreqn forms. */
1566 if (optlen < sizeof (struct target_ip_mreq) ||
1567 optlen > sizeof (struct target_ip_mreqn))
1568 return -TARGET_EINVAL;
1570 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1571 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1572 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1575 case IP_BLOCK_SOURCE:
1576 case IP_UNBLOCK_SOURCE:
1577 case IP_ADD_SOURCE_MEMBERSHIP:
1578 case IP_DROP_SOURCE_MEMBERSHIP:
1579 if (optlen != sizeof (struct target_ip_mreq_source))
1580 return -TARGET_EINVAL;
/* ip_mreq_source is all network-byte-order addresses: no swapping. */
1582 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1583 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1584 unlock_user (ip_mreq_source, optval_addr, 0);
1593 case IPV6_MTU_DISCOVER:
1596 case IPV6_RECVPKTINFO:
1598 if (optlen < sizeof(uint32_t)) {
1599 return -TARGET_EINVAL;
1601 if (get_user_u32(val, optval_addr)) {
1602 return -TARGET_EFAULT;
1604 ret = get_errno(setsockopt(sockfd, level, optname,
1605 &val, sizeof(val)));
1614 /* struct icmp_filter takes an u32 value */
1615 if (optlen < sizeof(uint32_t)) {
1616 return -TARGET_EINVAL;
1619 if (get_user_u32(val, optval_addr)) {
1620 return -TARGET_EFAULT;
1622 ret = get_errno(setsockopt(sockfd, level, optname,
1623 &val, sizeof(val)));
1630 case TARGET_SOL_SOCKET:
1632 case TARGET_SO_RCVTIMEO:
1636 optname = SO_RCVTIMEO;
/* Timeouts are passed as a target timeval; convert to host timeval. */
1639 if (optlen != sizeof(struct target_timeval)) {
1640 return -TARGET_EINVAL;
1643 if (copy_from_user_timeval(&tv, optval_addr)) {
1644 return -TARGET_EFAULT;
1647 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
1651 case TARGET_SO_SNDTIMEO:
1652 optname = SO_SNDTIMEO;
1654 case TARGET_SO_ATTACH_FILTER:
1656 struct target_sock_fprog *tfprog;
1657 struct target_sock_filter *tfilter;
1658 struct sock_fprog fprog;
1659 struct sock_filter *filter;
1662 if (optlen != sizeof(*tfprog)) {
1663 return -TARGET_EINVAL;
1665 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
1666 return -TARGET_EFAULT;
1668 if (!lock_user_struct(VERIFY_READ, tfilter,
1669 tswapal(tfprog->filter), 0)) {
1670 unlock_user_struct(tfprog, optval_addr, 1);
1671 return -TARGET_EFAULT;
/* Byteswap each BPF instruction of the socket filter program. */
1674 fprog.len = tswap16(tfprog->len);
1675 filter = g_try_new(struct sock_filter, fprog.len);
1676 if (filter == NULL) {
1677 unlock_user_struct(tfilter, tfprog->filter, 1);
1678 unlock_user_struct(tfprog, optval_addr, 1);
1679 return -TARGET_ENOMEM;
1681 for (i = 0; i < fprog.len; i++) {
1682 filter[i].code = tswap16(tfilter[i].code);
1683 filter[i].jt = tfilter[i].jt;
1684 filter[i].jf = tfilter[i].jf;
1685 filter[i].k = tswap32(tfilter[i].k);
1687 fprog.filter = filter;
1689 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
1690 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
1693 unlock_user_struct(tfilter, tfprog->filter, 1);
1694 unlock_user_struct(tfprog, optval_addr, 1);
1697 case TARGET_SO_BINDTODEVICE:
1699 char *dev_ifname, *addr_ifname;
/* Clamp the interface name to IFNAMSIZ-1 and NUL-terminate a copy. */
1701 if (optlen > IFNAMSIZ - 1) {
1702 optlen = IFNAMSIZ - 1;
1704 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1706 return -TARGET_EFAULT;
1708 optname = SO_BINDTODEVICE;
1709 addr_ifname = alloca(IFNAMSIZ);
1710 memcpy(addr_ifname, dev_ifname, optlen);
1711 addr_ifname[optlen] = 0;
1712 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
1713 addr_ifname, optlen));
1714 unlock_user (dev_ifname, optval_addr, 0);
1717 /* Options with 'int' argument. */
1718 case TARGET_SO_DEBUG:
1721 case TARGET_SO_REUSEADDR:
1722 optname = SO_REUSEADDR;
1724 case TARGET_SO_TYPE:
1727 case TARGET_SO_ERROR:
1730 case TARGET_SO_DONTROUTE:
1731 optname = SO_DONTROUTE;
1733 case TARGET_SO_BROADCAST:
1734 optname = SO_BROADCAST;
1736 case TARGET_SO_SNDBUF:
1737 optname = SO_SNDBUF;
1739 case TARGET_SO_SNDBUFFORCE:
1740 optname = SO_SNDBUFFORCE;
1742 case TARGET_SO_RCVBUF:
1743 optname = SO_RCVBUF;
1745 case TARGET_SO_RCVBUFFORCE:
1746 optname = SO_RCVBUFFORCE;
1748 case TARGET_SO_KEEPALIVE:
1749 optname = SO_KEEPALIVE;
1751 case TARGET_SO_OOBINLINE:
1752 optname = SO_OOBINLINE;
1754 case TARGET_SO_NO_CHECK:
1755 optname = SO_NO_CHECK;
1757 case TARGET_SO_PRIORITY:
1758 optname = SO_PRIORITY;
1761 case TARGET_SO_BSDCOMPAT:
1762 optname = SO_BSDCOMPAT;
1765 case TARGET_SO_PASSCRED:
1766 optname = SO_PASSCRED;
1768 case TARGET_SO_PASSSEC:
1769 optname = SO_PASSSEC;
1771 case TARGET_SO_TIMESTAMP:
1772 optname = SO_TIMESTAMP;
1774 case TARGET_SO_RCVLOWAT:
1775 optname = SO_RCVLOWAT;
/* Shared tail for all plain-int SOL_SOCKET options above. */
1781 if (optlen < sizeof(uint32_t))
1782 return -TARGET_EINVAL;
1784 if (get_user_u32(val, optval_addr))
1785 return -TARGET_EFAULT;
1786 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
1790 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
1791 ret = -TARGET_ENOPROTOOPT;
1796 /* do_getsockopt() Must return target values and target errnos. */
/* Emulates getsockopt(2): calls the host getsockopt and converts the
 * result back into the guest's layout (ints, bytes, or structured data
 * such as SO_PEERCRED's ucred).  The guest's optlen is both read (to
 * bound the copy-out) and written back with the host's resulting length.
 * NOTE(review): interior lines are elided in this chunk.
 */
1797 static abi_long do_getsockopt(int sockfd, int level, int optname,
1798 abi_ulong optval_addr, abi_ulong optlen)
1805 case TARGET_SOL_SOCKET:
1808 /* These don't just return a single integer */
1809 case TARGET_SO_LINGER:
1810 case TARGET_SO_RCVTIMEO:
1811 case TARGET_SO_SNDTIMEO:
1812 case TARGET_SO_PEERNAME:
1814 case TARGET_SO_PEERCRED: {
1817 struct target_ucred *tcr;
1819 if (get_user_u32(len, optlen)) {
1820 return -TARGET_EFAULT;
1823 return -TARGET_EINVAL;
1827 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
/* Copy the host ucred field-by-field into the guest's layout. */
1835 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
1836 return -TARGET_EFAULT;
1838 __put_user(cr.pid, &tcr->pid);
1839 __put_user(cr.uid, &tcr->uid);
1840 __put_user(cr.gid, &tcr->gid);
1841 unlock_user_struct(tcr, optval_addr, 1);
1842 if (put_user_u32(len, optlen)) {
1843 return -TARGET_EFAULT;
1847 /* Options with 'int' argument. */
1848 case TARGET_SO_DEBUG:
1851 case TARGET_SO_REUSEADDR:
1852 optname = SO_REUSEADDR;
1854 case TARGET_SO_TYPE:
1857 case TARGET_SO_ERROR:
1860 case TARGET_SO_DONTROUTE:
1861 optname = SO_DONTROUTE;
1863 case TARGET_SO_BROADCAST:
1864 optname = SO_BROADCAST;
1866 case TARGET_SO_SNDBUF:
1867 optname = SO_SNDBUF;
1869 case TARGET_SO_RCVBUF:
1870 optname = SO_RCVBUF;
1872 case TARGET_SO_KEEPALIVE:
1873 optname = SO_KEEPALIVE;
1875 case TARGET_SO_OOBINLINE:
1876 optname = SO_OOBINLINE;
1878 case TARGET_SO_NO_CHECK:
1879 optname = SO_NO_CHECK;
1881 case TARGET_SO_PRIORITY:
1882 optname = SO_PRIORITY;
1885 case TARGET_SO_BSDCOMPAT:
1886 optname = SO_BSDCOMPAT;
1889 case TARGET_SO_PASSCRED:
1890 optname = SO_PASSCRED;
1892 case TARGET_SO_TIMESTAMP:
1893 optname = SO_TIMESTAMP;
1895 case TARGET_SO_RCVLOWAT:
1896 optname = SO_RCVLOWAT;
1898 case TARGET_SO_ACCEPTCONN:
1899 optname = SO_ACCEPTCONN;
/* Shared tail for plain-int options at SOL_SOCKET and TCP level. */
1906 /* TCP options all take an 'int' value. */
1908 if (get_user_u32(len, optlen))
1909 return -TARGET_EFAULT;
1911 return -TARGET_EINVAL;
1913 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
/* SO_TYPE returns a host socket type; map it to the target value. */
1916 if (optname == SO_TYPE) {
1917 val = host_to_target_sock_type(val);
1922 if (put_user_u32(val, optval_addr))
1923 return -TARGET_EFAULT;
1925 if (put_user_u8(val, optval_addr))
1926 return -TARGET_EFAULT;
1928 if (put_user_u32(len, optlen))
1929 return -TARGET_EFAULT;
1936 case IP_ROUTER_ALERT:
1940 case IP_MTU_DISCOVER:
1946 case IP_MULTICAST_TTL:
1947 case IP_MULTICAST_LOOP:
1948 if (get_user_u32(len, optlen))
1949 return -TARGET_EFAULT;
1951 return -TARGET_EINVAL;
1953 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv))
;
/* Small byte-ranged values may be written back as a single byte
 * when the guest supplied a short buffer. */
1956 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
1958 if (put_user_u32(len, optlen)
1959 || put_user_u8(val, optval_addr))
1960 return -TARGET_EFAULT;
1962 if (len > sizeof(int))
1964 if (put_user_u32(len, optlen)
1965 || put_user_u32(val, optval_addr))
1966 return -TARGET_EFAULT;
1970 ret = -TARGET_ENOPROTOOPT;
1976 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1978 ret = -TARGET_EOPNOTSUPP;
/* Build a host struct iovec array from a guest target_iovec array,
 * locking each referenced guest buffer into host memory.  On failure,
 * any buffers already locked are released before returning NULL.
 * Ownership: the caller must release the result via unlock_iovec().
 * NOTE(review): interior lines are elided in this chunk.
 */
1984 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
1985 int count, int copy)
1987 struct target_iovec *target_vec;
1989 abi_ulong total_len, max_len;
1992 bool bad_address = false;
1998 if (count < 0 || count > IOV_MAX) {
2003 vec = g_try_new0(struct iovec, count);
2009 target_vec = lock_user(VERIFY_READ, target_addr,
2010 count * sizeof(struct target_iovec), 1);
2011 if (target_vec == NULL) {
2016 /* ??? If host page size > target page size, this will result in a
2017 value larger than what we can actually support. */
2018 max_len = 0x7fffffff & TARGET_PAGE_MASK;
2021 for (i = 0; i < count; i++) {
2022 abi_ulong base = tswapal(target_vec[i].iov_base);
2023 abi_long len = tswapal(target_vec[i].iov_len);
2028 } else if (len == 0) {
2029 /* Zero length pointer is ignored. */
2030 vec[i].iov_base = 0;
2032 vec[i].iov_base = lock_user(type, base, len, copy);
2033 /* If the first buffer pointer is bad, this is a fault. But
2034 * subsequent bad buffers will result in a partial write; this
2035 * is realized by filling the vector with null pointers and
2037 if (!vec[i].iov_base) {
/* Cap the running total so the sum of lengths stays bounded. */
2048 if (len > max_len - total_len) {
2049 len = max_len - total_len;
2052 vec[i].iov_len = len;
2056 unlock_user(target_vec, target_addr, 0);
/* Error path: unwind any iov entries locked so far. */
2061 if (tswapal(target_vec[i].iov_len) > 0) {
2062 unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
2065 unlock_user(target_vec, target_addr, 0);
/* Release an iovec previously built by lock_iovec().  When 'copy' is
 * set, the locked host buffers are written back to guest memory
 * (i.e. this was a read-style operation). */
2072 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2073 int count, int copy)
2075 struct target_iovec *target_vec;
2078 target_vec = lock_user(VERIFY_READ, target_addr,
2079 count * sizeof(struct target_iovec), 1);
2081 for (i = 0; i < count; i++) {
2082 abi_ulong base = tswapal(target_vec[i].iov_base);
2083 abi_long len = tswapal(target_vec[i].iov_len);
2087 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2089 unlock_user(target_vec, target_addr, 0);
/* Translate a guest socket type (base type plus SOCK_CLOEXEC /
 * SOCK_NONBLOCK flag bits) into the host's encoding, in place.
 * Returns -TARGET_EINVAL when the host cannot express a requested flag.
 * NOTE(review): interior lines are elided in this chunk.
 */
2095 static inline int target_to_host_sock_type(int *type)
2098 int target_type = *type;
2100 switch (target_type & TARGET_SOCK_TYPE_MASK) {
2101 case TARGET_SOCK_DGRAM:
2102 host_type = SOCK_DGRAM;
2104 case TARGET_SOCK_STREAM:
2105 host_type = SOCK_STREAM;
2108 host_type = target_type & TARGET_SOCK_TYPE_MASK;
2111 if (target_type & TARGET_SOCK_CLOEXEC) {
2112 #if defined(SOCK_CLOEXEC)
2113 host_type |= SOCK_CLOEXEC;
2115 return -TARGET_EINVAL;
2118 if (target_type & TARGET_SOCK_NONBLOCK) {
2119 #if defined(SOCK_NONBLOCK)
2120 host_type |= SOCK_NONBLOCK;
/* Hosts lacking SOCK_NONBLOCK can emulate it later with O_NONBLOCK;
 * only fail if neither mechanism exists. */
2121 #elif !defined(O_NONBLOCK)
2122 return -TARGET_EINVAL;
2129 /* Try to emulate socket type flags after socket creation. */
/* On hosts without SOCK_NONBLOCK, apply O_NONBLOCK via fcntl after the
 * socket has been created.  Returns the fd on success. */
2130 static int sock_flags_fixup(int fd, int target_type)
2132 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2133 if (target_type & TARGET_SOCK_NONBLOCK) {
2134 int flags = fcntl(fd, F_GETFL);
2135 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2137 return -TARGET_EINVAL;
/* fd_trans hook: convert a guest sockaddr for obsolete SOCK_PACKET
 * sockets.  Only sa_family needs byteswapping; the protocol field
 * (spkt_protocol) is big-endian on both sides. */
2144 static abi_long packet_target_to_host_sockaddr(void *host_addr,
2145 abi_ulong target_addr,
2148 struct sockaddr *addr = host_addr;
2149 struct target_sockaddr *target_saddr;
2151 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
2152 if (!target_saddr) {
2153 return -TARGET_EFAULT;
2156 memcpy(addr, target_saddr, len);
2157 addr->sa_family = tswap16(target_saddr->sa_family);
2158 /* spkt_protocol is big-endian */
2160 unlock_user(target_saddr, target_addr, 0);
/* fd translation table entry registered for SOCK_PACKET sockets. */
2164 static TargetFdTrans target_packet_trans = {
2165 .target_to_host_addr = packet_target_to_host_sockaddr,
2168 /* do_socket() Must return target values and target errnos. */
/* Emulate socket(2): translate the type flags, reject PF_NETLINK,
 * byteswap the protocol for packet sockets, create the host socket,
 * then apply flag fixups and register the SOCK_PACKET fd translator.
 * NOTE(review): interior lines are elided in this chunk.
 */
2169 static abi_long do_socket(int domain, int type, int protocol)
2171 int target_type = type;
2174 ret = target_to_host_sock_type(&type);
2179 if (domain == PF_NETLINK)
2180 return -TARGET_EAFNOSUPPORT;
/* Packet-socket protocols are specified in network byte order. */
2182 if (domain == AF_PACKET ||
2183 (domain == AF_INET && type == SOCK_PACKET)) {
2184 protocol = tswap16(protocol);
2187 ret = get_errno(socket(domain, type, protocol));
2189 ret = sock_flags_fixup(ret, target_type);
2190 if (type == SOCK_PACKET) {
2191 /* Manage an obsolete case :
2192 * if socket type is SOCK_PACKET, bind by name
2194 fd_trans_register(ret, &target_packet_trans);
2200 /* do_bind() Must return target values and target errnos. */
/* Emulate bind(2): validate addrlen, convert the guest sockaddr on the
 * stack (+1 byte for the AF_UNIX NUL-fixup), and bind on the host. */
2201 static abi_long do_bind(int sockfd, abi_ulong target_addr,
2207 if ((int)addrlen < 0) {
2208 return -TARGET_EINVAL;
2211 addr = alloca(addrlen+1);
2213 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2217 return get_errno(bind(sockfd, addr, addrlen));
2220 /* do_connect() Must return target values and target errnos. */
/* Emulate connect(2); mirrors do_bind()'s sockaddr conversion. */
2221 static abi_long do_connect(int sockfd, abi_ulong target_addr,
2227 if ((int)addrlen < 0) {
2228 return -TARGET_EINVAL;
2231 addr = alloca(addrlen+1);
2233 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2237 return get_errno(connect(sockfd, addr, addrlen));
2240 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
/* Core of sendmsg/recvmsg emulation, operating on an already-locked
 * guest msghdr.  'send' selects direction.  Converts name, iovec and
 * control data to host form, performs the call, and for receives
 * converts results back to the guest.
 * NOTE(review): interior lines are elided in this chunk.
 */
2241 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
2242 int flags, int send)
2248 abi_ulong target_vec;
2250 if (msgp->msg_name) {
2251 msg.msg_namelen = tswap32(msgp->msg_namelen);
2252 msg.msg_name = alloca(msg.msg_namelen+1);
2253 ret = target_to_host_sockaddr(fd, msg.msg_name,
2254 tswapal(msgp->msg_name),
2260 msg.msg_name = NULL;
2261 msg.msg_namelen = 0;
/* Host control buffer is twice the guest's size: host cmsg headers
 * and alignment may be larger than the target's. */
2263 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
2264 msg.msg_control = alloca(msg.msg_controllen);
2265 msg.msg_flags = tswap32(msgp->msg_flags);
2267 count = tswapal(msgp->msg_iovlen);
2268 target_vec = tswapal(msgp->msg_iov);
2269 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
2270 target_vec, count, send);
2272 ret = -host_to_target_errno(errno);
2275 msg.msg_iovlen = count;
2279 ret = target_to_host_cmsg(&msg, msgp);
2281 ret = get_errno(sendmsg(fd, &msg, flags));
2283 ret = get_errno(recvmsg(fd, &msg, flags));
2284 if (!is_error(ret)) {
/* Receive path: propagate control data and peer address back. */
2286 ret = host_to_target_cmsg(msgp, &msg);
2287 if (!is_error(ret)) {
2288 msgp->msg_namelen = tswap32(msg.msg_namelen);
2289 if (msg.msg_name != NULL) {
2290 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
2291 msg.msg_name, msg.msg_namelen);
2303 unlock_iovec(vec, target_vec, count, !send);
/* sendmsg/recvmsg entry point: lock the guest msghdr struct, delegate
 * to do_sendrecvmsg_locked(), then unlock (writing back on receive). */
2308 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
2309 int flags, int send)
2312 struct target_msghdr *msgp;
2314 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
2318 return -TARGET_EFAULT;
2320 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
2321 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
2325 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
2326 * so it might not have this *mmsg-specific flag either.
2328 #ifndef MSG_WAITFORONE
2329 #define MSG_WAITFORONE 0x10000
/* Emulate sendmmsg/recvmmsg by looping over do_sendrecvmsg_locked for
 * each entry of the guest mmsghdr vector, storing per-message lengths.
 * Returns the count of messages processed if any succeeded, else the
 * first error.
 * NOTE(review): interior lines are elided in this chunk.
 */
2332 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
2333 unsigned int vlen, unsigned int flags,
2336 struct target_mmsghdr *mmsgp;
2340 if (vlen > UIO_MAXIOV) {
2344 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
2346 return -TARGET_EFAULT;
2349 for (i = 0; i < vlen; i++) {
2350 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
2351 if (is_error(ret)) {
2354 mmsgp[i].msg_len = tswap32(ret);
2355 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
2356 if (flags & MSG_WAITFORONE) {
2357 flags |= MSG_DONTWAIT;
2361 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
2363 /* Return number of datagrams sent if we sent any at all;
2364 * otherwise return the error.
2372 /* If we don't have a system accept4() then just call accept.
2373 * The callsites to do_accept4() will ensure that they don't
2374 * pass a non-zero flags argument in this config.
/* Compatibility shim: define accept4() in terms of accept() when the
 * host C library lacks it; flags are guaranteed zero by callers. */
2376 #ifndef CONFIG_ACCEPT4
2377 static inline int accept4(int sockfd, struct sockaddr *addr,
2378 socklen_t *addrlen, int flags)
2381 return accept(sockfd, addr, addrlen);
2385 /* do_accept4() Must return target values and target errnos. */
/* Emulate accept4(2)/accept(2): translate the flag bits, read the
 * guest's addrlen, call the host, and copy the peer address back.
 * NOTE(review): interior lines are elided in this chunk.
 */
2386 static abi_long do_accept4(int fd, abi_ulong target_addr,
2387 abi_ulong target_addrlen_addr, int flags)
2394 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
/* NULL guest addr means the caller doesn't want the peer address. */
2396 if (target_addr == 0) {
2397 return get_errno(accept4(fd, NULL, NULL, host_flags));
2400 /* linux returns EINVAL if addrlen pointer is invalid */
2401 if (get_user_u32(addrlen, target_addrlen_addr))
2402 return -TARGET_EINVAL;
2404 if ((int)addrlen < 0) {
2405 return -TARGET_EINVAL;
2408 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2409 return -TARGET_EINVAL;
2411 addr = alloca(addrlen);
2413 ret = get_errno(accept4(fd, addr, &addrlen, host_flags));
2414 if (!is_error(ret)) {
2415 host_to_target_sockaddr(target_addr, addr, addrlen);
2416 if (put_user_u32(addrlen, target_addrlen_addr))
2417 ret = -TARGET_EFAULT;
2422 /* do_getpeername() Must return target values and target errnos. */
/* Emulate getpeername(2): read guest addrlen, call host, copy the
 * sockaddr and the updated length back to guest memory. */
2423 static abi_long do_getpeername(int fd, abi_ulong target_addr,
2424 abi_ulong target_addrlen_addr)
2430 if (get_user_u32(addrlen, target_addrlen_addr))
2431 return -TARGET_EFAULT;
2433 if ((int)addrlen < 0) {
2434 return -TARGET_EINVAL;
2437 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2438 return -TARGET_EFAULT;
2440 addr = alloca(addrlen);
2442 ret = get_errno(getpeername(fd, addr, &addrlen));
2443 if (!is_error(ret)) {
2444 host_to_target_sockaddr(target_addr, addr, addrlen);
2445 if (put_user_u32(addrlen, target_addrlen_addr))
2446 ret = -TARGET_EFAULT;
2451 /* do_getsockname() Must return target values and target errnos. */
/* Emulate getsockname(2); structurally identical to do_getpeername(). */
2452 static abi_long do_getsockname(int fd, abi_ulong target_addr,
2453 abi_ulong target_addrlen_addr)
2459 if (get_user_u32(addrlen, target_addrlen_addr))
2460 return -TARGET_EFAULT;
2462 if ((int)addrlen < 0) {
2463 return -TARGET_EINVAL;
2466 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2467 return -TARGET_EFAULT;
2469 addr = alloca(addrlen);
2471 ret = get_errno(getsockname(fd, addr, &addrlen));
2472 if (!is_error(ret)) {
2473 host_to_target_sockaddr(target_addr, addr, addrlen);
2474 if (put_user_u32(addrlen, target_addrlen_addr))
2475 ret = -TARGET_EFAULT;
2480 /* do_socketpair() Must return target values and target errnos. */
/* Emulate socketpair(2): translate the type, create the pair, and store
 * both fds into the guest's two-element array. */
2481 static abi_long do_socketpair(int domain, int type, int protocol,
2482 abi_ulong target_tab_addr)
2487 target_to_host_sock_type(&type);
2489 ret = get_errno(socketpair(domain, type, protocol, tab));
2490 if (!is_error(ret)) {
2491 if (put_user_s32(tab[0], target_tab_addr)
2492 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
2493 ret = -TARGET_EFAULT;
2498 /* do_sendto() Must return target values and target errnos. */
/* Emulate sendto(2)/send(2): lock the guest message buffer, optionally
 * convert the destination sockaddr (target_addr != 0 selects sendto).
 * NOTE(review): interior lines are elided in this chunk.
 */
2499 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
2500 abi_ulong target_addr, socklen_t addrlen)
2506 if ((int)addrlen < 0) {
2507 return -TARGET_EINVAL;
2510 host_msg = lock_user(VERIFY_READ, msg, len, 1);
2512 return -TARGET_EFAULT;
2514 addr = alloca(addrlen+1);
2515 ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
2517 unlock_user(host_msg, msg, 0);
2520 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
2522 ret = get_errno(send(fd, host_msg, len, flags));
2524 unlock_user(host_msg, msg, 0);
2528 /* do_recvfrom() Must return target values and target errnos. */
/* Emulate recvfrom(2)/recv(2): lock the guest buffer for writing, call
 * the host, then copy the source address back when requested
 * (target_addr != 0 selects recvfrom).
 * NOTE(review): interior lines are elided in this chunk.
 */
2529 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
2530 abi_ulong target_addr,
2531 abi_ulong target_addrlen)
2538 host_msg = lock_user(VERIFY_WRITE, msg, len, 0)
;
2540 return -TARGET_EFAULT;
2542 if (get_user_u32(addrlen, target_addrlen)) {
2543 ret = -TARGET_EFAULT;
2546 if ((int)addrlen < 0) {
2547 ret = -TARGET_EINVAL;
2550 addr = alloca(addrlen);
2551 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
2553 addr = NULL; /* To keep compiler quiet. */
2554 ret = get_errno(qemu_recv(fd, host_msg, len, flags));
2556 if (!is_error(ret)) {
2558 host_to_target_sockaddr(target_addr, addr, addrlen);
2559 if (put_user_u32(addrlen, target_addrlen)) {
2560 ret = -TARGET_EFAULT;
/* Success: write back 'len' bytes; failure paths unlock with 0. */
2564 unlock_user(host_msg, msg, len);
2567 unlock_user(host_msg, msg, 0);
2572 #ifdef TARGET_NR_socketcall
2573 /* do_socketcall() Must return target values and target errnos. */
/* Demultiplexer for the legacy socketcall(2) syscall: reads the
 * per-call number of arguments from guest memory at vptr, then
 * dispatches to the corresponding do_* helper. */
2574 static abi_long do_socketcall(int num, abi_ulong vptr)
2576 static const unsigned ac[] = { /* number of arguments per call */
2577 [SOCKOP_socket] = 3, /* domain, type, protocol */
2578 [SOCKOP_bind] = 3, /* sockfd, addr, addrlen */
2579 [SOCKOP_connect] = 3, /* sockfd, addr, addrlen */
2580 [SOCKOP_listen] = 2, /* sockfd, backlog */
2581 [SOCKOP_accept] = 3, /* sockfd, addr, addrlen */
2582 [SOCKOP_accept4] = 4, /* sockfd, addr, addrlen, flags */
2583 [SOCKOP_getsockname] = 3, /* sockfd, addr, addrlen */
2584 [SOCKOP_getpeername] = 3, /* sockfd, addr, addrlen */
2585 [SOCKOP_socketpair] = 4, /* domain, type, protocol, tab */
2586 [SOCKOP_send] = 4, /* sockfd, msg, len, flags */
2587 [SOCKOP_recv] = 4, /* sockfd, msg, len, flags */
2588 [SOCKOP_sendto] = 6, /* sockfd, msg, len, flags, addr, addrlen */
2589 [SOCKOP_recvfrom] = 6, /* sockfd, msg, len, flags, addr, addrlen */
2590 [SOCKOP_shutdown] = 2, /* sockfd, how */
2591 [SOCKOP_sendmsg] = 3, /* sockfd, msg, flags */
2592 [SOCKOP_recvmsg] = 3, /* sockfd, msg, flags */
2593 [SOCKOP_sendmmsg] = 4, /* sockfd, msgvec, vlen, flags */
2594 [SOCKOP_recvmmsg] = 4, /* sockfd, msgvec, vlen, flags */
2595 [SOCKOP_setsockopt] = 5, /* sockfd, level, optname, optval, optlen */
2596 [SOCKOP_getsockopt] = 5, /* sockfd, level, optname, optval, optlen */
2598 abi_long a[6]; /* max 6 args */
2600 /* first, collect the arguments in a[] according to ac[] */
2601 if (num >= 0 && num < ARRAY_SIZE(ac)) {
2603 assert(ARRAY_SIZE(a) >= ac[num]); /* ensure we have space for args */
2604 for (i = 0; i < ac[num]; ++i) {
2605 if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
2606 return -TARGET_EFAULT;
2611 /* now when we have the args, actually handle the call */
2613 case SOCKOP_socket: /* domain, type, protocol */
2614 return do_socket(a[0], a[1], a[2]);
2615 case SOCKOP_bind: /* sockfd, addr, addrlen */
2616 return do_bind(a[0], a[1], a[2]);
2617 case SOCKOP_connect: /* sockfd, addr, addrlen */
2618 return do_connect(a[0], a[1], a[2]);
2619 case SOCKOP_listen: /* sockfd, backlog */
2620 return get_errno(listen(a[0], a[1]));
2621 case SOCKOP_accept: /* sockfd, addr, addrlen */
2622 return do_accept4(a[0], a[1], a[2], 0);
2623 case SOCKOP_accept4: /* sockfd, addr, addrlen, flags */
2624 return do_accept4(a[0], a[1], a[2], a[3]);
2625 case SOCKOP_getsockname: /* sockfd, addr, addrlen */
2626 return do_getsockname(a[0], a[1], a[2]);
2627 case SOCKOP_getpeername: /* sockfd, addr, addrlen */
2628 return do_getpeername(a[0], a[1], a[2]);
2629 case SOCKOP_socketpair: /* domain, type, protocol, tab */
2630 return do_socketpair(a[0], a[1], a[2], a[3]);
2631 case SOCKOP_send: /* sockfd, msg, len, flags */
2632 return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
2633 case SOCKOP_recv: /* sockfd, msg, len, flags */
2634 return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
2635 case SOCKOP_sendto: /* sockfd, msg, len, flags, addr, addrlen */
2636 return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
2637 case SOCKOP_recvfrom: /* sockfd, msg, len, flags, addr, addrlen */
2638 return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
2639 case SOCKOP_shutdown: /* sockfd, how */
2640 return get_errno(shutdown(a[0], a[1]));
2641 case SOCKOP_sendmsg: /* sockfd, msg, flags */
2642 return do_sendrecvmsg(a[0], a[1], a[2], 1);
2643 case SOCKOP_recvmsg: /* sockfd, msg, flags */
2644 return do_sendrecvmsg(a[0], a[1], a[2], 0);
2645 case SOCKOP_sendmmsg: /* sockfd, msgvec, vlen, flags */
2646 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
2647 case SOCKOP_recvmmsg: /* sockfd, msgvec, vlen, flags */
2648 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
2649 case SOCKOP_setsockopt: /* sockfd, level, optname, optval, optlen */
2650 return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
2651 case SOCKOP_getsockopt: /* sockfd, level, optname, optval, optlen */
2652 return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
2654 gemu_log("Unsupported socketcall: %d\n", num);
2655 return -TARGET_ENOSYS;
/* Fixed-size bookkeeping table for guest shmat() attachments, consulted by
 * do_shmat()/do_shmdt() below.  NOTE(review): this dump has lines elided --
 * the struct's fields are missing here; uses below reference .in_use,
 * .start and .size, so presumably those are the members.  TODO confirm
 * against the full source. */
2660 #define N_SHM_REGIONS 32
2662 static struct shm_region {
2666 } shm_regions[N_SHM_REGIONS];
/* Guest-ABI layout of struct semid_ds.  abi_ulong fields follow the target's
 * word size; the __unusedN padding words mirror the 32-bit kernel layout and
 * are compiled out for PPC64 (the matching #else/#endif lines are elided in
 * this dump). */
2668 struct target_semid_ds
2670 struct target_ipc_perm sem_perm;
2671 abi_ulong sem_otime;
2672 #if !defined(TARGET_PPC64)
2673 abi_ulong __unused1;
2675 abi_ulong sem_ctime;
2676 #if !defined(TARGET_PPC64)
2677 abi_ulong __unused2;
2679 abi_ulong sem_nsems;
2680 abi_ulong __unused3;
2681 abi_ulong __unused4;
/* Copy the ipc_perm embedded in a guest semid_ds at target_addr into
 * *host_ip, byte-swapping each field from guest to host order.
 * Returns -TARGET_EFAULT if the guest struct cannot be locked; the success
 * return (presumably 0) is on a line elided from this dump.
 * The mode/__seq fields are 32-bit on Alpha/MIPS/PPC ABIs and 16-bit
 * elsewhere, hence the conditional swap width (the #else/#endif lines are
 * elided here). */
2684 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
2685 abi_ulong target_addr)
2687 struct target_ipc_perm *target_ip;
2688 struct target_semid_ds *target_sd;
2690 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2691 return -TARGET_EFAULT;
2692 target_ip = &(target_sd->sem_perm);
2693 host_ip->__key = tswap32(target_ip->__key);
2694 host_ip->uid = tswap32(target_ip->uid);
2695 host_ip->gid = tswap32(target_ip->gid);
2696 host_ip->cuid = tswap32(target_ip->cuid);
2697 host_ip->cgid = tswap32(target_ip->cgid);
2698 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
2699 host_ip->mode = tswap32(target_ip->mode);
2701 host_ip->mode = tswap16(target_ip->mode);
2703 #if defined(TARGET_PPC)
2704 host_ip->__seq = tswap32(target_ip->__seq);
2706 host_ip->__seq = tswap16(target_ip->__seq);
2708 unlock_user_struct(target_sd, target_addr, 0);
/* Mirror of target_to_host_ipc_perm: write *host_ip back into the ipc_perm
 * embedded in the guest semid_ds at target_addr, swapping each field to
 * guest order.  Returns -TARGET_EFAULT when the guest struct cannot be
 * locked for writing.  Field widths vary per-target exactly as in the
 * read-direction converter above. */
2712 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
2713 struct ipc_perm *host_ip)
2715 struct target_ipc_perm *target_ip;
2716 struct target_semid_ds *target_sd;
2718 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2719 return -TARGET_EFAULT;
2720 target_ip = &(target_sd->sem_perm);
2721 target_ip->__key = tswap32(host_ip->__key);
2722 target_ip->uid = tswap32(host_ip->uid);
2723 target_ip->gid = tswap32(host_ip->gid);
2724 target_ip->cuid = tswap32(host_ip->cuid);
2725 target_ip->cgid = tswap32(host_ip->cgid);
2726 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
2727 target_ip->mode = tswap32(host_ip->mode);
2729 target_ip->mode = tswap16(host_ip->mode);
2731 #if defined(TARGET_PPC)
2732 target_ip->__seq = tswap32(host_ip->__seq);
2734 target_ip->__seq = tswap16(host_ip->__seq);
2736 unlock_user_struct(target_sd, target_addr, 1);
/* Convert a guest semid_ds at target_addr into *host_sd (perm block plus
 * the nsems/otime/ctime counters, swapped with tswapal for abi_ulong
 * width).  NOTE(review): if target_to_host_ipc_perm() fails here, the
 * struct locked on the line above is returned without a matching
 * unlock_user_struct -- looks like a lock leak on the error path; confirm
 * against lock_user semantics. */
2740 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
2741 abi_ulong target_addr)
2743 struct target_semid_ds *target_sd;
2745 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2746 return -TARGET_EFAULT;
2747 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
2748 return -TARGET_EFAULT;
2749 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
2750 host_sd->sem_otime = tswapal(target_sd->sem_otime);
2751 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
2752 unlock_user_struct(target_sd, target_addr, 0);
/* Write-direction mirror of target_to_host_semid_ds: copy *host_sd back
 * into the guest struct at target_addr.  NOTE(review): same error-path
 * pattern as the read direction -- an ipc_perm conversion failure returns
 * with the outer struct still locked. */
2756 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
2757 struct semid_ds *host_sd)
2759 struct target_semid_ds *target_sd;
2761 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2762 return -TARGET_EFAULT;
2763 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
2764 return -TARGET_EFAULT;
2765 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
2766 target_sd->sem_otime = tswapal(host_sd->sem_otime);
2767 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
2768 unlock_user_struct(target_sd, target_addr, 1);
/* Guest-ABI seminfo (fields elided from this dump; the __put_user calls
 * below imply semmap..semaem int members matching host struct seminfo). */
2772 struct target_seminfo {
/* Copy host seminfo (result of semctl IPC_INFO/SEM_INFO) out to the guest
 * struct at target_addr, one field at a time via __put_user so each value
 * is swapped to guest order.  Returns -TARGET_EFAULT if the guest struct
 * cannot be locked. */
2785 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
2786 struct seminfo *host_seminfo)
2788 struct target_seminfo *target_seminfo;
2789 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
2790 return -TARGET_EFAULT;
2791 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
2792 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
2793 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
2794 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
2795 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
2796 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
2797 __put_user(host_seminfo->semume, &target_seminfo->semume);
2798 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
2799 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
2800 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
2801 unlock_user_struct(target_seminfo, target_addr, 1);
/* Members of the host-side semun union used for semctl(2) -- the union's
 * opening line is elided from this dump.  buf/array/__buf select the
 * payload per semctl command. */
2807 struct semid_ds *buf;
2808 unsigned short *array;
2809 struct seminfo *__buf;
/* Guest-ABI counterpart of semun; its members (presumably abi_ulong
 * val/buf/array/__buf) are elided from this dump. */
2812 union target_semun {
/* Allocate *host_array and fill it from the guest's unsigned-short
 * semaphore-value array at target_addr.  The element count is obtained by
 * an IPC_STAT on the semaphore set itself, not trusted from the guest.
 * On lock failure the freshly allocated array is freed before returning
 * -TARGET_EFAULT.  (Error check after semctl and the closing return are on
 * lines elided from this dump.) */
2819 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
2820 abi_ulong target_addr)
2823 unsigned short *array;
2825 struct semid_ds semid_ds;
2828 semun.buf = &semid_ds;
2830 ret = semctl(semid, 0, IPC_STAT, semun);
2832 return get_errno(ret);
2834 nsems = semid_ds.sem_nsems;
2836 *host_array = g_try_new(unsigned short, nsems);
2838 return -TARGET_ENOMEM;
2840 array = lock_user(VERIFY_READ, target_addr,
2841 nsems*sizeof(unsigned short), 1);
2843 g_free(*host_array);
2844 return -TARGET_EFAULT;
2847 for(i=0; i<nsems; i++) {
2848 __get_user((*host_array)[i], &array[i]);
2850 unlock_user(array, target_addr, 0);
/* Write *host_array back to the guest's semaphore-value array and free it.
 * Element count again comes from IPC_STAT on the host side.
 * NOTE(review): if lock_user fails here, the function returns
 * -TARGET_EFAULT without freeing *host_array (it is freed only on the
 * success path below) -- apparent memory leak; confirm in full source. */
2855 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
2856 unsigned short **host_array)
2859 unsigned short *array;
2861 struct semid_ds semid_ds;
2864 semun.buf = &semid_ds;
2866 ret = semctl(semid, 0, IPC_STAT, semun);
2868 return get_errno(ret);
2870 nsems = semid_ds.sem_nsems;
2872 array = lock_user(VERIFY_WRITE, target_addr,
2873 nsems*sizeof(unsigned short), 0);
2875 return -TARGET_EFAULT;
2877 for(i=0; i<nsems; i++) {
2878 __put_user((*host_array)[i], &array[i]);
2880 g_free(*host_array);
2881 unlock_user(array, target_addr, 1);
/* Emulate semctl(2): dispatch on cmd (the switch and its case labels --
 * presumably GETVAL/SETVAL, GETALL/SETALL, IPC_STAT/IPC_SET, IPC_INFO etc.
 * -- are elided from this dump) and convert the semun payload in the
 * appropriate direction around the host semctl call. */
2886 static inline abi_long do_semctl(int semid, int semnum, int cmd,
2887 abi_ulong target_arg)
2889 union target_semun target_su = { .buf = target_arg };
2891 struct semid_ds dsarg;
2892 unsigned short *array = NULL;
2893 struct seminfo seminfo;
2894 abi_long ret = -TARGET_EINVAL;
2901 /* In 64 bit cross-endian situations, we will erroneously pick up
2902 * the wrong half of the union for the "val" element. To rectify
2903 * this, the entire 8-byte structure is byteswapped, followed by
2904 * a swap of the 4 byte val field. In other cases, the data is
2905 * already in proper host byte order. */
2906 if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
2907 target_su.buf = tswapal(target_su.buf);
2908 arg.val = tswap32(target_su.val);
2910 arg.val = target_su.val;
/* SETVAL/GETVAL path: plain value argument. */
2912 ret = get_errno(semctl(semid, semnum, cmd, arg));
/* GETALL/SETALL path: marshal the unsigned-short array both ways. */
2916 err = target_to_host_semarray(semid, &array, target_su.array);
2920 ret = get_errno(semctl(semid, semnum, cmd, arg));
2921 err = host_to_target_semarray(semid, target_su.array, &array);
/* IPC_STAT/IPC_SET path: marshal struct semid_ds both ways. */
2928 err = target_to_host_semid_ds(&dsarg, target_su.buf);
2932 ret = get_errno(semctl(semid, semnum, cmd, arg));
2933 err = host_to_target_semid_ds(target_su.buf, &dsarg);
/* IPC_INFO/SEM_INFO path: host fills seminfo, copied out to guest. */
2939 arg.__buf = &seminfo;
2940 ret = get_errno(semctl(semid, semnum, cmd, arg));
2941 err = host_to_target_seminfo(target_su.__buf, &seminfo);
/* Commands with no pointer payload (e.g. IPC_RMID/GETPID/GETNCNT). */
2949 ret = get_errno(semctl(semid, semnum, cmd, NULL));
/* Guest-ABI sembuf (remaining fields sem_op/sem_flg elided from this
 * dump). */
2956 struct target_sembuf {
2957 unsigned short sem_num;
/* Bulk-convert nsops guest sembuf entries at target_addr into the
 * caller-provided host_sembuf array, swapping each field via __get_user.
 * Returns -TARGET_EFAULT if the guest range cannot be locked. */
2962 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
2963 abi_ulong target_addr,
2966 struct target_sembuf *target_sembuf;
2969 target_sembuf = lock_user(VERIFY_READ, target_addr,
2970 nsops*sizeof(struct target_sembuf), 1);
2972 return -TARGET_EFAULT;
2974 for(i=0; i<nsops; i++) {
2975 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
2976 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
2977 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
2980 unlock_user(target_sembuf, target_addr, 0);
/* Emulate semop(2): convert the guest sembuf array then forward to the
 * host.  NOTE(review): sops is a VLA sized by the guest-supplied nsops
 * with no visible upper bound -- a huge nsops could overflow the stack;
 * confirm whether a limit check exists on an elided line. */
2985 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
2987 struct sembuf sops[nsops];
2989 if (target_to_host_sembuf(sops, ptr, nsops))
2990 return -TARGET_EFAULT;
2992 return get_errno(semop(semid, sops, nsops));
/* Guest-ABI layout of struct msqid_ds; the __unusedN padding words exist
 * only on 32-bit target ABIs (matching #endif lines elided in this dump). */
2995 struct target_msqid_ds
2997 struct target_ipc_perm msg_perm;
2998 abi_ulong msg_stime;
2999 #if TARGET_ABI_BITS == 32
3000 abi_ulong __unused1;
3002 abi_ulong msg_rtime;
3003 #if TARGET_ABI_BITS == 32
3004 abi_ulong __unused2;
3006 abi_ulong msg_ctime;
3007 #if TARGET_ABI_BITS == 32
3008 abi_ulong __unused3;
3010 abi_ulong __msg_cbytes;
3012 abi_ulong msg_qbytes;
3013 abi_ulong msg_lspid;
3014 abi_ulong msg_lrpid;
3015 abi_ulong __unused4;
3016 abi_ulong __unused5;
/* Convert a guest msqid_ds at target_addr into *host_md (perm block plus
 * time/count fields, tswapal-swapped).  NOTE(review): as with the semid_ds
 * converters, an ipc_perm conversion failure returns with the outer struct
 * still locked. */
3019 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3020 abi_ulong target_addr)
3022 struct target_msqid_ds *target_md;
3024 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3025 return -TARGET_EFAULT;
3026 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3027 return -TARGET_EFAULT;
3028 host_md->msg_stime = tswapal(target_md->msg_stime);
3029 host_md->msg_rtime = tswapal(target_md->msg_rtime);
3030 host_md->msg_ctime = tswapal(target_md->msg_ctime);
3031 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3032 host_md->msg_qnum = tswapal(target_md->msg_qnum);
3033 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3034 host_md->msg_lspid = tswapal(target_md->msg_lspid);
3035 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3036 unlock_user_struct(target_md, target_addr, 0);
/* Write-direction mirror: copy *host_md back into the guest msqid_ds at
 * target_addr, swapping each field to guest order. */
3040 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3041 struct msqid_ds *host_md)
3043 struct target_msqid_ds *target_md;
3045 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3046 return -TARGET_EFAULT;
3047 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
3048 return -TARGET_EFAULT;
3049 target_md->msg_stime = tswapal(host_md->msg_stime);
3050 target_md->msg_rtime = tswapal(host_md->msg_rtime);
3051 target_md->msg_ctime = tswapal(host_md->msg_ctime);
3052 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3053 target_md->msg_qnum = tswapal(host_md->msg_qnum);
3054 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3055 target_md->msg_lspid = tswapal(host_md->msg_lspid);
3056 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3057 unlock_user_struct(target_md, target_addr, 1);
/* Guest-ABI msginfo (most fields elided from this dump; __put_user calls
 * below imply int msgpool..msgtql members plus the short msgseg shown). */
3061 struct target_msginfo {
3069 unsigned short int msgseg;
/* Copy host msginfo (msgctl IPC_INFO/MSG_INFO result) out to the guest
 * struct at target_addr, field by field via __put_user. */
3072 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
3073 struct msginfo *host_msginfo)
3075 struct target_msginfo *target_msginfo;
3076 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
3077 return -TARGET_EFAULT;
3078 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
3079 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
3080 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
3081 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
3082 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
3083 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
3084 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
3085 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
3086 unlock_user_struct(target_msginfo, target_addr, 1);
/* Emulate msgctl(2): dispatch on cmd (case labels -- presumably
 * IPC_STAT/IPC_SET, IPC_RMID, IPC_INFO/MSG_INFO -- elided from this dump)
 * and marshal the payload in the appropriate direction. */
3090 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
3092 struct msqid_ds dsarg;
3093 struct msginfo msginfo;
3094 abi_long ret = -TARGET_EINVAL;
/* IPC_STAT/IPC_SET: convert msqid_ds in, call host, convert back out. */
3102 if (target_to_host_msqid_ds(&dsarg,ptr))
3103 return -TARGET_EFAULT;
3104 ret = get_errno(msgctl(msgid, cmd, &dsarg));
3105 if (host_to_target_msqid_ds(ptr,&dsarg))
3106 return -TARGET_EFAULT;
/* IPC_RMID: no payload. */
3109 ret = get_errno(msgctl(msgid, cmd, NULL));
/* IPC_INFO/MSG_INFO: kernel writes a struct msginfo through the
 * msqid_ds-typed argument; cast matches the kernel ABI. */
3113 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
3114 if (host_to_target_msginfo(ptr, &msginfo))
3115 return -TARGET_EFAULT;
/* Guest-ABI msgbuf (mtype/mtext members elided from this dump). */
3122 struct target_msgbuf {
/* Emulate msgsnd(2): copy the guest message (mtype word plus msgsz payload
 * bytes) into a host msgbuf and forward.  A negative msgsz is rejected
 * with EINVAL (the comparison itself is on an elided line); allocation
 * failure unlocks the guest struct before returning ENOMEM.  The g_free of
 * host_mb appears to be on an elided line before the final unlock. */
3127 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
3128 ssize_t msgsz, int msgflg)
3130 struct target_msgbuf *target_mb;
3131 struct msgbuf *host_mb;
3135 return -TARGET_EINVAL;
3138 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
3139 return -TARGET_EFAULT;
3140 host_mb = g_try_malloc(msgsz + sizeof(long));
3142 unlock_user_struct(target_mb, msgp, 0);
3143 return -TARGET_ENOMEM;
3145 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
3146 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
3147 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
3149 unlock_user_struct(target_mb, msgp, 0);
/* Emulate msgrcv(2): receive into a host msgbuf, then copy the payload
 * (ret bytes) and the swapped mtype back to the guest.  The mtext is
 * written through a separately locked region at msgp + sizeof(abi_ulong),
 * i.e. just past the guest's mtype word.  Error paths fall through to a
 * common cleanup (labels/g_free on elided lines). */
3154 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
3155 ssize_t msgsz, abi_long msgtyp,
3158 struct target_msgbuf *target_mb;
3160 struct msgbuf *host_mb;
3164 return -TARGET_EINVAL;
3167 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
3168 return -TARGET_EFAULT;
3170 host_mb = g_try_malloc(msgsz + sizeof(long));
3172 ret = -TARGET_ENOMEM;
3175 ret = get_errno(msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
3178 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
3179 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
3180 if (!target_mtext) {
3181 ret = -TARGET_EFAULT;
3184 memcpy(target_mb->mtext, host_mb->mtext, ret);
3185 unlock_user(target_mtext, target_mtext_addr, ret);
3188 target_mb->mtype = tswapal(host_mb->mtype);
3192 unlock_user_struct(target_mb, msgp, 1);
/* Convert a guest shmid_ds at target_addr into *host_sd (perm block plus
 * size/time/pid/attach fields via __get_user).  NOTE(review): same
 * locked-struct-on-error pattern as the other *_ds converters. */
3197 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
3198 abi_ulong target_addr)
3200 struct target_shmid_ds *target_sd;
3202 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3203 return -TARGET_EFAULT;
3204 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
3205 return -TARGET_EFAULT;
3206 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3207 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
3208 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3209 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3210 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3211 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3212 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3213 unlock_user_struct(target_sd, target_addr, 0);
/* Write-direction mirror: copy *host_sd back to the guest shmid_ds at
 * target_addr via __put_user. */
3217 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
3218 struct shmid_ds *host_sd)
3220 struct target_shmid_ds *target_sd;
3222 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3223 return -TARGET_EFAULT;
3224 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
3225 return -TARGET_EFAULT;
3226 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3227 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
3228 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3229 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3230 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3231 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3232 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3233 unlock_user_struct(target_sd, target_addr, 1);
/* Guest-ABI shminfo (fields elided from this dump) and its copy-out
 * helper for shmctl IPC_INFO. */
3237 struct target_shminfo {
3245 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
3246 struct shminfo *host_shminfo)
3248 struct target_shminfo *target_shminfo;
3249 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
3250 return -TARGET_EFAULT;
3251 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
3252 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
3253 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
3254 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
3255 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
3256 unlock_user_struct(target_shminfo, target_addr, 1);
/* Guest-ABI shm_info (first fields elided from this dump) and its copy-out
 * helper for shmctl SHM_INFO. */
3260 struct target_shm_info {
3265 abi_ulong swap_attempts;
3266 abi_ulong swap_successes;
3269 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
3270 struct shm_info *host_shm_info)
3272 struct target_shm_info *target_shm_info;
3273 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
3274 return -TARGET_EFAULT;
3275 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
3276 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
3277 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
3278 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
3279 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
3280 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
3281 unlock_user_struct(target_shm_info, target_addr, 1);
/* Emulate shmctl(2): dispatch on cmd (case labels -- presumably
 * IPC_STAT/IPC_SET, IPC_INFO, SHM_INFO, IPC_RMID etc. -- elided from this
 * dump) and marshal the payload both ways around the host call. */
3285 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
3287 struct shmid_ds dsarg;
3288 struct shminfo shminfo;
3289 struct shm_info shm_info;
3290 abi_long ret = -TARGET_EINVAL;
/* IPC_STAT/IPC_SET: shmid_ds round trip. */
3298 if (target_to_host_shmid_ds(&dsarg, buf))
3299 return -TARGET_EFAULT;
3300 ret = get_errno(shmctl(shmid, cmd, &dsarg));
3301 if (host_to_target_shmid_ds(buf, &dsarg))
3302 return -TARGET_EFAULT;
/* IPC_INFO: kernel writes struct shminfo through the shmid_ds pointer. */
3305 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
3306 if (host_to_target_shminfo(buf, &shminfo))
3307 return -TARGET_EFAULT;
/* SHM_INFO: likewise, but struct shm_info. */
3310 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
3311 if (host_to_target_shm_info(buf, &shm_info))
3312 return -TARGET_EFAULT;
/* Commands with no payload (e.g. IPC_RMID). */
3317 ret = get_errno(shmctl(shmid, cmd, NULL));
/* Emulate shmat(2).  When the guest supplies an address, attach directly
 * at the host equivalent (g2h); otherwise let mmap_find_vma pick a free
 * guest range and attach there with SHM_REMAP.  On success the guest page
 * flags are updated for the segment and the mapping is recorded in
 * shm_regions[] so do_shmdt() can undo it.  NOTE(review): no visible
 * handling for shm_regions[] being full -- presumably the attach still
 * succeeds untracked; confirm in full source. */
3324 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
3328 struct shmid_ds shm_info;
3331 /* find out the length of the shared memory segment */
3332 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
3333 if (is_error(ret)) {
3334 /* can't get length, bail out */
3341 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
3343 abi_ulong mmap_start;
3345 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
3347 if (mmap_start == -1) {
3349 host_raddr = (void *)-1;
3351 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
3354 if (host_raddr == (void *)-1) {
3356 return get_errno((long)host_raddr);
3358 raddr=h2g((unsigned long)host_raddr);
3360 page_set_flags(raddr, raddr + shm_info.shm_segsz,
3361 PAGE_VALID | PAGE_READ |
3362 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
3364 for (i = 0; i < N_SHM_REGIONS; i++) {
3365 if (!shm_regions[i].in_use) {
3366 shm_regions[i].in_use = true;
3367 shm_regions[i].start = raddr;
3368 shm_regions[i].size = shm_info.shm_segsz;
/* Emulate shmdt(2): drop the shm_regions[] bookkeeping entry matching
 * shmaddr (clearing the guest page flags for its range), then detach the
 * host mapping. */
3378 static inline abi_long do_shmdt(abi_ulong shmaddr)
3382 for (i = 0; i < N_SHM_REGIONS; ++i) {
3383 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
3384 shm_regions[i].in_use = false;
3385 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
3390 return get_errno(shmdt(g2h(shmaddr)));
3393 #ifdef TARGET_NR_ipc
3394 /* ??? This only works with linear mappings. */
3395 /* do_ipc() must return target values and target errnos. */
/* Multiplexed SysV ipc(2) entry point used by targets without separate
 * sem*/msg*/shm* syscall numbers.  The low 16 bits of 'call' select the
 * operation (IPCOP_* case labels are elided from this dump); the high 16
 * bits carry a version used by the old two-argument msgrcv kludge. */
3396 static abi_long do_ipc(unsigned int call, abi_long first,
3397 abi_long second, abi_long third,
3398 abi_long ptr, abi_long fifth)
3403 version = call >> 16;
3408 ret = do_semop(first, ptr, second);
3412 ret = get_errno(semget(first, second, third));
3415 case IPCOP_semctl: {
3416 /* The semun argument to semctl is passed by value, so dereference the
/* (rest of that comment is on an elided line) ptr holds the address of
 * the guest's semun; fetch it before dispatching. */
3419 get_user_ual(atptr, ptr);
3420 ret = do_semctl(first, second, third, atptr);
3425 ret = get_errno(msgget(first, second));
3429 ret = do_msgsnd(first, ptr, second, third);
3433 ret = do_msgctl(first, second, ptr);
/* Old-style (version != 0) msgrcv packs msgp/msgtyp into a kludge struct
 * read from guest memory. */
3440 struct target_ipc_kludge {
3445 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
3446 ret = -TARGET_EFAULT;
3450 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
3452 unlock_user_struct(tmp, ptr, 0);
3456 ret = do_msgrcv(first, ptr, second, fifth, third);
/* shmat: result address is stored through the guest pointer in 'third'. */
3465 raddr = do_shmat(first, ptr, second);
3466 if (is_error(raddr))
3467 return get_errno(raddr);
3468 if (put_user_ual(raddr, third))
3469 return -TARGET_EFAULT;
3473 ret = -TARGET_EINVAL;
3478 ret = do_shmdt(ptr);
3482 /* IPC_* flag values are the same on all linux platforms */
3483 ret = get_errno(shmget(first, second, third));
3486 /* IPC_* and SHM_* command values are the same on all linux platforms */
3488 ret = do_shmctl(first, second, ptr);
3491 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
3492 ret = -TARGET_ENOSYS;
3499 /* kernel structure types definitions */
/* First expansion of syscall_types.h: build the STRUCT_* enumerators.
 * (The enum wrapper and #undef STRUCT lines are elided from this dump.) */
3501 #define STRUCT(name, ...) STRUCT_ ## name,
3502 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3504 #include "syscall_types.h"
3508 #undef STRUCT_SPECIAL
/* Second expansion: emit a thunk field-type descriptor array per struct. */
3510 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3511 #define STRUCT_SPECIAL(name)
3512 #include "syscall_types.h"
3514 #undef STRUCT_SPECIAL
/* One IOCTLEntry per emulated ioctl: host command number, optional custom
 * handler, and a thunk type description of the argument.  (target_cmd and
 * access fields are on lines elided from this dump.) */
3516 typedef struct IOCTLEntry IOCTLEntry;
3518 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
3519 int fd, int cmd, abi_long arg);
3523 unsigned int host_cmd;
3526 do_ioctl_fn *do_ioctl;
3527 const argtype arg_type[5];
/* Direction flags for ioctl argument marshalling. */
3530 #define IOC_R 0x0001
3531 #define IOC_W 0x0002
3532 #define IOC_RW (IOC_R | IOC_W)
/* Size of the fixed scratch buffer handed to ioctl handlers. */
3534 #define MAX_STRUCT_SIZE 4096
3536 #ifdef CONFIG_FIEMAP
3537 /* So fiemap access checks don't overflow on 32 bit systems.
3538 * This is very slightly smaller than the limit imposed by
3539 * the underlying kernel.
3541 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3542 / sizeof(struct fiemap_extent))
/* Custom handler for FS_IOC_FIEMAP: the argument is a struct fiemap with a
 * guest-sized trailing fiemap_extent array, so the generic thunk path
 * cannot size it.  Converts the header in, bounds fm_extent_count, grows
 * the buffer beyond buf_temp if needed, then converts the header plus each
 * returned extent back out.  (Cleanup/free of the enlarged buffer is on
 * lines elided from this dump.) */
3544 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
3545 int fd, int cmd, abi_long arg)
3547 /* The parameter for this ioctl is a struct fiemap followed
3548 * by an array of struct fiemap_extent whose size is set
3549 * in fiemap->fm_extent_count. The array is filled in by the
3552 int target_size_in, target_size_out;
3554 const argtype *arg_type = ie->arg_type;
3555 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
3558 int i, extent_size = thunk_type_size(extent_arg_type, 0);
3562 assert(arg_type[0] == TYPE_PTR);
3563 assert(ie->access == IOC_RW);
3565 target_size_in = thunk_type_size(arg_type, 0);
3566 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
3568 return -TARGET_EFAULT;
3570 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3571 unlock_user(argptr, arg, 0);
3572 fm = (struct fiemap *)buf_temp;
/* Guest-controlled extent count: reject values that would overflow the
 * outbufsz computation below. */
3573 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
3574 return -TARGET_EINVAL;
3577 outbufsz = sizeof (*fm) +
3578 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
3580 if (outbufsz > MAX_STRUCT_SIZE) {
3581 /* We can't fit all the extents into the fixed size buffer.
3582 * Allocate one that is large enough and use it instead.
3584 fm = g_try_malloc(outbufsz);
3586 return -TARGET_ENOMEM;
3588 memcpy(fm, buf_temp, sizeof(struct fiemap));
3591 ret = get_errno(ioctl(fd, ie->host_cmd, fm));
3592 if (!is_error(ret)) {
3593 target_size_out = target_size_in;
3594 /* An extent_count of 0 means we were only counting the extents
3595 * so there are no structs to copy
3597 if (fm->fm_extent_count != 0) {
3598 target_size_out += fm->fm_mapped_extents * extent_size;
3600 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
3602 ret = -TARGET_EFAULT;
3604 /* Convert the struct fiemap */
3605 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
3606 if (fm->fm_extent_count != 0) {
3607 p = argptr + target_size_in;
3608 /* ...and then all the struct fiemap_extents */
3609 for (i = 0; i < fm->fm_mapped_extents; i++) {
3610 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
3615 unlock_user(argptr, arg, target_size_out);
/* Custom handler for SIOCGIFCONF: struct ifconf carries a pointer to a
 * caller-sized ifreq array, so the handler rewrites ifc_buf to a host
 * buffer, calls the host ioctl, then converts ifc_len and each returned
 * ifreq back to guest layout.  NOTE(review): the oversized path uses bare
 * malloc() -- inconsistent with the g_try_malloc used elsewhere in this
 * file -- and no free of host_ifconf is visible before return (possible
 * leak; the freeing lines may be elided, TODO confirm).  Also the
 * lock_user of target_ifc_buf below is not checked for NULL before use. */
3625 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
3626 int fd, int cmd, abi_long arg)
3628 const argtype *arg_type = ie->arg_type;
3632 struct ifconf *host_ifconf;
3634 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
3635 int target_ifreq_size;
3640 abi_long target_ifc_buf;
3644 assert(arg_type[0] == TYPE_PTR);
3645 assert(ie->access == IOC_RW);
3648 target_size = thunk_type_size(arg_type, 0);
3650 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3652 return -TARGET_EFAULT;
3653 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3654 unlock_user(argptr, arg, 0);
3656 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
3657 target_ifc_len = host_ifconf->ifc_len;
3658 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
/* Translate the guest's buffer length (in target ifreq units) into the
 * number of host ifreq slots we must provide. */
3660 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
3661 nb_ifreq = target_ifc_len / target_ifreq_size;
3662 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
3664 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
3665 if (outbufsz > MAX_STRUCT_SIZE) {
3666 /* We can't fit all the extents into the fixed size buffer.
3667 * Allocate one that is large enough and use it instead.
3669 host_ifconf = malloc(outbufsz);
3671 return -TARGET_ENOMEM;
3673 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
/* Point ifc_buf at the host scratch space directly after the header. */
3676 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
3678 host_ifconf->ifc_len = host_ifc_len;
3679 host_ifconf->ifc_buf = host_ifc_buf;
3681 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
3682 if (!is_error(ret)) {
3683 /* convert host ifc_len to target ifc_len */
3685 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
3686 target_ifc_len = nb_ifreq * target_ifreq_size;
3687 host_ifconf->ifc_len = target_ifc_len;
3689 /* restore target ifc_buf */
3691 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
3693 /* copy struct ifconf to target user */
3695 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3697 return -TARGET_EFAULT;
3698 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
3699 unlock_user(argptr, arg, target_size);
3701 /* copy ifreq[] to target user */
3703 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
3704 for (i = 0; i < nb_ifreq ; i++) {
3705 thunk_convert(argptr + i * target_ifreq_size,
3706 host_ifc_buf + i * sizeof(struct ifreq),
3707 ifreq_arg_type, THUNK_TARGET);
3709 unlock_user(argptr, target_ifc_buf, target_ifc_len);
/* Custom handler for device-mapper DM_* ioctls.  struct dm_ioctl embeds a
 * variable-layout data area (at data_start, data_size bytes) whose contents
 * depend on the command, so each command's payload is converted by hand in
 * both directions around the host ioctl.  NOTE(review): big_buf is sized
 * from the guest-controlled data_size field (x2) with plain g_malloc0, and
 * several lock_user results below are used without a visible NULL check --
 * the checks may be on elided lines; TODO confirm against the full
 * source. */
3719 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
3720 int cmd, abi_long arg)
3723 struct dm_ioctl *host_dm;
3724 abi_long guest_data;
3725 uint32_t guest_data_size;
3727 const argtype *arg_type = ie->arg_type;
3729 void *big_buf = NULL;
3733 target_size = thunk_type_size(arg_type, 0);
3734 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3736 ret = -TARGET_EFAULT;
3739 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3740 unlock_user(argptr, arg, 0);
3742 /* buf_temp is too small, so fetch things into a bigger buffer */
3743 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
3744 memcpy(big_buf, buf_temp, target_size);
3748 guest_data = arg + host_dm->data_start;
3749 if ((guest_data - arg) < 0) {
3753 guest_data_size = host_dm->data_size - host_dm->data_start;
3754 host_data = (char*)host_dm + host_dm->data_start;
/* Inbound payload conversion, per command. */
3756 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
3757 switch (ie->host_cmd) {
3759 case DM_LIST_DEVICES:
3762 case DM_DEV_SUSPEND:
3765 case DM_TABLE_STATUS:
3766 case DM_TABLE_CLEAR:
3768 case DM_LIST_VERSIONS:
/* (no input payload for the commands grouped above) */
3772 case DM_DEV_SET_GEOMETRY:
3773 /* data contains only strings */
3774 memcpy(host_data, argptr, guest_data_size);
/* DM_DEV_RENAME-style payload (case label elided): strings preceded by a
 * 64-bit value that must be swapped. */
3777 memcpy(host_data, argptr, guest_data_size);
3778 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
/* DM_TABLE_LOAD (case label elided): convert each dm_target_spec header
 * and copy its trailing parameter string. */
3782 void *gspec = argptr;
3783 void *cur_data = host_data;
3784 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3785 int spec_size = thunk_type_size(arg_type, 0);
3788 for (i = 0; i < host_dm->target_count; i++) {
3789 struct dm_target_spec *spec = cur_data;
3793 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
3794 slen = strlen((char*)gspec + spec_size) + 1;
3796 spec->next = sizeof(*spec) + slen;
3797 strcpy((char*)&spec[1], gspec + spec_size);
3799 cur_data += spec->next;
3804 ret = -TARGET_EINVAL;
3805 unlock_user(argptr, guest_data, 0);
3808 unlock_user(argptr, guest_data, 0);
3810 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3811 if (!is_error(ret)) {
/* Outbound payload conversion, per command. */
3812 guest_data = arg + host_dm->data_start;
3813 guest_data_size = host_dm->data_size - host_dm->data_start;
3814 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
3815 switch (ie->host_cmd) {
3820 case DM_DEV_SUSPEND:
3823 case DM_TABLE_CLEAR:
3825 case DM_DEV_SET_GEOMETRY:
3826 /* no return data */
3828 case DM_LIST_DEVICES:
/* Walk the dm_name_list chain, converting each header and copying the
 * name; recompute next offsets for the target layout and flag overflow
 * via DM_BUFFER_FULL_FLAG. */
3830 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
3831 uint32_t remaining_data = guest_data_size;
3832 void *cur_data = argptr;
3833 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
3834 int nl_size = 12; /* can't use thunk_size due to alignment */
3837 uint32_t next = nl->next;
3839 nl->next = nl_size + (strlen(nl->name) + 1);
3841 if (remaining_data < nl->next) {
3842 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3845 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
3846 strcpy(cur_data + nl_size, nl->name);
3847 cur_data += nl->next;
3848 remaining_data -= nl->next;
3852 nl = (void*)nl + next;
3857 case DM_TABLE_STATUS:
/* Convert each returned dm_target_spec plus its status string. */
3859 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
3860 void *cur_data = argptr;
3861 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3862 int spec_size = thunk_type_size(arg_type, 0);
3865 for (i = 0; i < host_dm->target_count; i++) {
3866 uint32_t next = spec->next;
3867 int slen = strlen((char*)&spec[1]) + 1;
3868 spec->next = (cur_data - argptr) + spec_size + slen;
3869 if (guest_data_size < spec->next) {
3870 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3873 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
3874 strcpy(cur_data + spec_size, (char*)&spec[1]);
3875 cur_data = argptr + spec->next;
3876 spec = (void*)host_dm + host_dm->data_start + next;
/* DM_TABLE_DEPS (case label elided): count word followed by an array of
 * 64-bit device numbers, each swapped individually. */
3882 void *hdata = (void*)host_dm + host_dm->data_start;
3883 int count = *(uint32_t*)hdata;
3884 uint64_t *hdev = hdata + 8;
3885 uint64_t *gdev = argptr + 8;
3888 *(uint32_t*)argptr = tswap32(count);
3889 for (i = 0; i < count; i++) {
3890 *gdev = tswap64(*hdev);
3896 case DM_LIST_VERSIONS:
/* Walk the dm_target_versions chain, same pattern as DM_LIST_DEVICES. */
3898 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
3899 uint32_t remaining_data = guest_data_size;
3900 void *cur_data = argptr;
3901 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
3902 int vers_size = thunk_type_size(arg_type, 0);
3905 uint32_t next = vers->next;
3907 vers->next = vers_size + (strlen(vers->name) + 1);
3909 if (remaining_data < vers->next) {
3910 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3913 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
3914 strcpy(cur_data + vers_size, vers->name);
3915 cur_data += vers->next;
3916 remaining_data -= vers->next;
3920 vers = (void*)vers + next;
3925 unlock_user(argptr, guest_data, 0);
3926 ret = -TARGET_EINVAL;
3929 unlock_user(argptr, guest_data, guest_data_size);
/* Finally convert the dm_ioctl header itself back to the guest. */
3931 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3933 ret = -TARGET_EFAULT;
3936 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3937 unlock_user(argptr, arg, target_size);
/* Custom handler for BLKPG: struct blkpg_ioctl_arg carries a pointer to a
 * struct blkpg_partition payload.  Converts the outer struct, validates
 * the opcode, converts the pointed-to partition into a local host copy,
 * repoints ->data at it, and issues the host ioctl.  (The trailing 'out'
 * label and return are on lines elided from this dump.) */
3944 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
3945 int cmd, abi_long arg)
3949 const argtype *arg_type = ie->arg_type;
3950 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
3953 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
3954 struct blkpg_partition host_part;
3956 /* Read and convert blkpg */
3958 target_size = thunk_type_size(arg_type, 0);
3959 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3961 ret = -TARGET_EFAULT;
3964 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3965 unlock_user(argptr, arg, 0);
3967 switch (host_blkpg->op) {
3968 case BLKPG_ADD_PARTITION:
3969 case BLKPG_DEL_PARTITION:
3970 /* payload is struct blkpg_partition */
3972 break;
3973 /* Unknown opcode */
3974 ret = -TARGET_EINVAL;
3978 /* Read and convert blkpg->data */
3979 arg = (abi_long)(uintptr_t)host_blkpg->data;
3980 target_size = thunk_type_size(part_arg_type, 0);
3981 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3983 ret = -TARGET_EFAULT;
3986 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
3987 unlock_user(argptr, arg, 0);
3989 /* Swizzle the data pointer to our local copy and call! */
3990 host_blkpg->data = &host_part;
3991 ret = get_errno(ioctl(fd, ie->host_cmd, host_blkpg));
/*
 * Special handler for routing-table ioctls (struct rtentry argument).
 * The rt_dev member is a guest pointer to a device-name string, which
 * the generic thunk machinery cannot follow, so the struct is converted
 * field-by-field here: the rt_dev field is replaced with a host pointer
 * obtained from lock_user_string(), and every other field is converted
 * with thunk_convert().
 * NOTE(review): elided lines hide parts of the control flow (loop
 * brackets, final return); comments below only describe visible code.
 */
3997 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
3998 int fd, int cmd, abi_long arg)
4000 const argtype *arg_type = ie->arg_type;
4001 const StructEntry *se;
4002 const argtype *field_types;
4003 const int *dst_offsets, *src_offsets;
4006 abi_ulong *target_rt_dev_ptr;
4007 unsigned long *host_rt_dev_ptr;
/* This handler is only registered for write-direction pointer-to-struct
 * rtentry ioctls; the asserts document that contract. */
4011 assert(ie->access == IOC_W);
4012 assert(*arg_type == TYPE_PTR);
4014 assert(*arg_type == TYPE_STRUCT);
4015 target_size = thunk_type_size(arg_type, 0);
4016 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4018 return -TARGET_EFAULT;
4021 assert(*arg_type == (int)STRUCT_rtentry);
4022 se = struct_entries + *arg_type++;
4023 assert(se->convert[0] == NULL);
4024 /* convert struct here to be able to catch rt_dev string */
4025 field_types = se->field_types;
4026 dst_offsets = se->field_offsets[THUNK_HOST];
4027 src_offsets = se->field_offsets[THUNK_TARGET];
4028 for (i = 0; i < se->nb_fields; i++) {
4029 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
4030 assert(*field_types == TYPE_PTRVOID);
4031 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
4032 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
/* Non-NULL rt_dev: pin the guest string into host memory; it is
 * released after the ioctl below. */
4033 if (*target_rt_dev_ptr != 0) {
4034 *host_rt_dev_ptr = (unsigned long)lock_user_string(
4035 tswapal(*target_rt_dev_ptr));
4036 if (!*host_rt_dev_ptr) {
4037 unlock_user(argptr, arg, 0);
4038 return -TARGET_EFAULT;
4041 *host_rt_dev_ptr = 0;
/* Every other field goes through the normal thunk conversion. */
4046 field_types = thunk_convert(buf_temp + dst_offsets[i],
4047 argptr + src_offsets[i],
4048 field_types, THUNK_HOST);
4050 unlock_user(argptr, arg, 0);
4052 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
/* Release the pinned device-name string, if one was locked above. */
4053 if (*host_rt_dev_ptr != 0) {
4054 unlock_user((void *)*host_rt_dev_ptr,
4055 *target_rt_dev_ptr, 0);
/*
 * KDSIGACCEPT takes a signal number as its argument, which must be
 * translated from the target's signal numbering to the host's before
 * the host ioctl is issued.
 */
4060 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
4061 int fd, int cmd, abi_long arg)
4063 int sig = target_to_host_signal(arg);
4064 return get_errno(ioctl(fd, ie->host_cmd, sig));
4067 static IOCTLEntry ioctl_entries[] = {
4068 #define IOCTL(cmd, access, ...) \
4069 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
4070 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
4071 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
4076 /* ??? Implement proper locking for ioctls. */
4077 /* do_ioctl() Must return target values and target errnos. */
/*
 * Generic ioctl dispatcher: looks the command up in ioctl_entries[],
 * delegates to a per-command do_ioctl() hook if one is registered,
 * and otherwise converts the argument according to the entry's argtype
 * description (no argument, pass-through integer, or pointer-to-struct
 * with IOC_R/IOC_W/IOC_RW direction handling via the thunk machinery).
 * NOTE(review): elided lines hide the lookup loop body, braces and
 * some case labels; comments describe only the visible statements.
 */
4078 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
4080 const IOCTLEntry *ie;
4081 const argtype *arg_type;
4083 uint8_t buf_temp[MAX_STRUCT_SIZE];
/* Table is terminated by a zero target_cmd; reaching it means the
 * command is not emulated. */
4089 if (ie->target_cmd == 0) {
4090 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
4091 return -TARGET_ENOSYS;
4093 if (ie->target_cmd == cmd)
4097 arg_type = ie->arg_type;
4099 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
/* Commands with a registered special handler bypass the generic path. */
4102 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
4105 switch(arg_type[0]) {
4108 ret = get_errno(ioctl(fd, ie->host_cmd));
4112 ret = get_errno(ioctl(fd, ie->host_cmd, arg));
4116 target_size = thunk_type_size(arg_type, 0);
4117 switch(ie->access) {
/* IOC_R: host fills buf_temp; convert back out to the guest. */
4119 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
4120 if (!is_error(ret)) {
4121 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4123 return -TARGET_EFAULT;
4124 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4125 unlock_user(argptr, arg, target_size);
/* IOC_W: convert guest struct in, then call. */
4129 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4131 return -TARGET_EFAULT;
4132 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4133 unlock_user(argptr, arg, 0);
4134 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
/* IOC_RW: convert in, call, convert back out on success. */
4138 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4140 return -TARGET_EFAULT;
4141 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4142 unlock_user(argptr, arg, 0);
4143 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
4144 if (!is_error(ret)) {
4145 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4147 return -TARGET_EFAULT;
4148 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4149 unlock_user(argptr, arg, target_size);
4155 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
4156 (long)cmd, arg_type[0]);
4157 ret = -TARGET_ENOSYS;
/* Target <-> host translation table for termios c_iflag (input modes).
 * Each row is { target_mask, target_bits, host_mask, host_bits }. */
4163 static const bitmask_transtbl iflag_tbl[] = {
4164 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
4165 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
4166 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
4167 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
4168 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
4169 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
4170 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
4171 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
4172 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
4173 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
4174 { TARGET_IXON, TARGET_IXON, IXON, IXON },
4175 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
4176 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
4177 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
/* Target <-> host translation table for termios c_oflag (output modes).
 * Multi-bit delay fields (NLDLY, CRDLY, TABDLY, ...) get one row per
 * possible value within the field's mask. */
4181 static const bitmask_transtbl oflag_tbl[] = {
4182 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
4183 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
4184 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
4185 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
4186 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
4187 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
4188 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
4189 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
4190 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
4191 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
4192 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
4193 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
4194 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
4195 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
4196 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
4197 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
4198 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
4199 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
4200 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
4201 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
4202 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
4203 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
4204 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
4205 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
/* Target <-> host translation table for termios c_cflag (control modes).
 * The CBAUD and CSIZE fields are multi-bit, so each supported baud rate
 * and character size gets its own row within the field mask. */
4209 static const bitmask_transtbl cflag_tbl[] = {
4210 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
4211 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
4212 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
4213 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
4214 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
4215 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
4216 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
4217 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
4218 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
4219 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
4220 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
4221 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
4222 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
4223 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
4224 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
4225 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
4226 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
4227 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
4228 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
4229 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
4230 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
4231 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
4232 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
4233 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
4234 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
4235 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
4236 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
4237 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
4238 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
4239 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
4240 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
/* Target <-> host translation table for termios c_lflag (local modes). */
4244 static const bitmask_transtbl lflag_tbl[] = {
4245 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
4246 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
4247 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
4248 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
4249 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
4250 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
4251 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
4252 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
4253 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
4254 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
4255 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
4256 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
4257 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
4258 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
4259 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
/*
 * Convert a target (guest) struct termios to the host layout: the four
 * flag words are byte-swapped and translated through the bitmask tables
 * above, and each control character is copied to its host slot index.
 * Host c_cc slots with no target equivalent are left zeroed by the
 * memset.
 */
4263 static void target_to_host_termios (void *dst, const void *src)
4265 struct host_termios *host = dst;
4266 const struct target_termios *target = src;
4269 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
4271 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
4273 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
4275 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
4276 host->c_line = target->c_line;
4278 memset(host->c_cc, 0, sizeof(host->c_cc));
4279 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
4280 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
4281 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
4282 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
4283 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
4284 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
4285 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
4286 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
4287 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
4288 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
4289 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
4290 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
4291 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
4292 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
4293 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
4294 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
4295 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
/*
 * Inverse of target_to_host_termios(): convert a host struct termios
 * back to the target (guest) layout, translating the flag words through
 * the same bitmask tables and remapping each control character index.
 */
4298 static void host_to_target_termios (void *dst, const void *src)
4300 struct target_termios *target = dst;
4301 const struct host_termios *host = src;
4304 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
4306 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
4308 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
4310 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
4311 target->c_line = host->c_line;
4313 memset(target->c_cc, 0, sizeof(target->c_cc));
4314 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
4315 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
4316 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
4317 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
4318 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
4319 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
4320 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
4321 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
4322 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
4323 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
4324 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
4325 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
4326 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
4327 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
4328 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
4329 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
4330 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
/* Thunk descriptor for struct termios: custom converters are used
 * instead of field-by-field conversion (see the two functions above).
 * Index 0 is host->target, index 1 is target->host. */
4333 static const StructEntry struct_termios_def = {
4334 .convert = { host_to_target_termios, target_to_host_termios },
4335 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
4336 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
/* Target <-> host translation table for mmap() flag bits.
 * NOTE(review): the table terminator and trailing entries are elided
 * in this listing (gap after the MAP_NORESERVE row). */
4339 static bitmask_transtbl mmap_flags_tbl[] = {
4340 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
4341 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
4342 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
4343 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
4344 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
4345 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
4346 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
4347 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
4348 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE, MAP_NORESERVE,
4353 #if defined(TARGET_I386)
4355 /* NOTE: there is really one LDT for all the threads */
4356 static uint8_t *ldt_table;
/*
 * modify_ldt(func=0): copy the emulated LDT (ldt_table) out to guest
 * memory at 'ptr', capped at the full emulated LDT size.  Returns
 * -TARGET_EFAULT if the guest buffer cannot be locked.
 * NOTE(review): elided lines hide local declarations and the final
 * return value.
 */
4358 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
4365 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
4366 if (size > bytecount)
4368 p = lock_user(VERIFY_WRITE, ptr, size, 0);
4370 return -TARGET_EFAULT;
4371 /* ??? Should this by byteswapped? */
4372 memcpy(p, ldt_table, size);
4373 unlock_user(p, ptr, size);
4377 /* XXX: add locking support */
/*
 * modify_ldt(func=1/0x11): install one LDT descriptor from the guest's
 * struct target_modify_ldt_ldt_s.  Validates the entry number, unpacks
 * the flag bits, lazily allocates the emulated LDT on first use, and
 * encodes the descriptor into the two 32-bit words of an x86 segment
 * descriptor (same encoding as the Linux kernel's write_ldt()).
 * 'oldmode' selects the legacy modify_ldt semantics.
 * NOTE(review): several lines are elided (braces, parts of the
 * "clear entry" path, some flag bits of entry_2, final return).
 */
4378 static abi_long write_ldt(CPUX86State *env,
4379 abi_ulong ptr, unsigned long bytecount, int oldmode)
4381 struct target_modify_ldt_ldt_s ldt_info;
4382 struct target_modify_ldt_ldt_s *target_ldt_info;
4383 int seg_32bit, contents, read_exec_only, limit_in_pages;
4384 int seg_not_present, useable, lm;
4385 uint32_t *lp, entry_1, entry_2;
4387 if (bytecount != sizeof(ldt_info))
4388 return -TARGET_EINVAL;
4389 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
4390 return -TARGET_EFAULT;
/* Copy-in with byte-swapping; guest struct is not used after this. */
4391 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
4392 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
4393 ldt_info.limit = tswap32(target_ldt_info->limit);
4394 ldt_info.flags = tswap32(target_ldt_info->flags);
4395 unlock_user_struct(target_ldt_info, ptr, 0);
4397 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
4398 return -TARGET_EINVAL;
/* Unpack the packed flag word (same bit layout as the kernel ABI). */
4399 seg_32bit = ldt_info.flags & 1;
4400 contents = (ldt_info.flags >> 1) & 3;
4401 read_exec_only = (ldt_info.flags >> 3) & 1;
4402 limit_in_pages = (ldt_info.flags >> 4) & 1;
4403 seg_not_present = (ldt_info.flags >> 5) & 1;
4404 useable = (ldt_info.flags >> 6) & 1;
4408 lm = (ldt_info.flags >> 7) & 1;
4410 if (contents == 3) {
4412 return -TARGET_EINVAL;
4413 if (seg_not_present == 0)
4414 return -TARGET_EINVAL;
4416 /* allocate the LDT */
4418 env->ldt.base = target_mmap(0,
4419 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
4420 PROT_READ|PROT_WRITE,
4421 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
4422 if (env->ldt.base == -1)
4423 return -TARGET_ENOMEM;
4424 memset(g2h(env->ldt.base), 0,
4425 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
4426 env->ldt.limit = 0xffff;
4427 ldt_table = g2h(env->ldt.base);
4430 /* NOTE: same code as Linux kernel */
4431 /* Allow LDTs to be cleared by the user. */
4432 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
4435 read_exec_only == 1 &&
4437 limit_in_pages == 0 &&
4438 seg_not_present == 1 &&
/* Encode the descriptor words (base/limit split across both words,
 * type and attribute bits in entry_2). */
4446 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4447 (ldt_info.limit & 0x0ffff);
4448 entry_2 = (ldt_info.base_addr & 0xff000000) |
4449 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
4450 (ldt_info.limit & 0xf0000) |
4451 ((read_exec_only ^ 1) << 9) |
4453 ((seg_not_present ^ 1) << 15) |
4455 (limit_in_pages << 23) |
4459 entry_2 |= (useable << 20);
4461 /* Install the new entry ... */
4463 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
4464 lp[0] = tswap32(entry_1);
4465 lp[1] = tswap32(entry_2);
4469 /* specific and weird i386 syscalls */
/*
 * Dispatcher for the modify_ldt syscall: func selects read_ldt,
 * write_ldt in old mode, or write_ldt in new mode; anything else
 * returns -TARGET_ENOSYS.
 * NOTE(review): the switch statement itself and case labels are elided
 * in this listing.
 */
4470 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
4471 unsigned long bytecount)
4477 ret = read_ldt(ptr, bytecount);
4480 ret = write_ldt(env, ptr, bytecount, 1);
4483 ret = write_ldt(env, ptr, bytecount, 0);
4486 ret = -TARGET_ENOSYS;
4492 #if defined(TARGET_I386) && defined(TARGET_ABI32)
/*
 * Emulate set_thread_area(2): install a TLS descriptor into the
 * emulated GDT.  entry_number == -1 asks for the first free slot in
 * the TLS range, and the chosen index is written back to the guest.
 * Descriptor encoding mirrors write_ldt() above (kernel-compatible).
 * NOTE(review): elided lines hide braces, parts of the clear-entry
 * path and some attribute bits of entry_2.
 */
4493 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
4495 uint64_t *gdt_table = g2h(env->gdt.base);
4496 struct target_modify_ldt_ldt_s ldt_info;
4497 struct target_modify_ldt_ldt_s *target_ldt_info;
4498 int seg_32bit, contents, read_exec_only, limit_in_pages;
4499 int seg_not_present, useable, lm;
4500 uint32_t *lp, entry_1, entry_2;
4503 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
4504 if (!target_ldt_info)
4505 return -TARGET_EFAULT;
4506 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
4507 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
4508 ldt_info.limit = tswap32(target_ldt_info->limit);
4509 ldt_info.flags = tswap32(target_ldt_info->flags);
/* -1 means "allocate a free TLS slot"; report the choice back to the
 * guest via entry_number. */
4510 if (ldt_info.entry_number == -1) {
4511 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
4512 if (gdt_table[i] == 0) {
4513 ldt_info.entry_number = i;
4514 target_ldt_info->entry_number = tswap32(i);
4519 unlock_user_struct(target_ldt_info, ptr, 1);
4521 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
4522 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
4523 return -TARGET_EINVAL;
/* Unpack the flag word; same layout as in write_ldt(). */
4524 seg_32bit = ldt_info.flags & 1;
4525 contents = (ldt_info.flags >> 1) & 3;
4526 read_exec_only = (ldt_info.flags >> 3) & 1;
4527 limit_in_pages = (ldt_info.flags >> 4) & 1;
4528 seg_not_present = (ldt_info.flags >> 5) & 1;
4529 useable = (ldt_info.flags >> 6) & 1;
4533 lm = (ldt_info.flags >> 7) & 1;
4536 if (contents == 3) {
4537 if (seg_not_present == 0)
4538 return -TARGET_EINVAL;
4541 /* NOTE: same code as Linux kernel */
4542 /* Allow LDTs to be cleared by the user. */
4543 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
4544 if ((contents == 0 &&
4545 read_exec_only == 1 &&
4547 limit_in_pages == 0 &&
4548 seg_not_present == 1 &&
/* Encode the two descriptor words. */
4556 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4557 (ldt_info.limit & 0x0ffff);
4558 entry_2 = (ldt_info.base_addr & 0xff000000) |
4559 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
4560 (ldt_info.limit & 0xf0000) |
4561 ((read_exec_only ^ 1) << 9) |
4563 ((seg_not_present ^ 1) << 15) |
4565 (limit_in_pages << 23) |
4570 /* Install the new entry ... */
4572 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
4573 lp[0] = tswap32(entry_1);
4574 lp[1] = tswap32(entry_2);
/*
 * Emulate get_thread_area(2): read the TLS descriptor at the requested
 * GDT index, decode the two 32-bit descriptor words back into base,
 * limit and the packed flag word, and write them to the guest struct.
 * This is the exact inverse of the encoding in do_set_thread_area().
 * NOTE(review): braces and the final return are elided in this listing.
 */
4578 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
4580 struct target_modify_ldt_ldt_s *target_ldt_info;
4581 uint64_t *gdt_table = g2h(env->gdt.base);
4582 uint32_t base_addr, limit, flags;
4583 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
4584 int seg_not_present, useable, lm;
4585 uint32_t *lp, entry_1, entry_2;
4587 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
4588 if (!target_ldt_info)
4589 return -TARGET_EFAULT;
4590 idx = tswap32(target_ldt_info->entry_number);
4591 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
4592 idx > TARGET_GDT_ENTRY_TLS_MAX) {
4593 unlock_user_struct(target_ldt_info, ptr, 1);
4594 return -TARGET_EINVAL;
4596 lp = (uint32_t *)(gdt_table + idx);
4597 entry_1 = tswap32(lp[0]);
4598 entry_2 = tswap32(lp[1]);
/* Decode attribute bits out of the high descriptor word. */
4600 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
4601 contents = (entry_2 >> 10) & 3;
4602 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
4603 seg_32bit = (entry_2 >> 22) & 1;
4604 limit_in_pages = (entry_2 >> 23) & 1;
4605 useable = (entry_2 >> 20) & 1;
4609 lm = (entry_2 >> 21) & 1;
/* Repack into the modify_ldt-style flag word. */
4611 flags = (seg_32bit << 0) | (contents << 1) |
4612 (read_exec_only << 3) | (limit_in_pages << 4) |
4613 (seg_not_present << 5) | (useable << 6) | (lm << 7);
4614 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
4615 base_addr = (entry_1 >> 16) |
4616 (entry_2 & 0xff000000) |
4617 ((entry_2 & 0xff) << 16);
4618 target_ldt_info->base_addr = tswapal(base_addr);
4619 target_ldt_info->limit = tswap32(limit);
4620 target_ldt_info->flags = tswap32(flags);
4621 unlock_user_struct(target_ldt_info, ptr, 1);
4626 #ifndef TARGET_ABI32
/*
 * x86-64 arch_prctl(2) emulation: SET_GS/SET_FS load the selected
 * segment register's base to 'addr'; GET_GS/GET_FS copy the current
 * base out to guest memory at 'addr'.
 * NOTE(review): elided lines hide the idx selection for the "else"
 * segment and the final return of 'ret'.
 */
4627 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
4634 case TARGET_ARCH_SET_GS:
4635 case TARGET_ARCH_SET_FS:
4636 if (code == TARGET_ARCH_SET_GS)
4640 cpu_x86_load_seg(env, idx, 0);
4641 env->segs[idx].base = addr;
4643 case TARGET_ARCH_GET_GS:
4644 case TARGET_ARCH_GET_FS:
4645 if (code == TARGET_ARCH_GET_GS)
4649 val = env->segs[idx].base;
4650 if (put_user(val, addr, abi_ulong))
4651 ret = -TARGET_EFAULT;
4654 ret = -TARGET_EINVAL;
4663 #define NEW_STACK_SIZE 0x40000
4666 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
4669 pthread_mutex_t mutex;
4670 pthread_cond_t cond;
4673 abi_ulong child_tidptr;
4674 abi_ulong parent_tidptr;
/*
 * Thread entry point for CLONE_VM clones (see do_fork).  Publishes the
 * new thread's TID to the requested guest locations, restores the
 * signal mask that the parent blocked, signals readiness on the shared
 * condition variable, and then waits on clone_lock until the parent
 * has finished setting up TLS state before running guest code.
 * NOTE(review): elided lines hide local declarations and the call into
 * the guest CPU loop at the end.
 */
4678 static void *clone_func(void *arg)
4680 new_thread_info *info = arg;
4685 rcu_register_thread();
4687 cpu = ENV_GET_CPU(env);
4689 ts = (TaskState *)cpu->opaque;
4690 info->tid = gettid();
4691 cpu->host_tid = info->tid;
4693 if (info->child_tidptr)
4694 put_user_u32(info->tid, info->child_tidptr);
4695 if (info->parent_tidptr)
4696 put_user_u32(info->tid, info->parent_tidptr);
4697 /* Enable signals. */
4698 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
4699 /* Signal to the parent that we're ready. */
4700 pthread_mutex_lock(&info->mutex);
4701 pthread_cond_broadcast(&info->cond);
4702 pthread_mutex_unlock(&info->mutex);
4703 /* Wait until the parent has finshed initializing the tls state. */
4704 pthread_mutex_lock(&clone_lock);
4705 pthread_mutex_unlock(&clone_lock);
4711 /* do_fork() Must return host values and target errnos (unlike most
4712 do_*() functions). */
/*
 * Emulate clone(2)/fork(2)/vfork(2).  CLONE_VM requests become a new
 * host pthread sharing the address space (with a handshake so thread
 * setup appears atomic); everything else is emulated with a host
 * fork().  vfork is deliberately degraded to fork.
 * NOTE(review): elided lines hide some locals, the fork() call itself,
 * the CLONE_CHILD_CLEARTID bookkeeping in the thread path, and the
 * returns; comments describe only visible statements.
 */
4713 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
4714 abi_ulong parent_tidptr, target_ulong newtls,
4715 abi_ulong child_tidptr)
4717 CPUState *cpu = ENV_GET_CPU(env);
4721 CPUArchState *new_env;
4722 unsigned int nptl_flags;
4725 /* Emulate vfork() with fork() */
4726 if (flags & CLONE_VFORK)
4727 flags &= ~(CLONE_VFORK | CLONE_VM);
4729 if (flags & CLONE_VM) {
4730 TaskState *parent_ts = (TaskState *)cpu->opaque;
4731 new_thread_info info;
4732 pthread_attr_t attr;
4734 ts = g_new0(TaskState, 1);
4735 init_task_state(ts);
4736 /* we create a new CPU instance. */
4737 new_env = cpu_copy(env);
4738 /* Init regs that differ from the parent. */
4739 cpu_clone_regs(new_env, newsp);
4740 new_cpu = ENV_GET_CPU(new_env);
4741 new_cpu->opaque = ts;
4742 ts->bprm = parent_ts->bprm;
4743 ts->info = parent_ts->info;
/* NPTL-specific flag bits are handled here, not passed to the host. */
4745 flags &= ~CLONE_NPTL_FLAGS2;
4747 if (nptl_flags & CLONE_CHILD_CLEARTID) {
4748 ts->child_tidptr = child_tidptr;
4751 if (nptl_flags & CLONE_SETTLS)
4752 cpu_set_tls (new_env, newtls);
4754 /* Grab a mutex so that thread setup appears atomic. */
4755 pthread_mutex_lock(&clone_lock);
4757 memset(&info, 0, sizeof(info));
4758 pthread_mutex_init(&info.mutex, NULL);
4759 pthread_mutex_lock(&info.mutex);
4760 pthread_cond_init(&info.cond, NULL);
4762 if (nptl_flags & CLONE_CHILD_SETTID)
4763 info.child_tidptr = child_tidptr;
4764 if (nptl_flags & CLONE_PARENT_SETTID)
4765 info.parent_tidptr = parent_tidptr;
4767 ret = pthread_attr_init(&attr);
4768 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
4769 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
4770 /* It is not safe to deliver signals until the child has finished
4771 initializing, so temporarily block all signals. */
4772 sigfillset(&sigmask);
4773 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
4775 ret = pthread_create(&info.thread, &attr, clone_func, &info);
4776 /* TODO: Free new CPU state if thread creation failed. */
4778 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
4779 pthread_attr_destroy(&attr);
4781 /* Wait for the child to initialize. */
4782 pthread_cond_wait(&info.cond, &info.mutex);
4784 if (flags & CLONE_PARENT_SETTID)
4785 put_user_u32(ret, parent_tidptr);
4789 pthread_mutex_unlock(&info.mutex);
4790 pthread_cond_destroy(&info.cond);
4791 pthread_mutex_destroy(&info.mutex);
4792 pthread_mutex_unlock(&clone_lock);
4794 /* if no CLONE_VM, we consider it is a fork */
4795 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0) {
4796 return -TARGET_EINVAL;
4801 /* Child Process. */
4803 cpu_clone_regs(env, newsp);
4805 /* There is a race condition here. The parent process could
4806 theoretically read the TID in the child process before the child
4807 tid is set. This would require using either ptrace
4808 (not implemented) or having *_tidptr to point at a shared memory
4809 mapping. We can't repeat the spinlock hack used above because
4810 the child process gets its own copy of the lock. */
4811 if (flags & CLONE_CHILD_SETTID)
4812 put_user_u32(gettid(), child_tidptr)
4813 if (flags & CLONE_PARENT_SETTID)
4814 put_user_u32(gettid(), parent_tidptr);
4815 ts = (TaskState *)cpu->opaque;
4816 if (flags & CLONE_SETTLS)
4817 cpu_set_tls (env, newtls);
4818 if (flags & CLONE_CHILD_CLEARTID)
4819 ts->child_tidptr = child_tidptr;
4827 /* warning : doesn't handle linux specific flags... */
/*
 * Map a target fcntl command number to the corresponding host command.
 * Commands with no host equivalent yield -TARGET_EINVAL (checked by
 * do_fcntl before issuing the host call).
 * NOTE(review): most 'return <host cmd>;' lines are elided in this
 * listing, leaving mainly the case labels visible.
 */
4828 static int target_to_host_fcntl_cmd(int cmd)
4831 case TARGET_F_DUPFD:
4832 case TARGET_F_GETFD:
4833 case TARGET_F_SETFD:
4834 case TARGET_F_GETFL:
4835 case TARGET_F_SETFL:
4837 case TARGET_F_GETLK:
4839 case TARGET_F_SETLK:
4841 case TARGET_F_SETLKW:
4843 case TARGET_F_GETOWN:
4845 case TARGET_F_SETOWN:
4847 case TARGET_F_GETSIG:
4849 case TARGET_F_SETSIG:
4851 #if TARGET_ABI_BITS == 32
4852 case TARGET_F_GETLK64:
4854 case TARGET_F_SETLK64:
4856 case TARGET_F_SETLKW64:
4859 case TARGET_F_SETLEASE:
4861 case TARGET_F_GETLEASE:
4863 #ifdef F_DUPFD_CLOEXEC
4864 case TARGET_F_DUPFD_CLOEXEC:
4865 return F_DUPFD_CLOEXEC;
4867 case TARGET_F_NOTIFY:
4870 case TARGET_F_GETOWN_EX:
4874 case TARGET_F_SETOWN_EX:
4878 return -TARGET_EINVAL;
4880 return -TARGET_EINVAL;
/* Translation table for flock l_type values (F_RDLCK etc.).  The -1
 * masks mean "match all bits", so each row is an exact-value mapping
 * rather than a bitmask translation. */
4883 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
4884 static const bitmask_transtbl flock_tbl[] = {
4885 TRANSTBL_CONVERT(F_RDLCK),
4886 TRANSTBL_CONVERT(F_WRLCK),
4887 TRANSTBL_CONVERT(F_UNLCK),
4888 TRANSTBL_CONVERT(F_EXLCK),
4889 TRANSTBL_CONVERT(F_SHLCK),
/*
 * Emulate fcntl(2).  Lock commands convert struct flock / flock64
 * between target and host layouts (l_type goes through flock_tbl;
 * the 64-bit variants shift the translated value right by one to match
 * the target encoding).  GETFL/SETFL translate the flag bits through
 * fcntl_flags_tbl; *_OWN_EX convert struct f_owner_ex; the remaining
 * integer-argument commands pass 'arg' straight through.
 * NOTE(review): elided lines hide the 'struct flock fl' declaration,
 * the switch header, break statements and the final return of 'ret'.
 */
4893 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
4896 struct target_flock *target_fl;
4897 struct flock64 fl64;
4898 struct target_flock64 *target_fl64;
4900 struct f_owner_ex fox;
4901 struct target_f_owner_ex *target_fox;
4904 int host_cmd = target_to_host_fcntl_cmd(cmd);
4906 if (host_cmd == -TARGET_EINVAL)
4910 case TARGET_F_GETLK:
4911 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4912 return -TARGET_EFAULT;
4914 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
4915 fl.l_whence = tswap16(target_fl->l_whence);
4916 fl.l_start = tswapal(target_fl->l_start);
4917 fl.l_len = tswapal(target_fl->l_len);
4918 fl.l_pid = tswap32(target_fl->l_pid);
4919 unlock_user_struct(target_fl, arg, 0);
4920 ret = get_errno(fcntl(fd, host_cmd, &fl));
/* F_GETLK writes the result back out to the guest struct. */
4922 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
4923 return -TARGET_EFAULT;
4925 host_to_target_bitmask(tswap16(fl.l_type), flock_tbl);
4926 target_fl->l_whence = tswap16(fl.l_whence);
4927 target_fl->l_start = tswapal(fl.l_start);
4928 target_fl->l_len = tswapal(fl.l_len);
4929 target_fl->l_pid = tswap32(fl.l_pid);
4930 unlock_user_struct(target_fl, arg, 1);
4934 case TARGET_F_SETLK:
4935 case TARGET_F_SETLKW:
4936 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4937 return -TARGET_EFAULT;
4939 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
4940 fl.l_whence = tswap16(target_fl->l_whence);
4941 fl.l_start = tswapal(target_fl->l_start);
4942 fl.l_len = tswapal(target_fl->l_len);
4943 fl.l_pid = tswap32(target_fl->l_pid);
4944 unlock_user_struct(target_fl, arg, 0);
4945 ret = get_errno(fcntl(fd, host_cmd, &fl));
4948 case TARGET_F_GETLK64:
4949 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4950 return -TARGET_EFAULT;
4952 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
4953 fl64.l_whence = tswap16(target_fl64->l_whence);
4954 fl64.l_start = tswap64(target_fl64->l_start);
4955 fl64.l_len = tswap64(target_fl64->l_len);
4956 fl64.l_pid = tswap32(target_fl64->l_pid);
4957 unlock_user_struct(target_fl64, arg, 0);
4958 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4960 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
4961 return -TARGET_EFAULT;
4962 target_fl64->l_type =
4963 host_to_target_bitmask(tswap16(fl64.l_type), flock_tbl) >> 1;
4964 target_fl64->l_whence = tswap16(fl64.l_whence);
4965 target_fl64->l_start = tswap64(fl64.l_start);
4966 target_fl64->l_len = tswap64(fl64.l_len);
4967 target_fl64->l_pid = tswap32(fl64.l_pid);
4968 unlock_user_struct(target_fl64, arg, 1);
4971 case TARGET_F_SETLK64:
4972 case TARGET_F_SETLKW64:
4973 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4974 return -TARGET_EFAULT;
4976 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
4977 fl64.l_whence = tswap16(target_fl64->l_whence);
4978 fl64.l_start = tswap64(target_fl64->l_start);
4979 fl64.l_len = tswap64(target_fl64->l_len);
4980 fl64.l_pid = tswap32(target_fl64->l_pid);
4981 unlock_user_struct(target_fl64, arg, 0);
4982 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4985 case TARGET_F_GETFL:
4986 ret = get_errno(fcntl(fd, host_cmd, arg));
4988 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
4992 case TARGET_F_SETFL:
4993 ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
4997 case TARGET_F_GETOWN_EX:
4998 ret = get_errno(fcntl(fd, host_cmd, &fox));
5000 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
5001 return -TARGET_EFAULT;
5002 target_fox->type = tswap32(fox.type);
5003 target_fox->pid = tswap32(fox.pid);
5004 unlock_user_struct(target_fox, arg, 1);
5010 case TARGET_F_SETOWN_EX:
5011 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
5012 return -TARGET_EFAULT;
5013 fox.type = tswap32(target_fox->type);
5014 fox.pid = tswap32(target_fox->pid);
5015 unlock_user_struct(target_fox, arg, 0);
5016 ret = get_errno(fcntl(fd, host_cmd, &fox));
/* Plain-integer-argument commands: pass through unchanged. */
5020 case TARGET_F_SETOWN:
5021 case TARGET_F_GETOWN:
5022 case TARGET_F_SETSIG:
5023 case TARGET_F_GETSIG:
5024 case TARGET_F_SETLEASE:
5025 case TARGET_F_GETLEASE:
5026 ret = get_errno(fcntl(fd, host_cmd, arg));
/* Default: forward the original (untranslated) command. */
5030 ret = get_errno(fcntl(fd, cmd, arg));
/*
 * UID/GID width-conversion helpers.  On targets with 16-bit uid_t
 * (USE_UID16) values must be narrowed/widened between the 16-bit guest
 * representation and the host's 32-bit IDs; low2high* preserve the -1
 * "no change" sentinel.  On !USE_UID16 targets all of these are
 * identity functions.  put_user_id picks the matching store width.
 * NOTE(review): function bodies are largely elided in this listing;
 * only signatures and the -1 sentinel checks are visible.
 */
5038 static inline int high2lowuid(int uid)
5046 static inline int high2lowgid(int gid)
5054 static inline int low2highuid(int uid)
5056 if ((int16_t)uid == -1)
5062 static inline int low2highgid(int gid)
5064 if ((int16_t)gid == -1)
5069 static inline int tswapid(int id)
5074 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
5076 #else /* !USE_UID16 */
5077 static inline int high2lowuid(int uid)
5081 static inline int high2lowgid(int gid)
5085 static inline int low2highuid(int uid)
5089 static inline int low2highgid(int gid)
5093 static inline int tswapid(int id)
5098 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
5102 /* We must do direct syscalls for setting UID/GID, because we want to
5103 * implement the Linux system call semantics of "change only for this thread",
5104 * not the libc/POSIX semantics of "change for all threads in process".
5105 * (See http://ewontfix.com/17/ for more details.)
5106 * We use the 32-bit version of the syscalls if present; if it is not
5107 * then either the host architecture supports 32-bit UIDs natively with
5108 * the standard syscall, or the 16-bit UID is the best we can do.
5110 #ifdef __NR_setuid32
5111 #define __NR_sys_setuid __NR_setuid32
5113 #define __NR_sys_setuid __NR_setuid
5115 #ifdef __NR_setgid32
5116 #define __NR_sys_setgid __NR_setgid32
5118 #define __NR_sys_setgid __NR_setgid
5120 #ifdef __NR_setresuid32
5121 #define __NR_sys_setresuid __NR_setresuid32
5123 #define __NR_sys_setresuid __NR_setresuid
5125 #ifdef __NR_setresgid32
5126 #define __NR_sys_setresgid __NR_setresgid32
5128 #define __NR_sys_setresgid __NR_setresgid
/* Raw per-thread wrappers generated from the syscall numbers chosen above;
 * these bypass glibc's set*id() (which signals all threads to change). */
5131 _syscall1(int, sys_setuid, uid_t, uid)
5132 _syscall1(int, sys_setgid, gid_t, gid)
5133 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
5134 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
/* One-time initialisation of the syscall translation layer: registers the
 * thunk struct descriptors from syscall_types.h, builds the reverse errno
 * table, and patches ioctl size fields that are target-dependent. */
5136 void syscall_init(void)
5139 const argtype *arg_type;
5143 thunk_init(STRUCT_MAX);
5145 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
5146 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
5147 #include "syscall_types.h"
5149 #undef STRUCT_SPECIAL
5151 /* Build target_to_host_errno_table[] table from
5152 * host_to_target_errno_table[]. */
5153 for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
5154 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
5157 /* we patch the ioctl size if necessary. We rely on the fact that
5158 no ioctl has all the bits at '1' in the size field */
5160 while (ie->target_cmd != 0) {
5161 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
5162 TARGET_IOC_SIZEMASK) {
/* All-ones size field marks "fill in from the thunk type" entries. */
5163 arg_type = ie->arg_type;
5164 if (arg_type[0] != TYPE_PTR) {
5165 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
5170 size = thunk_type_size(arg_type, 0);
5171 ie->target_cmd = (ie->target_cmd &
5172 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
5173 (size << TARGET_IOC_SIZESHIFT);
5176 /* automatic consistency check if same arch */
5177 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
5178 (defined(__x86_64__) && defined(TARGET_X86_64))
5179 if (unlikely(ie->target_cmd != ie->host_cmd)) {
5180 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
5181 ie->name, ie->target_cmd, ie->host_cmd);
5188 #if TARGET_ABI_BITS == 32
/* On 32-bit ABIs a 64-bit offset is passed as two register words; the
 * word holding the high half depends on the target's endianness. */
5189 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
5191 #ifdef TARGET_WORDS_BIGENDIAN
5192 return ((uint64_t)word0 << 32) | word1;
5194 return ((uint64_t)word1 << 32) | word0;
5197 #else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABI: the offset already fits in the first argument. */
5198 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
5202 #endif /* TARGET_ABI_BITS != 32 */
5204 #ifdef TARGET_NR_truncate64
/* truncate64: rebuild the 64-bit length from two syscall argument words.
 * NOTE(review): on regpairs_aligned() targets the register pair starts on
 * an even register — presumably the elided branch shifts the arguments
 * accordingly; confirm against the full source. */
5205 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
5210 if (regpairs_aligned(cpu_env)) {
5214 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
5218 #ifdef TARGET_NR_ftruncate64
/* ftruncate64: same register-pair reassembly as target_truncate64 above,
 * operating on a file descriptor instead of a path. */
5219 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
5224 if (regpairs_aligned(cpu_env)) {
5228 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
/* Copy a target struct timespec at guest address target_addr into
 * *host_ts, byte-swapping each field via __get_user.
 * Returns -TARGET_EFAULT if the guest memory cannot be accessed. */
5232 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
5233 abi_ulong target_addr)
5235 struct target_timespec *target_ts;
5237 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
5238 return -TARGET_EFAULT;
5239 __get_user(host_ts->tv_sec, &target_ts->tv_sec);
5240 __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
5241 unlock_user_struct(target_ts, target_addr, 0);
/* Write *host_ts into the target struct timespec at guest address
 * target_addr, byte-swapping each field via __put_user.
 * Returns -TARGET_EFAULT if the guest memory cannot be accessed. */
5245 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
5246 struct timespec *host_ts)
5248 struct target_timespec *target_ts;
5250 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
5251 return -TARGET_EFAULT;
5252 __put_user(host_ts->tv_sec, &target_ts->tv_sec);
5253 __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
5254 unlock_user_struct(target_ts, target_addr, 1);
/* Convert a guest struct itimerspec at target_addr into *host_itspec,
 * byte-swapping the four time fields with tswapal.
 * Returns -TARGET_EFAULT if the guest memory cannot be accessed. */
5258 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
5259 abi_ulong target_addr)
5261 struct target_itimerspec *target_itspec;
5263 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
5264 return -TARGET_EFAULT;
5267 host_itspec->it_interval.tv_sec =
5268 tswapal(target_itspec->it_interval.tv_sec);
5269 host_itspec->it_interval.tv_nsec =
5270 tswapal(target_itspec->it_interval.tv_nsec);
5271 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
5272 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
5274 unlock_user_struct(target_itspec, target_addr, 1);
/* Write *host_its into the guest struct itimerspec at target_addr,
 * byte-swapping the four time fields with tswapal (inverse of
 * target_to_host_itimerspec above).  Returns -TARGET_EFAULT on bad
 * guest memory. */
5278 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
5279 struct itimerspec *host_its)
5281 struct target_itimerspec *target_itspec;
5283 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
5284 return -TARGET_EFAULT;
5287 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
5288 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
5290 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
5291 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
5293 unlock_user_struct(target_itspec, target_addr, 0);
/* Convert a guest struct sigevent at target_addr into *host_sevp:
 * sigval payload, signal number (remapped through target_to_host_signal),
 * notify method and target tid.  Returns -TARGET_EFAULT on bad guest
 * memory. */
5297 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
5298 abi_ulong target_addr)
5300 struct target_sigevent *target_sevp;
5302 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
5303 return -TARGET_EFAULT;
5306 /* This union is awkward on 64 bit systems because it has a 32 bit
5307 * integer and a pointer in it; we follow the conversion approach
5308 * used for handling sigval types in signal.c so the guest should get
5309 * the correct value back even if we did a 64 bit byteswap and it's
5310 * using the 32 bit integer.
5312 host_sevp->sigev_value.sival_ptr =
5313 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
5314 host_sevp->sigev_signo =
5315 target_to_host_signal(tswap32(target_sevp->sigev_signo));
5316 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
5317 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
5319 unlock_user_struct(target_sevp, target_addr, 1);
5323 #if defined(TARGET_NR_mlockall)
/* Translate the guest's mlockall() flag bits into the host's MCL_*
 * bitmask, flag by flag. */
5324 static inline int target_to_host_mlockall_arg(int arg)
5328 if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
5329 result |= MCL_CURRENT;
5331 if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
5332 result |= MCL_FUTURE;
/* Copy a host struct stat into the guest's stat64 layout at target_addr.
 * ARM EABI targets get the special target_eabi_stat64 layout; other
 * targets use target_stat64 (or target_stat when the target has no
 * separate 64-bit layout).  Returns -TARGET_EFAULT on bad guest memory. */
5338 static inline abi_long host_to_target_stat64(void *cpu_env,
5339 abi_ulong target_addr,
5340 struct stat *host_st)
5342 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
5343 if (((CPUARMState *)cpu_env)->eabi) {
5344 struct target_eabi_stat64 *target_st;
5346 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
5347 return -TARGET_EFAULT;
5348 memset(target_st, 0, sizeof(struct target_eabi_stat64));
5349 __put_user(host_st->st_dev, &target_st->st_dev);
5350 __put_user(host_st->st_ino, &target_st->st_ino);
5351 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
/* Some layouts carry the inode twice; fill the alternate field too. */
5352 __put_user(host_st->st_ino, &target_st->__st_ino);
5354 __put_user(host_st->st_mode, &target_st->st_mode);
5355 __put_user(host_st->st_nlink, &target_st->st_nlink);
5356 __put_user(host_st->st_uid, &target_st->st_uid);
5357 __put_user(host_st->st_gid, &target_st->st_gid);
5358 __put_user(host_st->st_rdev, &target_st->st_rdev);
5359 __put_user(host_st->st_size, &target_st->st_size);
5360 __put_user(host_st->st_blksize, &target_st->st_blksize);
5361 __put_user(host_st->st_blocks, &target_st->st_blocks);
5362 __put_user(host_st->st_atime, &target_st->target_st_atime);
5363 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
5364 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
5365 unlock_user_struct(target_st, target_addr, 1);
5369 #if defined(TARGET_HAS_STRUCT_STAT64)
5370 struct target_stat64 *target_st;
5372 struct target_stat *target_st;
5375 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
5376 return -TARGET_EFAULT;
5377 memset(target_st, 0, sizeof(*target_st));
5378 __put_user(host_st->st_dev, &target_st->st_dev);
5379 __put_user(host_st->st_ino, &target_st->st_ino);
5380 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
5381 __put_user(host_st->st_ino, &target_st->__st_ino);
5383 __put_user(host_st->st_mode, &target_st->st_mode);
5384 __put_user(host_st->st_nlink, &target_st->st_nlink);
5385 __put_user(host_st->st_uid, &target_st->st_uid);
5386 __put_user(host_st->st_gid, &target_st->st_gid);
5387 __put_user(host_st->st_rdev, &target_st->st_rdev);
5388 /* XXX: better use of kernel struct */
5389 __put_user(host_st->st_size, &target_st->st_size);
5390 __put_user(host_st->st_blksize, &target_st->st_blksize);
5391 __put_user(host_st->st_blocks, &target_st->st_blocks);
5392 __put_user(host_st->st_atime, &target_st->target_st_atime);
5393 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
5394 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
5395 unlock_user_struct(target_st, target_addr, 1);
5401 /* ??? Using host futex calls even when target atomic operations
5402 are not really atomic probably breaks things. However implementing
5403 futexes locally would make futexes shared between multiple processes
5404 tricky. However they're probably useless because guest atomic
5405 operations won't work either. */
/* Emulate the futex(2) syscall by forwarding to the host futex on the
 * host address g2h(uaddr).  Only the val/timeout arguments that the
 * particular FUTEX_* command actually uses are converted. */
5406 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
5407 target_ulong uaddr2, int val3)
5409 struct timespec ts, *pts;
5412 /* ??? We assume FUTEX_* constants are the same on both host
5414 #ifdef FUTEX_CMD_MASK
5415 base_op = op & FUTEX_CMD_MASK;
5421 case FUTEX_WAIT_BITSET:
/* WAIT variants take a real timespec from guest memory. */
5424 target_to_host_timespec(pts, timeout);
5428 return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
5431 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
5433 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
5435 case FUTEX_CMP_REQUEUE:
5437 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
5438 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
5439 But the prototype takes a `struct timespec *'; insert casts
5440 to satisfy the compiler. We do not need to tswap TIMEOUT
5441 since it's not compared to guest memory. */
5442 pts = (struct timespec *)(uintptr_t) timeout;
5443 return get_errno(safe_futex(g2h(uaddr), op, val, pts,
5445 (base_op == FUTEX_CMP_REQUEUE
5449 return -TARGET_ENOSYS;
5452 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate name_to_handle_at(2): read the caller-provided handle_bytes
 * from the guest handle, run the host syscall into a temporary host
 * file_handle, then copy the opaque handle back to the guest with the
 * two header fields byte-swapped, plus the mount id. */
5453 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
5454 abi_long handle, abi_long mount_id,
5457 struct file_handle *target_fh;
5458 struct file_handle *fh;
5462 unsigned int size, total_size;
5464 if (get_user_s32(size, handle)) {
5465 return -TARGET_EFAULT;
5468 name = lock_user_string(pathname);
5470 return -TARGET_EFAULT;
5473 total_size = sizeof(struct file_handle) + size;
5474 target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
5476 unlock_user(name, pathname, 0);
5477 return -TARGET_EFAULT;
/* Host-side scratch handle sized by the guest's handle_bytes. */
5480 fh = g_malloc0(total_size);
5481 fh->handle_bytes = size;
5483 ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
5484 unlock_user(name, pathname, 0);
5486 /* man name_to_handle_at(2):
5487 * Other than the use of the handle_bytes field, the caller should treat
5488 * the file_handle structure as an opaque data type
5491 memcpy(target_fh, fh, total_size);
5492 target_fh->handle_bytes = tswap32(fh->handle_bytes);
5493 target_fh->handle_type = tswap32(fh->handle_type);
/* NOTE(review): fh is g_malloc0'd above; a g_free(fh) is not visible in
 * this excerpt — confirm it is released on all paths. */
5495 unlock_user(target_fh, handle, total_size);
5497 if (put_user_s32(mid, mount_id)) {
5498 return -TARGET_EFAULT;
5506 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate open_by_handle_at(2): copy the guest file_handle (duplicated so
 * the header fields can be un-swapped for the host), translate the open
 * flags, and run the host syscall. */
5507 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
5510 struct file_handle *target_fh;
5511 struct file_handle *fh;
5512 unsigned int size, total_size;
5515 if (get_user_s32(size, handle)) {
5516 return -TARGET_EFAULT;
5519 total_size = sizeof(struct file_handle) + size;
5520 target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
5522 return -TARGET_EFAULT;
/* Work on a host copy; handle_type needs byte-swapping, the payload is
 * opaque and copied verbatim. */
5525 fh = g_memdup(target_fh, total_size);
5526 fh->handle_bytes = size;
5527 fh->handle_type = tswap32(target_fh->handle_type);
5529 ret = get_errno(open_by_handle_at(mount_fd, fh,
5530 target_to_host_bitmask(flags, fcntl_flags_tbl)));
5534 unlock_user(target_fh, handle, total_size);
5540 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
5542 /* signalfd siginfo conversion */
/* Convert a host signalfd_siginfo into the target's byte order and signal
 * numbering.  tinfo and info may alias (the signalfd read path converts
 * in place), so every input field is read from *info* before the
 * corresponding *tinfo* field is written.
 *
 * Fixes vs. the previous revision:
 *  - ssi_errno was converted from tinfo (the destination) instead of info;
 *  - the SIGBUS/BUS_MCEERR_* check read tinfo->ssi_signo/ssi_code, which
 *    only worked because callers happen to convert in place.
 */
static inline void
host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
                                const struct signalfd_siginfo *info)
{
    int sig = host_to_target_signal(info->ssi_signo);

    /* linux/signalfd.h defines a ssi_addr_lsb field which is
     * not defined in sys/signalfd.h but used by some kernels.
     * It sits immediately after ssi_addr, hence the pointer
     * arithmetic below.
     */
#ifdef BUS_MCEERR_AO
    if (info->ssi_signo == SIGBUS &&
        (info->ssi_code == BUS_MCEERR_AR ||
         info->ssi_code == BUS_MCEERR_AO)) {
        uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
        uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
        *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
    }
#endif

    tinfo->ssi_signo = tswap32(sig);
    tinfo->ssi_errno = tswap32(info->ssi_errno);
    tinfo->ssi_code = tswap32(info->ssi_code);
    tinfo->ssi_pid = tswap32(info->ssi_pid);
    tinfo->ssi_uid = tswap32(info->ssi_uid);
    tinfo->ssi_fd = tswap32(info->ssi_fd);
    tinfo->ssi_tid = tswap32(info->ssi_tid);
    tinfo->ssi_band = tswap32(info->ssi_band);
    tinfo->ssi_overrun = tswap32(info->ssi_overrun);
    tinfo->ssi_trapno = tswap32(info->ssi_trapno);
    tinfo->ssi_status = tswap32(info->ssi_status);
    tinfo->ssi_int = tswap32(info->ssi_int);
    tinfo->ssi_ptr = tswap64(info->ssi_ptr);
    tinfo->ssi_utime = tswap64(info->ssi_utime);
    tinfo->ssi_stime = tswap64(info->ssi_stime);
    tinfo->ssi_addr = tswap64(info->ssi_addr);
}
/* fd_trans hook for signalfd descriptors: convert, in place, each
 * signalfd_siginfo record in the buffer just read from the host.
 * Assumes len is a whole number of records — confirm against caller. */
5582 static abi_long host_to_target_data_signalfd(void *buf, size_t len)
5586 for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) {
5587 host_to_target_signalfd_siginfo(buf + i, buf + i);
/* fd translation table entry registered for signalfd file descriptors. */
5593 static TargetFdTrans target_signalfd_trans = {
5594 .host_to_target_data = host_to_target_data_signalfd,
/* Emulate signalfd4(2): validate flags, convert the guest sigset and flag
 * bits to host form, create the host signalfd, and register the data
 * translator so reads get byte-swapped siginfo records. */
5597 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
5600 target_sigset_t *target_mask;
5604 if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
5605 return -TARGET_EINVAL;
5607 if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
5608 return -TARGET_EFAULT;
5611 target_to_host_sigset(&host_mask, target_mask);
5613 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
5615 ret = get_errno(signalfd(fd, &host_mask, host_flags));
5617 fd_trans_register(ret, &target_signalfd_trans);
5620 unlock_user_struct(target_mask, mask, 0);
5626 /* Map host to target signal numbers for the wait family of syscalls.
5627 Assume all other status bits are the same. */
5628 int host_to_target_waitstatus(int status)
5630 if (WIFSIGNALED(status)) {
/* Low 7 bits hold the terminating signal; remap just those. */
5631 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
5633 if (WIFSTOPPED(status)) {
/* Stop signal lives in bits 8-15 of a stopped status. */
5634 return (host_to_target_signal(WSTOPSIG(status)) << 8)
/* Back /proc/self/cmdline for the guest: stream the host file into fd,
 * skipping the first NUL-terminated string (the qemu binary path) so the
 * guest sees its own argv[0] first. */
5640 static int open_self_cmdline(void *cpu_env, int fd)
5643 bool word_skipped = false;
5645 fd_orig = open("/proc/self/cmdline", O_RDONLY);
5655 nb_read = read(fd_orig, buf, sizeof(buf));
/* NOTE(review): fd_orig is overwritten with close()'s return value here —
 * presumably only used for the error-path return; confirm. */
5658 fd_orig = close(fd_orig);
5661 } else if (nb_read == 0) {
5665 if (!word_skipped) {
5666 /* Skip the first string, which is the path to qemu-*-static
5667 instead of the actual command. */
5668 cp_buf = memchr(buf, 0, sizeof(buf));
5670 /* Null byte found, skip one string */
5672 nb_read -= cp_buf - buf;
5673 word_skipped = true;
5678 if (write(fd, cp_buf, nb_read) != nb_read) {
5687 return close(fd_orig);
/* Back /proc/self/maps for the guest: parse the host maps, keep only
 * ranges that correspond to valid guest memory, and rewrite them with
 * guest (h2g) addresses; the guest stack mapping is labelled "[stack]". */
5690 static int open_self_maps(void *cpu_env, int fd)
5692 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
5693 TaskState *ts = cpu->opaque;
5699 fp = fopen("/proc/self/maps", "r");
5704 while ((read = getline(&line, &len, fp)) != -1) {
5705 int fields, dev_maj, dev_min, inode;
5706 uint64_t min, max, offset;
5707 char flag_r, flag_w, flag_x, flag_p;
5708 char path[512] = "";
/* NOTE(review): "%512s" can store up to 512 chars plus the NUL into
 * path[512] — one byte too many; the width should be 511. */
5709 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
5710 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
5711 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
5713 if ((fields < 10) || (fields > 11)) {
5716 if (h2g_valid(min)) {
5717 int flags = page_get_flags(h2g(min));
/* Clamp ranges that extend past the guest address space. */
5718 max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX);
5719 if (page_check_range(h2g(min), max - min, flags) == -1) {
5722 if (h2g(min) == ts->info->stack_limit) {
5723 pstrcpy(path, sizeof(path), " [stack]");
5725 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
5726 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
5727 h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
5728 flag_x, flag_p, offset, dev_maj, dev_min, inode,
5729 path[0] ? " " : "", path);
/* Back /proc/self/stat for the guest: emit the 44 space-separated fields,
 * faking only pid (field 0), comm (field 1) and startstack (field 27)
 * from guest-side state; every other field is written as 0. */
5739 static int open_self_stat(void *cpu_env, int fd)
5741 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
5742 TaskState *ts = cpu->opaque;
5743 abi_ulong start_stack = ts->info->start_stack;
5746 for (i = 0; i < 44; i++) {
5754 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
5755 } else if (i == 1) {
5757 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
5758 } else if (i == 27) {
5761 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
5763 /* for the rest, there is MasterCard */
5764 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
5768 if (write(fd, buf, len) != len) {
/* Back /proc/self/auxv for the guest: the auxiliary vector was saved on
 * the guest stack at exec time; copy it verbatim into fd and rewind. */
5776 static int open_self_auxv(void *cpu_env, int fd)
5778 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
5779 TaskState *ts = cpu->opaque;
5780 abi_ulong auxv = ts->info->saved_auxv;
5781 abi_ulong len = ts->info->auxv_len;
5785 * Auxiliary vector is stored in target process stack.
5786 * read in whole auxv vector and copy it to file
5788 ptr = lock_user(VERIFY_READ, auxv, len, 0);
5792 r = write(fd, ptr, len);
5799 lseek(fd, 0, SEEK_SET);
5800 unlock_user(ptr, auxv, len);
/* Return non-zero when filename names /proc/<this-process>/<entry>,
 * accepting both the "self/" alias and the literal pid of this process. */
5806 static int is_proc_myself(const char *filename, const char *entry)
5808 if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
5809 filename += strlen("/proc/");
5810 if (!strncmp(filename, "self/", strlen("self/"))) {
5811 filename += strlen("self/");
5812 } else if (*filename >= '1' && *filename <= '9') {
5814 snprintf(myself, sizeof(myself), "%d/", getpid());
5815 if (!strncmp(filename, myself, strlen(myself))) {
5816 filename += strlen(myself);
5823 if (!strcmp(filename, entry)) {
5830 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Return non-zero when FILENAME is exactly the /proc path ENTRY. */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
/* Back /proc/net/route for a cross-endian guest: copy the header line
 * through, then re-emit each route entry with the address fields
 * (destination, gateway, netmask) byte-swapped to the guest's order. */
5836 static int open_net_route(void *cpu_env, int fd)
5843 fp = fopen("/proc/net/route", "r");
/* First getline() is the column-header line; pass it through unchanged. */
5850 read = getline(&line, &len, fp);
5851 dprintf(fd, "%s", line);
5855 while ((read = getline(&line, &len, fp)) != -1) {
5857 uint32_t dest, gw, mask;
5858 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
/* NOTE(review): the leading "%s" has no field width, so a long interface
 * name could overflow the iface buffer (declared in an elided line). */
5859 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
5860 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
5861 &mask, &mtu, &window, &irtt);
5862 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
5863 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
5864 metric, tswap32(mask), mtu, window, irtt);
/* Emulate openat(2) with /proc interception: paths naming this process's
 * /proc entries (maps, stat, auxv, cmdline — and /proc/net/route when the
 * endiannesses differ) are served from a temp file filled by the matching
 * fake_open handler; /proc/self/exe is redirected to the real guest
 * binary; everything else falls through to the host openat. */
5874 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
5877 const char *filename;
5878 int (*fill)(void *cpu_env, int fd);
5879 int (*cmp)(const char *s1, const char *s2);
5881 const struct fake_open *fake_open;
5882 static const struct fake_open fakes[] = {
5883 { "maps", open_self_maps, is_proc_myself },
5884 { "stat", open_self_stat, is_proc_myself },
5885 { "auxv", open_self_auxv, is_proc_myself },
5886 { "cmdline", open_self_cmdline, is_proc_myself },
5887 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
5888 { "/proc/net/route", open_net_route, is_proc },
5890 { NULL, NULL, NULL }
5893 if (is_proc_myself(pathname, "exe")) {
5894 int execfd = qemu_getauxval(AT_EXECFD);
5895 return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
5898 for (fake_open = fakes; fake_open->filename; fake_open++) {
5899 if (fake_open->cmp(pathname, fake_open->filename)) {
5904 if (fake_open->filename) {
5906 char filename[PATH_MAX];
5909 /* create temporary file to map stat to */
5910 tmpdir = getenv("TMPDIR");
5913 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
5914 fd = mkstemp(filename);
5920 if ((r = fake_open->fill(cpu_env, fd))) {
5926 lseek(fd, 0, SEEK_SET);
5931 return safe_openat(dirfd, path(pathname), flags, mode);
/* Guest-visible POSIX timer IDs are the internal table index tagged with
 * TIMER_MAGIC in the upper 16 bits, so stray integers are rejected. */
5934 #define TIMER_MAGIC 0x0caf0000
5935 #define TIMER_MAGIC_MASK 0xffff0000
5937 /* Convert QEMU provided timer ID back to internal 16bit index format */
5938 static target_timer_t get_timer_id(abi_long arg)
5940 target_timer_t timerid = arg;
5942 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
5943 return -TARGET_EINVAL;
5948 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
5949 return -TARGET_EINVAL;
5955 /* do_syscall() should always have a single exit point at the end so
5956 that actions, such as logging of syscall results, can be performed.
5957 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
5958 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
5959 abi_long arg2, abi_long arg3, abi_long arg4,
5960 abi_long arg5, abi_long arg6, abi_long arg7,
5963 CPUState *cpu = ENV_GET_CPU(cpu_env);
5969 #if defined(DEBUG_ERESTARTSYS)
5970 /* Debug-only code for exercising the syscall-restart code paths
5971 * in the per-architecture cpu main loops: restart every syscall
5972 * the guest makes once before letting it through.
5979 return -TARGET_ERESTARTSYS;
5985 gemu_log("syscall %d", num);
5988 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
5991 case TARGET_NR_exit:
5992 /* In old applications this may be used to implement _exit(2).
5993 However in threaded applictions it is used for thread termination,
5994 and _exit_group is used for application termination.
5995 Do thread termination if we have more then one thread. */
5996 /* FIXME: This probably breaks if a signal arrives. We should probably
5997 be disabling signals. */
5998 if (CPU_NEXT(first_cpu)) {
6002 /* Remove the CPU from the list. */
6003 QTAILQ_REMOVE(&cpus, cpu, node);
6006 if (ts->child_tidptr) {
6007 put_user_u32(0, ts->child_tidptr);
6008 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
6012 object_unref(OBJECT(cpu));
6014 rcu_unregister_thread();
6020 gdb_exit(cpu_env, arg1);
6022 ret = 0; /* avoid warning */
6024 case TARGET_NR_read:
6028 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
6030 ret = get_errno(safe_read(arg1, p, arg3));
6032 fd_trans_host_to_target_data(arg1)) {
6033 ret = fd_trans_host_to_target_data(arg1)(p, ret);
6035 unlock_user(p, arg2, ret);
6038 case TARGET_NR_write:
6039 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
6041 ret = get_errno(safe_write(arg1, p, arg3));
6042 unlock_user(p, arg2, 0);
6044 #ifdef TARGET_NR_open
6045 case TARGET_NR_open:
6046 if (!(p = lock_user_string(arg1)))
6048 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
6049 target_to_host_bitmask(arg2, fcntl_flags_tbl),
6051 fd_trans_unregister(ret);
6052 unlock_user(p, arg1, 0);
6055 case TARGET_NR_openat:
6056 if (!(p = lock_user_string(arg2)))
6058 ret = get_errno(do_openat(cpu_env, arg1, p,
6059 target_to_host_bitmask(arg3, fcntl_flags_tbl),
6061 fd_trans_unregister(ret);
6062 unlock_user(p, arg2, 0);
6064 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6065 case TARGET_NR_name_to_handle_at:
6066 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
6069 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6070 case TARGET_NR_open_by_handle_at:
6071 ret = do_open_by_handle_at(arg1, arg2, arg3);
6072 fd_trans_unregister(ret);
6075 case TARGET_NR_close:
6076 fd_trans_unregister(arg1);
6077 ret = get_errno(close(arg1));
6082 #ifdef TARGET_NR_fork
6083 case TARGET_NR_fork:
6084 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
6087 #ifdef TARGET_NR_waitpid
6088 case TARGET_NR_waitpid:
6091 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
6092 if (!is_error(ret) && arg2 && ret
6093 && put_user_s32(host_to_target_waitstatus(status), arg2))
6098 #ifdef TARGET_NR_waitid
6099 case TARGET_NR_waitid:
6103 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
6104 if (!is_error(ret) && arg3 && info.si_pid != 0) {
6105 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
6107 host_to_target_siginfo(p, &info);
6108 unlock_user(p, arg3, sizeof(target_siginfo_t));
6113 #ifdef TARGET_NR_creat /* not on alpha */
6114 case TARGET_NR_creat:
6115 if (!(p = lock_user_string(arg1)))
6117 ret = get_errno(creat(p, arg2));
6118 fd_trans_unregister(ret);
6119 unlock_user(p, arg1, 0);
6122 #ifdef TARGET_NR_link
6123 case TARGET_NR_link:
6126 p = lock_user_string(arg1);
6127 p2 = lock_user_string(arg2);
6129 ret = -TARGET_EFAULT;
6131 ret = get_errno(link(p, p2));
6132 unlock_user(p2, arg2, 0);
6133 unlock_user(p, arg1, 0);
6137 #if defined(TARGET_NR_linkat)
6138 case TARGET_NR_linkat:
6143 p = lock_user_string(arg2);
6144 p2 = lock_user_string(arg4);
6146 ret = -TARGET_EFAULT;
6148 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
6149 unlock_user(p, arg2, 0);
6150 unlock_user(p2, arg4, 0);
6154 #ifdef TARGET_NR_unlink
6155 case TARGET_NR_unlink:
6156 if (!(p = lock_user_string(arg1)))
6158 ret = get_errno(unlink(p));
6159 unlock_user(p, arg1, 0);
6162 #if defined(TARGET_NR_unlinkat)
6163 case TARGET_NR_unlinkat:
6164 if (!(p = lock_user_string(arg2)))
6166 ret = get_errno(unlinkat(arg1, p, arg3));
6167 unlock_user(p, arg2, 0);
6170 case TARGET_NR_execve:
6172 char **argp, **envp;
6175 abi_ulong guest_argp;
6176 abi_ulong guest_envp;
6183 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
6184 if (get_user_ual(addr, gp))
6192 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
6193 if (get_user_ual(addr, gp))
6200 argp = alloca((argc + 1) * sizeof(void *));
6201 envp = alloca((envc + 1) * sizeof(void *));
6203 for (gp = guest_argp, q = argp; gp;
6204 gp += sizeof(abi_ulong), q++) {
6205 if (get_user_ual(addr, gp))
6209 if (!(*q = lock_user_string(addr)))
6211 total_size += strlen(*q) + 1;
6215 for (gp = guest_envp, q = envp; gp;
6216 gp += sizeof(abi_ulong), q++) {
6217 if (get_user_ual(addr, gp))
6221 if (!(*q = lock_user_string(addr)))
6223 total_size += strlen(*q) + 1;
6227 if (!(p = lock_user_string(arg1)))
6229 /* Although execve() is not an interruptible syscall it is
6230 * a special case where we must use the safe_syscall wrapper:
6231 * if we allow a signal to happen before we make the host
6232 * syscall then we will 'lose' it, because at the point of
6233 * execve the process leaves QEMU's control. So we use the
6234 * safe syscall wrapper to ensure that we either take the
6235 * signal as a guest signal, or else it does not happen
6236 * before the execve completes and makes it the other
6237 * program's problem.
6239 ret = get_errno(safe_execve(p, argp, envp));
6240 unlock_user(p, arg1, 0);
6245 ret = -TARGET_EFAULT;
6248 for (gp = guest_argp, q = argp; *q;
6249 gp += sizeof(abi_ulong), q++) {
6250 if (get_user_ual(addr, gp)
6253 unlock_user(*q, addr, 0);
6255 for (gp = guest_envp, q = envp; *q;
6256 gp += sizeof(abi_ulong), q++) {
6257 if (get_user_ual(addr, gp)
6260 unlock_user(*q, addr, 0);
6264 case TARGET_NR_chdir:
6265 if (!(p = lock_user_string(arg1)))
6267 ret = get_errno(chdir(p));
6268 unlock_user(p, arg1, 0);
6270 #ifdef TARGET_NR_time
6271 case TARGET_NR_time:
6274 ret = get_errno(time(&host_time));
6277 && put_user_sal(host_time, arg1))
6282 #ifdef TARGET_NR_mknod
6283 case TARGET_NR_mknod:
6284 if (!(p = lock_user_string(arg1)))
6286 ret = get_errno(mknod(p, arg2, arg3));
6287 unlock_user(p, arg1, 0);
6290 #if defined(TARGET_NR_mknodat)
6291 case TARGET_NR_mknodat:
6292 if (!(p = lock_user_string(arg2)))
6294 ret = get_errno(mknodat(arg1, p, arg3, arg4));
6295 unlock_user(p, arg2, 0);
6298 #ifdef TARGET_NR_chmod
6299 case TARGET_NR_chmod:
6300 if (!(p = lock_user_string(arg1)))
6302 ret = get_errno(chmod(p, arg2));
6303 unlock_user(p, arg1, 0);
6306 #ifdef TARGET_NR_break
6307 case TARGET_NR_break:
6310 #ifdef TARGET_NR_oldstat
6311 case TARGET_NR_oldstat:
6314 case TARGET_NR_lseek:
6315 ret = get_errno(lseek(arg1, arg2, arg3));
6317 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
6318 /* Alpha specific */
6319 case TARGET_NR_getxpid:
6320 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
6321 ret = get_errno(getpid());
6324 #ifdef TARGET_NR_getpid
6325 case TARGET_NR_getpid:
6326 ret = get_errno(getpid());
6329 case TARGET_NR_mount:
6331 /* need to look at the data field */
6335 p = lock_user_string(arg1);
6343 p2 = lock_user_string(arg2);
6346 unlock_user(p, arg1, 0);
6352 p3 = lock_user_string(arg3);
6355 unlock_user(p, arg1, 0);
6357 unlock_user(p2, arg2, 0);
6364 /* FIXME - arg5 should be locked, but it isn't clear how to
6365 * do that since it's not guaranteed to be a NULL-terminated
6369 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
6371 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
6373 ret = get_errno(ret);
6376 unlock_user(p, arg1, 0);
6378 unlock_user(p2, arg2, 0);
6380 unlock_user(p3, arg3, 0);
6384 #ifdef TARGET_NR_umount
6385 case TARGET_NR_umount:
6386 if (!(p = lock_user_string(arg1)))
6388 ret = get_errno(umount(p));
6389 unlock_user(p, arg1, 0);
6392 #ifdef TARGET_NR_stime /* not on alpha */
6393 case TARGET_NR_stime:
6396 if (get_user_sal(host_time, arg1))
6398 ret = get_errno(stime(&host_time));
6402 case TARGET_NR_ptrace:
6404 #ifdef TARGET_NR_alarm /* not on alpha */
6405 case TARGET_NR_alarm:
6409 #ifdef TARGET_NR_oldfstat
6410 case TARGET_NR_oldfstat:
6413 #ifdef TARGET_NR_pause /* not on alpha */
6414 case TARGET_NR_pause:
6415 ret = get_errno(pause());
6418 #ifdef TARGET_NR_utime
6419 case TARGET_NR_utime:
6421 struct utimbuf tbuf, *host_tbuf;
6422 struct target_utimbuf *target_tbuf;
6424 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
6426 tbuf.actime = tswapal(target_tbuf->actime);
6427 tbuf.modtime = tswapal(target_tbuf->modtime);
6428 unlock_user_struct(target_tbuf, arg2, 0);
6433 if (!(p = lock_user_string(arg1)))
6435 ret = get_errno(utime(p, host_tbuf));
6436 unlock_user(p, arg1, 0);
6440 #ifdef TARGET_NR_utimes
6441 case TARGET_NR_utimes:
6443 struct timeval *tvp, tv[2];
6445 if (copy_from_user_timeval(&tv[0], arg2)
6446 || copy_from_user_timeval(&tv[1],
6447 arg2 + sizeof(struct target_timeval)))
6453 if (!(p = lock_user_string(arg1)))
6455 ret = get_errno(utimes(p, tvp));
6456 unlock_user(p, arg1, 0);
6460 #if defined(TARGET_NR_futimesat)
6461 case TARGET_NR_futimesat:
6463 struct timeval *tvp, tv[2];
6465 if (copy_from_user_timeval(&tv[0], arg3)
6466 || copy_from_user_timeval(&tv[1],
6467 arg3 + sizeof(struct target_timeval)))
6473 if (!(p = lock_user_string(arg2)))
6475 ret = get_errno(futimesat(arg1, path(p), tvp));
6476 unlock_user(p, arg2, 0);
6480 #ifdef TARGET_NR_stty
6481 case TARGET_NR_stty:
6484 #ifdef TARGET_NR_gtty
6485 case TARGET_NR_gtty:
6488 #ifdef TARGET_NR_access
6489 case TARGET_NR_access:
6490 if (!(p = lock_user_string(arg1)))
6492 ret = get_errno(access(path(p), arg2));
6493 unlock_user(p, arg1, 0);
6496 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
6497 case TARGET_NR_faccessat:
6498 if (!(p = lock_user_string(arg2)))
6500 ret = get_errno(faccessat(arg1, p, arg3, 0));
6501 unlock_user(p, arg2, 0);
6504 #ifdef TARGET_NR_nice /* not on alpha */
6505 case TARGET_NR_nice:
6506 ret = get_errno(nice(arg1));
6509 #ifdef TARGET_NR_ftime
6510 case TARGET_NR_ftime:
6513 case TARGET_NR_sync:
6517 case TARGET_NR_kill:
6518 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
6520 #ifdef TARGET_NR_rename
6521 case TARGET_NR_rename:
6524 p = lock_user_string(arg1);
6525 p2 = lock_user_string(arg2);
6527 ret = -TARGET_EFAULT;
6529 ret = get_errno(rename(p, p2));
6530 unlock_user(p2, arg2, 0);
6531 unlock_user(p, arg1, 0);
6535 #if defined(TARGET_NR_renameat)
6536 case TARGET_NR_renameat:
6539 p = lock_user_string(arg2);
6540 p2 = lock_user_string(arg4);
6542 ret = -TARGET_EFAULT;
6544 ret = get_errno(renameat(arg1, p, arg3, p2));
6545 unlock_user(p2, arg4, 0);
6546 unlock_user(p, arg2, 0);
6550 #ifdef TARGET_NR_mkdir
6551 case TARGET_NR_mkdir:
6552 if (!(p = lock_user_string(arg1)))
6554 ret = get_errno(mkdir(p, arg2));
6555 unlock_user(p, arg1, 0);
6558 #if defined(TARGET_NR_mkdirat)
6559 case TARGET_NR_mkdirat:
6560 if (!(p = lock_user_string(arg2)))
6562 ret = get_errno(mkdirat(arg1, p, arg3));
6563 unlock_user(p, arg2, 0);
6566 #ifdef TARGET_NR_rmdir
6567 case TARGET_NR_rmdir:
6568 if (!(p = lock_user_string(arg1)))
6570 ret = get_errno(rmdir(p));
6571 unlock_user(p, arg1, 0);
6575 ret = get_errno(dup(arg1));
6577 fd_trans_dup(arg1, ret);
6580 #ifdef TARGET_NR_pipe
6581 case TARGET_NR_pipe:
6582 ret = do_pipe(cpu_env, arg1, 0, 0);
6585 #ifdef TARGET_NR_pipe2
6586 case TARGET_NR_pipe2:
6587 ret = do_pipe(cpu_env, arg1,
6588 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
6591 case TARGET_NR_times:
6593 struct target_tms *tmsp;
6595 ret = get_errno(times(&tms));
6597 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
6600 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
6601 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
6602 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
6603 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
6606 ret = host_to_target_clock_t(ret);
6609 #ifdef TARGET_NR_prof
6610 case TARGET_NR_prof:
6613 #ifdef TARGET_NR_signal
6614 case TARGET_NR_signal:
6617 case TARGET_NR_acct:
6619 ret = get_errno(acct(NULL));
6621 if (!(p = lock_user_string(arg1)))
6623 ret = get_errno(acct(path(p)));
6624 unlock_user(p, arg1, 0);
6627 #ifdef TARGET_NR_umount2
6628 case TARGET_NR_umount2:
6629 if (!(p = lock_user_string(arg1)))
6631 ret = get_errno(umount2(p, arg2));
6632 unlock_user(p, arg1, 0);
6635 #ifdef TARGET_NR_lock
6636 case TARGET_NR_lock:
6639 case TARGET_NR_ioctl:
6640 ret = do_ioctl(arg1, arg2, arg3);
6642 case TARGET_NR_fcntl:
6643 ret = do_fcntl(arg1, arg2, arg3);
6645 #ifdef TARGET_NR_mpx
6649 case TARGET_NR_setpgid:
6650 ret = get_errno(setpgid(arg1, arg2));
6652 #ifdef TARGET_NR_ulimit
6653 case TARGET_NR_ulimit:
6656 #ifdef TARGET_NR_oldolduname
6657 case TARGET_NR_oldolduname:
6660 case TARGET_NR_umask:
6661 ret = get_errno(umask(arg1));
6663 case TARGET_NR_chroot:
6664 if (!(p = lock_user_string(arg1)))
6666 ret = get_errno(chroot(p));
6667 unlock_user(p, arg1, 0);
6669 #ifdef TARGET_NR_ustat
6670 case TARGET_NR_ustat:
6673 #ifdef TARGET_NR_dup2
6674 case TARGET_NR_dup2:
6675 ret = get_errno(dup2(arg1, arg2));
6677 fd_trans_dup(arg1, arg2);
6681 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
6682 case TARGET_NR_dup3:
6683 ret = get_errno(dup3(arg1, arg2, arg3));
6685 fd_trans_dup(arg1, arg2);
6689 #ifdef TARGET_NR_getppid /* not on alpha */
6690 case TARGET_NR_getppid:
6691 ret = get_errno(getppid());
6694 #ifdef TARGET_NR_getpgrp
6695 case TARGET_NR_getpgrp:
6696 ret = get_errno(getpgrp());
6699 case TARGET_NR_setsid:
6700 ret = get_errno(setsid());
6702 #ifdef TARGET_NR_sigaction
6703 case TARGET_NR_sigaction:
6705 #if defined(TARGET_ALPHA)
6706 struct target_sigaction act, oact, *pact = 0;
6707 struct target_old_sigaction *old_act;
6709 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
6711 act._sa_handler = old_act->_sa_handler;
6712 target_siginitset(&act.sa_mask, old_act->sa_mask);
6713 act.sa_flags = old_act->sa_flags;
6714 act.sa_restorer = 0;
6715 unlock_user_struct(old_act, arg2, 0);
6718 ret = get_errno(do_sigaction(arg1, pact, &oact));
6719 if (!is_error(ret) && arg3) {
6720 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
6722 old_act->_sa_handler = oact._sa_handler;
6723 old_act->sa_mask = oact.sa_mask.sig[0];
6724 old_act->sa_flags = oact.sa_flags;
6725 unlock_user_struct(old_act, arg3, 1);
6727 #elif defined(TARGET_MIPS)
6728 struct target_sigaction act, oact, *pact, *old_act;
6731 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
6733 act._sa_handler = old_act->_sa_handler;
6734 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
6735 act.sa_flags = old_act->sa_flags;
6736 unlock_user_struct(old_act, arg2, 0);
6742 ret = get_errno(do_sigaction(arg1, pact, &oact));
6744 if (!is_error(ret) && arg3) {
6745 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
6747 old_act->_sa_handler = oact._sa_handler;
6748 old_act->sa_flags = oact.sa_flags;
6749 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
6750 old_act->sa_mask.sig[1] = 0;
6751 old_act->sa_mask.sig[2] = 0;
6752 old_act->sa_mask.sig[3] = 0;
6753 unlock_user_struct(old_act, arg3, 1);
6756 struct target_old_sigaction *old_act;
6757 struct target_sigaction act, oact, *pact;
6759 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
6761 act._sa_handler = old_act->_sa_handler;
6762 target_siginitset(&act.sa_mask, old_act->sa_mask);
6763 act.sa_flags = old_act->sa_flags;
6764 act.sa_restorer = old_act->sa_restorer;
6765 unlock_user_struct(old_act, arg2, 0);
6770 ret = get_errno(do_sigaction(arg1, pact, &oact));
6771 if (!is_error(ret) && arg3) {
6772 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
6774 old_act->_sa_handler = oact._sa_handler;
6775 old_act->sa_mask = oact.sa_mask.sig[0];
6776 old_act->sa_flags = oact.sa_flags;
6777 old_act->sa_restorer = oact.sa_restorer;
6778 unlock_user_struct(old_act, arg3, 1);
6784 case TARGET_NR_rt_sigaction:
6786 #if defined(TARGET_ALPHA)
6787 struct target_sigaction act, oact, *pact = 0;
6788 struct target_rt_sigaction *rt_act;
6789 /* ??? arg4 == sizeof(sigset_t). */
6791 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
6793 act._sa_handler = rt_act->_sa_handler;
6794 act.sa_mask = rt_act->sa_mask;
6795 act.sa_flags = rt_act->sa_flags;
6796 act.sa_restorer = arg5;
6797 unlock_user_struct(rt_act, arg2, 0);
6800 ret = get_errno(do_sigaction(arg1, pact, &oact));
6801 if (!is_error(ret) && arg3) {
6802 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
6804 rt_act->_sa_handler = oact._sa_handler;
6805 rt_act->sa_mask = oact.sa_mask;
6806 rt_act->sa_flags = oact.sa_flags;
6807 unlock_user_struct(rt_act, arg3, 1);
6810 struct target_sigaction *act;
6811 struct target_sigaction *oact;
6814 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
6819 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
6820 ret = -TARGET_EFAULT;
6821 goto rt_sigaction_fail;
6825 ret = get_errno(do_sigaction(arg1, act, oact));
6828 unlock_user_struct(act, arg2, 0);
6830 unlock_user_struct(oact, arg3, 1);
6834 #ifdef TARGET_NR_sgetmask /* not on alpha */
6835 case TARGET_NR_sgetmask:
6838 abi_ulong target_set;
6839 do_sigprocmask(0, NULL, &cur_set);
6840 host_to_target_old_sigset(&target_set, &cur_set);
6845 #ifdef TARGET_NR_ssetmask /* not on alpha */
6846 case TARGET_NR_ssetmask:
6848 sigset_t set, oset, cur_set;
6849 abi_ulong target_set = arg1;
6850 do_sigprocmask(0, NULL, &cur_set);
6851 target_to_host_old_sigset(&set, &target_set);
6852 sigorset(&set, &set, &cur_set);
6853 do_sigprocmask(SIG_SETMASK, &set, &oset);
6854 host_to_target_old_sigset(&target_set, &oset);
6859 #ifdef TARGET_NR_sigprocmask
6860 case TARGET_NR_sigprocmask:
6862 #if defined(TARGET_ALPHA)
6863 sigset_t set, oldset;
6868 case TARGET_SIG_BLOCK:
6871 case TARGET_SIG_UNBLOCK:
6874 case TARGET_SIG_SETMASK:
6878 ret = -TARGET_EINVAL;
6882 target_to_host_old_sigset(&set, &mask);
6884 ret = get_errno(do_sigprocmask(how, &set, &oldset));
6885 if (!is_error(ret)) {
6886 host_to_target_old_sigset(&mask, &oldset);
6888 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
6891 sigset_t set, oldset, *set_ptr;
6896 case TARGET_SIG_BLOCK:
6899 case TARGET_SIG_UNBLOCK:
6902 case TARGET_SIG_SETMASK:
6906 ret = -TARGET_EINVAL;
6909 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
6911 target_to_host_old_sigset(&set, p);
6912 unlock_user(p, arg2, 0);
6918 ret = get_errno(do_sigprocmask(how, set_ptr, &oldset));
6919 if (!is_error(ret) && arg3) {
6920 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
6922 host_to_target_old_sigset(p, &oldset);
6923 unlock_user(p, arg3, sizeof(target_sigset_t));
6929 case TARGET_NR_rt_sigprocmask:
6932 sigset_t set, oldset, *set_ptr;
6936 case TARGET_SIG_BLOCK:
6939 case TARGET_SIG_UNBLOCK:
6942 case TARGET_SIG_SETMASK:
6946 ret = -TARGET_EINVAL;
6949 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
6951 target_to_host_sigset(&set, p);
6952 unlock_user(p, arg2, 0);
6958 ret = get_errno(do_sigprocmask(how, set_ptr, &oldset));
6959 if (!is_error(ret) && arg3) {
6960 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
6962 host_to_target_sigset(p, &oldset);
6963 unlock_user(p, arg3, sizeof(target_sigset_t));
6967 #ifdef TARGET_NR_sigpending
6968 case TARGET_NR_sigpending:
6971 ret = get_errno(sigpending(&set));
6972 if (!is_error(ret)) {
6973 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6975 host_to_target_old_sigset(p, &set);
6976 unlock_user(p, arg1, sizeof(target_sigset_t));
6981 case TARGET_NR_rt_sigpending:
6984 ret = get_errno(sigpending(&set));
6985 if (!is_error(ret)) {
6986 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6988 host_to_target_sigset(p, &set);
6989 unlock_user(p, arg1, sizeof(target_sigset_t));
6993 #ifdef TARGET_NR_sigsuspend
6994 case TARGET_NR_sigsuspend:
6997 #if defined(TARGET_ALPHA)
6998 abi_ulong mask = arg1;
6999 target_to_host_old_sigset(&set, &mask);
7001 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
7003 target_to_host_old_sigset(&set, p);
7004 unlock_user(p, arg1, 0);
7006 ret = get_errno(sigsuspend(&set));
7010 case TARGET_NR_rt_sigsuspend:
7013 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
7015 target_to_host_sigset(&set, p);
7016 unlock_user(p, arg1, 0);
7017 ret = get_errno(sigsuspend(&set));
7020 case TARGET_NR_rt_sigtimedwait:
7023 struct timespec uts, *puts;
7026 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
7028 target_to_host_sigset(&set, p);
7029 unlock_user(p, arg1, 0);
7032 target_to_host_timespec(puts, arg3);
7036 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
7037 if (!is_error(ret)) {
7039 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
7044 host_to_target_siginfo(p, &uinfo);
7045 unlock_user(p, arg2, sizeof(target_siginfo_t));
7047 ret = host_to_target_signal(ret);
7051 case TARGET_NR_rt_sigqueueinfo:
7054 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1)))
7056 target_to_host_siginfo(&uinfo, p);
7057 unlock_user(p, arg1, 0);
7058 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
7061 #ifdef TARGET_NR_sigreturn
7062 case TARGET_NR_sigreturn:
7063 ret = do_sigreturn(cpu_env);
7066 case TARGET_NR_rt_sigreturn:
7067 ret = do_rt_sigreturn(cpu_env);
7069 case TARGET_NR_sethostname:
7070 if (!(p = lock_user_string(arg1)))
7072 ret = get_errno(sethostname(p, arg2));
7073 unlock_user(p, arg1, 0);
7075 case TARGET_NR_setrlimit:
7077 int resource = target_to_host_resource(arg1);
7078 struct target_rlimit *target_rlim;
7080 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
7082 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
7083 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
7084 unlock_user_struct(target_rlim, arg2, 0);
7085 ret = get_errno(setrlimit(resource, &rlim));
7088 case TARGET_NR_getrlimit:
7090 int resource = target_to_host_resource(arg1);
7091 struct target_rlimit *target_rlim;
7094 ret = get_errno(getrlimit(resource, &rlim));
7095 if (!is_error(ret)) {
7096 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
7098 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
7099 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
7100 unlock_user_struct(target_rlim, arg2, 1);
7104 case TARGET_NR_getrusage:
7106 struct rusage rusage;
7107 ret = get_errno(getrusage(arg1, &rusage));
7108 if (!is_error(ret)) {
7109 ret = host_to_target_rusage(arg2, &rusage);
7113 case TARGET_NR_gettimeofday:
7116 ret = get_errno(gettimeofday(&tv, NULL));
7117 if (!is_error(ret)) {
7118 if (copy_to_user_timeval(arg1, &tv))
7123 case TARGET_NR_settimeofday:
7125 struct timeval tv, *ptv = NULL;
7126 struct timezone tz, *ptz = NULL;
7129 if (copy_from_user_timeval(&tv, arg1)) {
7136 if (copy_from_user_timezone(&tz, arg2)) {
7142 ret = get_errno(settimeofday(ptv, ptz));
7145 #if defined(TARGET_NR_select)
7146 case TARGET_NR_select:
7147 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
7148 ret = do_select(arg1, arg2, arg3, arg4, arg5);
7151 struct target_sel_arg_struct *sel;
7152 abi_ulong inp, outp, exp, tvp;
7155 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
7157 nsel = tswapal(sel->n);
7158 inp = tswapal(sel->inp);
7159 outp = tswapal(sel->outp);
7160 exp = tswapal(sel->exp);
7161 tvp = tswapal(sel->tvp);
7162 unlock_user_struct(sel, arg1, 0);
7163 ret = do_select(nsel, inp, outp, exp, tvp);
7168 #ifdef TARGET_NR_pselect6
7169 case TARGET_NR_pselect6:
7171 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
7172 fd_set rfds, wfds, efds;
7173 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
7174 struct timespec ts, *ts_ptr;
7177 * The 6th arg is actually two args smashed together,
7178 * so we cannot use the C library.
7186 abi_ulong arg_sigset, arg_sigsize, *arg7;
7187 target_sigset_t *target_sigset;
7195 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
7199 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
7203 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
7209 * This takes a timespec, and not a timeval, so we cannot
7210 * use the do_select() helper ...
7213 if (target_to_host_timespec(&ts, ts_addr)) {
7221 /* Extract the two packed args for the sigset */
7224 sig.size = _NSIG / 8;
7226 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
7230 arg_sigset = tswapal(arg7[0]);
7231 arg_sigsize = tswapal(arg7[1]);
7232 unlock_user(arg7, arg6, 0);
7236 if (arg_sigsize != sizeof(*target_sigset)) {
7237 /* Like the kernel, we enforce correct size sigsets */
7238 ret = -TARGET_EINVAL;
7241 target_sigset = lock_user(VERIFY_READ, arg_sigset,
7242 sizeof(*target_sigset), 1);
7243 if (!target_sigset) {
7246 target_to_host_sigset(&set, target_sigset);
7247 unlock_user(target_sigset, arg_sigset, 0);
7255 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
7258 if (!is_error(ret)) {
7259 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
7261 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
7263 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
7266 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
7272 #ifdef TARGET_NR_symlink
7273 case TARGET_NR_symlink:
7276 p = lock_user_string(arg1);
7277 p2 = lock_user_string(arg2);
7279 ret = -TARGET_EFAULT;
7281 ret = get_errno(symlink(p, p2));
7282 unlock_user(p2, arg2, 0);
7283 unlock_user(p, arg1, 0);
7287 #if defined(TARGET_NR_symlinkat)
7288 case TARGET_NR_symlinkat:
7291 p = lock_user_string(arg1);
7292 p2 = lock_user_string(arg3);
7294 ret = -TARGET_EFAULT;
7296 ret = get_errno(symlinkat(p, arg2, p2));
7297 unlock_user(p2, arg3, 0);
7298 unlock_user(p, arg1, 0);
7302 #ifdef TARGET_NR_oldlstat
7303 case TARGET_NR_oldlstat:
7306 #ifdef TARGET_NR_readlink
7307 case TARGET_NR_readlink:
7310 p = lock_user_string(arg1);
7311 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
7313 ret = -TARGET_EFAULT;
7315 /* Short circuit this for the magic exe check. */
7316 ret = -TARGET_EINVAL;
7317 } else if (is_proc_myself((const char *)p, "exe")) {
7318 char real[PATH_MAX], *temp;
7319 temp = realpath(exec_path, real);
7320 /* Return value is # of bytes that we wrote to the buffer. */
7322 ret = get_errno(-1);
7324 /* Don't worry about sign mismatch as earlier mapping
7325 * logic would have thrown a bad address error. */
7326 ret = MIN(strlen(real), arg3);
7327 /* We cannot NUL terminate the string. */
7328 memcpy(p2, real, ret);
7331 ret = get_errno(readlink(path(p), p2, arg3));
7333 unlock_user(p2, arg2, ret);
7334 unlock_user(p, arg1, 0);
7338 #if defined(TARGET_NR_readlinkat)
7339 case TARGET_NR_readlinkat:
7342 p = lock_user_string(arg2);
7343 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
7345 ret = -TARGET_EFAULT;
7346 } else if (is_proc_myself((const char *)p, "exe")) {
7347 char real[PATH_MAX], *temp;
7348 temp = realpath(exec_path, real);
7349 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
7350 snprintf((char *)p2, arg4, "%s", real);
7352 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
7354 unlock_user(p2, arg3, ret);
7355 unlock_user(p, arg2, 0);
7359 #ifdef TARGET_NR_uselib
7360 case TARGET_NR_uselib:
7363 #ifdef TARGET_NR_swapon
7364 case TARGET_NR_swapon:
7365 if (!(p = lock_user_string(arg1)))
7367 ret = get_errno(swapon(p, arg2));
7368 unlock_user(p, arg1, 0);
7371 case TARGET_NR_reboot:
7372 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
7373 /* arg4 must be ignored in all other cases */
7374 p = lock_user_string(arg4);
7378 ret = get_errno(reboot(arg1, arg2, arg3, p));
7379 unlock_user(p, arg4, 0);
7381 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
7384 #ifdef TARGET_NR_readdir
7385 case TARGET_NR_readdir:
7388 #ifdef TARGET_NR_mmap
7389 case TARGET_NR_mmap:
7390 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7391 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
7392 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
7393 || defined(TARGET_S390X)
7396 abi_ulong v1, v2, v3, v4, v5, v6;
7397 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
7405 unlock_user(v, arg1, 0);
7406 ret = get_errno(target_mmap(v1, v2, v3,
7407 target_to_host_bitmask(v4, mmap_flags_tbl),
7411 ret = get_errno(target_mmap(arg1, arg2, arg3,
7412 target_to_host_bitmask(arg4, mmap_flags_tbl),
7418 #ifdef TARGET_NR_mmap2
7419 case TARGET_NR_mmap2:
7421 #define MMAP_SHIFT 12
7423 ret = get_errno(target_mmap(arg1, arg2, arg3,
7424 target_to_host_bitmask(arg4, mmap_flags_tbl),
7426 arg6 << MMAP_SHIFT));
7429 case TARGET_NR_munmap:
7430 ret = get_errno(target_munmap(arg1, arg2));
7432 case TARGET_NR_mprotect:
7434 TaskState *ts = cpu->opaque;
7435 /* Special hack to detect libc making the stack executable. */
7436 if ((arg3 & PROT_GROWSDOWN)
7437 && arg1 >= ts->info->stack_limit
7438 && arg1 <= ts->info->start_stack) {
7439 arg3 &= ~PROT_GROWSDOWN;
7440 arg2 = arg2 + arg1 - ts->info->stack_limit;
7441 arg1 = ts->info->stack_limit;
7444 ret = get_errno(target_mprotect(arg1, arg2, arg3));
7446 #ifdef TARGET_NR_mremap
7447 case TARGET_NR_mremap:
7448 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
7451 /* ??? msync/mlock/munlock are broken for softmmu. */
7452 #ifdef TARGET_NR_msync
7453 case TARGET_NR_msync:
7454 ret = get_errno(msync(g2h(arg1), arg2, arg3));
7457 #ifdef TARGET_NR_mlock
7458 case TARGET_NR_mlock:
7459 ret = get_errno(mlock(g2h(arg1), arg2));
7462 #ifdef TARGET_NR_munlock
7463 case TARGET_NR_munlock:
7464 ret = get_errno(munlock(g2h(arg1), arg2));
7467 #ifdef TARGET_NR_mlockall
7468 case TARGET_NR_mlockall:
7469 ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
7472 #ifdef TARGET_NR_munlockall
7473 case TARGET_NR_munlockall:
7474 ret = get_errno(munlockall());
7477 case TARGET_NR_truncate:
7478 if (!(p = lock_user_string(arg1)))
7480 ret = get_errno(truncate(p, arg2));
7481 unlock_user(p, arg1, 0);
7483 case TARGET_NR_ftruncate:
7484 ret = get_errno(ftruncate(arg1, arg2));
7486 case TARGET_NR_fchmod:
7487 ret = get_errno(fchmod(arg1, arg2));
7489 #if defined(TARGET_NR_fchmodat)
7490 case TARGET_NR_fchmodat:
7491 if (!(p = lock_user_string(arg2)))
7493 ret = get_errno(fchmodat(arg1, p, arg3, 0));
7494 unlock_user(p, arg2, 0);
7497 case TARGET_NR_getpriority:
7498 /* Note that negative values are valid for getpriority, so we must
7499 differentiate based on errno settings. */
7501 ret = getpriority(arg1, arg2);
7502 if (ret == -1 && errno != 0) {
7503 ret = -host_to_target_errno(errno);
7507 /* Return value is the unbiased priority. Signal no error. */
7508 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
7510 /* Return value is a biased priority to avoid negative numbers. */
7514 case TARGET_NR_setpriority:
7515 ret = get_errno(setpriority(arg1, arg2, arg3));
7517 #ifdef TARGET_NR_profil
7518 case TARGET_NR_profil:
7521 case TARGET_NR_statfs:
7522 if (!(p = lock_user_string(arg1)))
7524 ret = get_errno(statfs(path(p), &stfs));
7525 unlock_user(p, arg1, 0);
7527 if (!is_error(ret)) {
7528 struct target_statfs *target_stfs;
7530 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
7532 __put_user(stfs.f_type, &target_stfs->f_type);
7533 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
7534 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
7535 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
7536 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
7537 __put_user(stfs.f_files, &target_stfs->f_files);
7538 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
7539 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
7540 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
7541 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
7542 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
7543 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
7544 unlock_user_struct(target_stfs, arg2, 1);
7547 case TARGET_NR_fstatfs:
7548 ret = get_errno(fstatfs(arg1, &stfs));
7549 goto convert_statfs;
7550 #ifdef TARGET_NR_statfs64
7551 case TARGET_NR_statfs64:
7552 if (!(p = lock_user_string(arg1)))
7554 ret = get_errno(statfs(path(p), &stfs));
7555 unlock_user(p, arg1, 0);
7557 if (!is_error(ret)) {
7558 struct target_statfs64 *target_stfs;
7560 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
7562 __put_user(stfs.f_type, &target_stfs->f_type);
7563 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
7564 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
7565 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
7566 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
7567 __put_user(stfs.f_files, &target_stfs->f_files);
7568 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
7569 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
7570 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
7571 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
7572 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
7573 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
7574 unlock_user_struct(target_stfs, arg3, 1);
7577 case TARGET_NR_fstatfs64:
7578 ret = get_errno(fstatfs(arg1, &stfs));
7579 goto convert_statfs64;
7581 #ifdef TARGET_NR_ioperm
7582 case TARGET_NR_ioperm:
7585 #ifdef TARGET_NR_socketcall
7586 case TARGET_NR_socketcall:
7587 ret = do_socketcall(arg1, arg2);
7590 #ifdef TARGET_NR_accept
7591 case TARGET_NR_accept:
7592 ret = do_accept4(arg1, arg2, arg3, 0);
7595 #ifdef TARGET_NR_accept4
7596 case TARGET_NR_accept4:
7597 #ifdef CONFIG_ACCEPT4
7598 ret = do_accept4(arg1, arg2, arg3, arg4);
7604 #ifdef TARGET_NR_bind
7605 case TARGET_NR_bind:
7606 ret = do_bind(arg1, arg2, arg3);
7609 #ifdef TARGET_NR_connect
7610 case TARGET_NR_connect:
7611 ret = do_connect(arg1, arg2, arg3);
7614 #ifdef TARGET_NR_getpeername
7615 case TARGET_NR_getpeername:
7616 ret = do_getpeername(arg1, arg2, arg3);
7619 #ifdef TARGET_NR_getsockname
7620 case TARGET_NR_getsockname:
7621 ret = do_getsockname(arg1, arg2, arg3);
7624 #ifdef TARGET_NR_getsockopt
7625 case TARGET_NR_getsockopt:
7626 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
7629 #ifdef TARGET_NR_listen
7630 case TARGET_NR_listen:
7631 ret = get_errno(listen(arg1, arg2));
7634 #ifdef TARGET_NR_recv
7635 case TARGET_NR_recv:
7636 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
7639 #ifdef TARGET_NR_recvfrom
7640 case TARGET_NR_recvfrom:
7641 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
7644 #ifdef TARGET_NR_recvmsg
7645 case TARGET_NR_recvmsg:
7646 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
7649 #ifdef TARGET_NR_send
7650 case TARGET_NR_send:
7651 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
7654 #ifdef TARGET_NR_sendmsg
7655 case TARGET_NR_sendmsg:
7656 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
7659 #ifdef TARGET_NR_sendmmsg
7660 case TARGET_NR_sendmmsg:
7661 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
7663 case TARGET_NR_recvmmsg:
7664 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
7667 #ifdef TARGET_NR_sendto
7668 case TARGET_NR_sendto:
7669 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
7672 #ifdef TARGET_NR_shutdown
7673 case TARGET_NR_shutdown:
7674 ret = get_errno(shutdown(arg1, arg2));
7677 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
7678 case TARGET_NR_getrandom:
7679 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
7683 ret = get_errno(getrandom(p, arg2, arg3));
7684 unlock_user(p, arg1, ret);
7687 #ifdef TARGET_NR_socket
7688 case TARGET_NR_socket:
7689 ret = do_socket(arg1, arg2, arg3);
7690 fd_trans_unregister(ret);
7693 #ifdef TARGET_NR_socketpair
7694 case TARGET_NR_socketpair:
7695 ret = do_socketpair(arg1, arg2, arg3, arg4);
7698 #ifdef TARGET_NR_setsockopt
7699 case TARGET_NR_setsockopt:
7700 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
7704 case TARGET_NR_syslog:
7705 if (!(p = lock_user_string(arg2)))
7707 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
7708 unlock_user(p, arg2, 0);
7711 case TARGET_NR_setitimer:
7713 struct itimerval value, ovalue, *pvalue;
7717 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
7718 || copy_from_user_timeval(&pvalue->it_value,
7719 arg2 + sizeof(struct target_timeval)))
7724 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
7725 if (!is_error(ret) && arg3) {
7726 if (copy_to_user_timeval(arg3,
7727 &ovalue.it_interval)
7728 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
7734 case TARGET_NR_getitimer:
7736 struct itimerval value;
7738 ret = get_errno(getitimer(arg1, &value));
7739 if (!is_error(ret) && arg2) {
7740 if (copy_to_user_timeval(arg2,
7742 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
7748 #ifdef TARGET_NR_stat
7749 case TARGET_NR_stat:
7750 if (!(p = lock_user_string(arg1)))
7752 ret = get_errno(stat(path(p), &st));
7753 unlock_user(p, arg1, 0);
7756 #ifdef TARGET_NR_lstat
7757 case TARGET_NR_lstat:
7758 if (!(p = lock_user_string(arg1)))
7760 ret = get_errno(lstat(path(p), &st));
7761 unlock_user(p, arg1, 0);
7764 case TARGET_NR_fstat:
7766 ret = get_errno(fstat(arg1, &st));
7767 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
7770 if (!is_error(ret)) {
7771 struct target_stat *target_st;
7773 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
7775 memset(target_st, 0, sizeof(*target_st));
7776 __put_user(st.st_dev, &target_st->st_dev);
7777 __put_user(st.st_ino, &target_st->st_ino);
7778 __put_user(st.st_mode, &target_st->st_mode);
7779 __put_user(st.st_uid, &target_st->st_uid);
7780 __put_user(st.st_gid, &target_st->st_gid);
7781 __put_user(st.st_nlink, &target_st->st_nlink);
7782 __put_user(st.st_rdev, &target_st->st_rdev);
7783 __put_user(st.st_size, &target_st->st_size);
7784 __put_user(st.st_blksize, &target_st->st_blksize);
7785 __put_user(st.st_blocks, &target_st->st_blocks);
7786 __put_user(st.st_atime, &target_st->target_st_atime);
7787 __put_user(st.st_mtime, &target_st->target_st_mtime);
7788 __put_user(st.st_ctime, &target_st->target_st_ctime);
7789 unlock_user_struct(target_st, arg2, 1);
7793 #ifdef TARGET_NR_olduname
7794 case TARGET_NR_olduname:
7797 #ifdef TARGET_NR_iopl
7798 case TARGET_NR_iopl:
7801 case TARGET_NR_vhangup:
7802 ret = get_errno(vhangup());
7804 #ifdef TARGET_NR_idle
7805 case TARGET_NR_idle:
7808 #ifdef TARGET_NR_syscall
7809 case TARGET_NR_syscall:
7810 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
7811 arg6, arg7, arg8, 0);
7814 case TARGET_NR_wait4:
7817 abi_long status_ptr = arg2;
7818 struct rusage rusage, *rusage_ptr;
7819 abi_ulong target_rusage = arg4;
7820 abi_long rusage_err;
7822 rusage_ptr = &rusage;
7825 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
7826 if (!is_error(ret)) {
7827 if (status_ptr && ret) {
7828 status = host_to_target_waitstatus(status);
7829 if (put_user_s32(status, status_ptr))
7832 if (target_rusage) {
7833 rusage_err = host_to_target_rusage(target_rusage, &rusage);
7841 #ifdef TARGET_NR_swapoff
7842 case TARGET_NR_swapoff:
7843 if (!(p = lock_user_string(arg1)))
7845 ret = get_errno(swapoff(p));
7846 unlock_user(p, arg1, 0);
7849 case TARGET_NR_sysinfo:
7851 struct target_sysinfo *target_value;
7852 struct sysinfo value;
7853 ret = get_errno(sysinfo(&value));
7854 if (!is_error(ret) && arg1)
7856 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
7858 __put_user(value.uptime, &target_value->uptime);
7859 __put_user(value.loads[0], &target_value->loads[0]);
7860 __put_user(value.loads[1], &target_value->loads[1]);
7861 __put_user(value.loads[2], &target_value->loads[2]);
7862 __put_user(value.totalram, &target_value->totalram);
7863 __put_user(value.freeram, &target_value->freeram);
7864 __put_user(value.sharedram, &target_value->sharedram);
7865 __put_user(value.bufferram, &target_value->bufferram);
7866 __put_user(value.totalswap, &target_value->totalswap);
7867 __put_user(value.freeswap, &target_value->freeswap);
7868 __put_user(value.procs, &target_value->procs);
7869 __put_user(value.totalhigh, &target_value->totalhigh);
7870 __put_user(value.freehigh, &target_value->freehigh);
7871 __put_user(value.mem_unit, &target_value->mem_unit);
7872 unlock_user_struct(target_value, arg1, 1);
7876 #ifdef TARGET_NR_ipc
7878 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
7881 #ifdef TARGET_NR_semget
7882 case TARGET_NR_semget:
7883 ret = get_errno(semget(arg1, arg2, arg3));
7886 #ifdef TARGET_NR_semop
7887 case TARGET_NR_semop:
7888 ret = do_semop(arg1, arg2, arg3);
7891 #ifdef TARGET_NR_semctl
7892 case TARGET_NR_semctl:
7893 ret = do_semctl(arg1, arg2, arg3, arg4);
7896 #ifdef TARGET_NR_msgctl
7897 case TARGET_NR_msgctl:
7898 ret = do_msgctl(arg1, arg2, arg3);
7901 #ifdef TARGET_NR_msgget
7902 case TARGET_NR_msgget:
7903 ret = get_errno(msgget(arg1, arg2));
7906 #ifdef TARGET_NR_msgrcv
7907 case TARGET_NR_msgrcv:
7908 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
7911 #ifdef TARGET_NR_msgsnd
7912 case TARGET_NR_msgsnd:
7913 ret = do_msgsnd(arg1, arg2, arg3, arg4);
7916 #ifdef TARGET_NR_shmget
7917 case TARGET_NR_shmget:
7918 ret = get_errno(shmget(arg1, arg2, arg3));
7921 #ifdef TARGET_NR_shmctl
7922 case TARGET_NR_shmctl:
7923 ret = do_shmctl(arg1, arg2, arg3);
7926 #ifdef TARGET_NR_shmat
7927 case TARGET_NR_shmat:
7928 ret = do_shmat(arg1, arg2, arg3);
7931 #ifdef TARGET_NR_shmdt
7932 case TARGET_NR_shmdt:
7933 ret = do_shmdt(arg1);
7936 case TARGET_NR_fsync:
7937 ret = get_errno(fsync(arg1));
7939 case TARGET_NR_clone:
7940 /* Linux manages to have three different orderings for its
7941 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
7942 * match the kernel's CONFIG_CLONE_* settings.
7943 * Microblaze is further special in that it uses a sixth
7944 * implicit argument to clone for the TLS pointer.
7946 #if defined(TARGET_MICROBLAZE)
7947 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
7948 #elif defined(TARGET_CLONE_BACKWARDS)
7949 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
7950 #elif defined(TARGET_CLONE_BACKWARDS2)
7951 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
7953 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
7956 #ifdef __NR_exit_group
7957 /* new thread calls */
7958 case TARGET_NR_exit_group:
7962 gdb_exit(cpu_env, arg1);
7963 ret = get_errno(exit_group(arg1));
7966 case TARGET_NR_setdomainname:
7967 if (!(p = lock_user_string(arg1)))
7969 ret = get_errno(setdomainname(p, arg2));
7970 unlock_user(p, arg1, 0);
7972 case TARGET_NR_uname:
7973 /* no need to transcode because we use the linux syscall */
7975 struct new_utsname * buf;
7977 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
7979 ret = get_errno(sys_uname(buf));
7980 if (!is_error(ret)) {
7981 /* Overrite the native machine name with whatever is being
7983 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
7984 /* Allow the user to override the reported release. */
7985 if (qemu_uname_release && *qemu_uname_release)
7986 strcpy (buf->release, qemu_uname_release);
7988 unlock_user_struct(buf, arg1, 1);
7992 case TARGET_NR_modify_ldt:
7993 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
7995 #if !defined(TARGET_X86_64)
7996 case TARGET_NR_vm86old:
7998 case TARGET_NR_vm86:
7999 ret = do_vm86(cpu_env, arg1, arg2);
8003 case TARGET_NR_adjtimex:
8005 #ifdef TARGET_NR_create_module
8006 case TARGET_NR_create_module:
8008 case TARGET_NR_init_module:
8009 case TARGET_NR_delete_module:
8010 #ifdef TARGET_NR_get_kernel_syms
8011 case TARGET_NR_get_kernel_syms:
8014 case TARGET_NR_quotactl:
8016 case TARGET_NR_getpgid:
8017 ret = get_errno(getpgid(arg1));
8019 case TARGET_NR_fchdir:
8020 ret = get_errno(fchdir(arg1));
8022 #ifdef TARGET_NR_bdflush /* not on x86_64 */
8023 case TARGET_NR_bdflush:
8026 #ifdef TARGET_NR_sysfs
8027 case TARGET_NR_sysfs:
8030 case TARGET_NR_personality:
8031 ret = get_errno(personality(arg1));
8033 #ifdef TARGET_NR_afs_syscall
8034 case TARGET_NR_afs_syscall:
8037 #ifdef TARGET_NR__llseek /* Not on alpha */
8038 case TARGET_NR__llseek:
8041 #if !defined(__NR_llseek)
8042 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
8044 ret = get_errno(res);
8049 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
8051 if ((ret == 0) && put_user_s64(res, arg4)) {
8057 #ifdef TARGET_NR_getdents
8058 case TARGET_NR_getdents:
8059 #ifdef __NR_getdents
8060 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
8062 struct target_dirent *target_dirp;
8063 struct linux_dirent *dirp;
8064 abi_long count = arg3;
8066 dirp = g_try_malloc(count);
8068 ret = -TARGET_ENOMEM;
8072 ret = get_errno(sys_getdents(arg1, dirp, count));
8073 if (!is_error(ret)) {
8074 struct linux_dirent *de;
8075 struct target_dirent *tde;
8077 int reclen, treclen;
8078 int count1, tnamelen;
8082 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
8086 reclen = de->d_reclen;
8087 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
8088 assert(tnamelen >= 0);
8089 treclen = tnamelen + offsetof(struct target_dirent, d_name);
8090 assert(count1 + treclen <= count);
8091 tde->d_reclen = tswap16(treclen);
8092 tde->d_ino = tswapal(de->d_ino);
8093 tde->d_off = tswapal(de->d_off);
8094 memcpy(tde->d_name, de->d_name, tnamelen);
8095 de = (struct linux_dirent *)((char *)de + reclen);
8097 tde = (struct target_dirent *)((char *)tde + treclen);
8101 unlock_user(target_dirp, arg2, ret);
8107 struct linux_dirent *dirp;
8108 abi_long count = arg3;
8110 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
8112 ret = get_errno(sys_getdents(arg1, dirp, count));
8113 if (!is_error(ret)) {
8114 struct linux_dirent *de;
8119 reclen = de->d_reclen;
8122 de->d_reclen = tswap16(reclen);
8123 tswapls(&de->d_ino);
8124 tswapls(&de->d_off);
8125 de = (struct linux_dirent *)((char *)de + reclen);
8129 unlock_user(dirp, arg2, ret);
8133 /* Implement getdents in terms of getdents64 */
8135 struct linux_dirent64 *dirp;
8136 abi_long count = arg3;
8138 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8142 ret = get_errno(sys_getdents64(arg1, dirp, count));
8143 if (!is_error(ret)) {
8144 /* Convert the dirent64 structs to target dirent. We do this
8145 * in-place, since we can guarantee that a target_dirent is no
8146 * larger than a dirent64; however this means we have to be
8147 * careful to read everything before writing in the new format.
8149 struct linux_dirent64 *de;
8150 struct target_dirent *tde;
8155 tde = (struct target_dirent *)dirp;
8157 int namelen, treclen;
8158 int reclen = de->d_reclen;
8159 uint64_t ino = de->d_ino;
8160 int64_t off = de->d_off;
8161 uint8_t type = de->d_type;
8163 namelen = strlen(de->d_name);
8164 treclen = offsetof(struct target_dirent, d_name)
8166 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
8168 memmove(tde->d_name, de->d_name, namelen + 1);
8169 tde->d_ino = tswapal(ino);
8170 tde->d_off = tswapal(off);
8171 tde->d_reclen = tswap16(treclen);
8172 /* The target_dirent type is in what was formerly a padding
8173 * byte at the end of the structure:
8175 *(((char *)tde) + treclen - 1) = type;
8177 de = (struct linux_dirent64 *)((char *)de + reclen);
8178 tde = (struct target_dirent *)((char *)tde + treclen);
8184 unlock_user(dirp, arg2, ret);
8188 #endif /* TARGET_NR_getdents */
8189 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8190 case TARGET_NR_getdents64:
8192 struct linux_dirent64 *dirp;
8193 abi_long count = arg3;
8194 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
8196 ret = get_errno(sys_getdents64(arg1, dirp, count));
8197 if (!is_error(ret)) {
8198 struct linux_dirent64 *de;
8203 reclen = de->d_reclen;
8206 de->d_reclen = tswap16(reclen);
8207 tswap64s((uint64_t *)&de->d_ino);
8208 tswap64s((uint64_t *)&de->d_off);
8209 de = (struct linux_dirent64 *)((char *)de + reclen);
8213 unlock_user(dirp, arg2, ret);
8216 #endif /* TARGET_NR_getdents64 */
8217 #if defined(TARGET_NR__newselect)
8218 case TARGET_NR__newselect:
8219 ret = do_select(arg1, arg2, arg3, arg4, arg5);
8222 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
8223 # ifdef TARGET_NR_poll
8224 case TARGET_NR_poll:
8226 # ifdef TARGET_NR_ppoll
8227 case TARGET_NR_ppoll:
8230 struct target_pollfd *target_pfd;
8231 unsigned int nfds = arg2;
8239 target_pfd = lock_user(VERIFY_WRITE, arg1,
8240 sizeof(struct target_pollfd) * nfds, 1);
8245 pfd = alloca(sizeof(struct pollfd) * nfds);
8246 for (i = 0; i < nfds; i++) {
8247 pfd[i].fd = tswap32(target_pfd[i].fd);
8248 pfd[i].events = tswap16(target_pfd[i].events);
8252 # ifdef TARGET_NR_ppoll
8253 if (num == TARGET_NR_ppoll) {
8254 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
8255 target_sigset_t *target_set;
8256 sigset_t _set, *set = &_set;
8259 if (target_to_host_timespec(timeout_ts, arg3)) {
8260 unlock_user(target_pfd, arg1, 0);
8268 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
8270 unlock_user(target_pfd, arg1, 0);
8273 target_to_host_sigset(set, target_set);
8278 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));
8280 if (!is_error(ret) && arg3) {
8281 host_to_target_timespec(arg3, timeout_ts);
8284 unlock_user(target_set, arg4, 0);
8288 ret = get_errno(poll(pfd, nfds, timeout));
8290 if (!is_error(ret)) {
8291 for(i = 0; i < nfds; i++) {
8292 target_pfd[i].revents = tswap16(pfd[i].revents);
8295 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
8299 case TARGET_NR_flock:
8300 /* NOTE: the flock constant seems to be the same for every
8302 ret = get_errno(flock(arg1, arg2));
8304 case TARGET_NR_readv:
8306 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
8308 ret = get_errno(readv(arg1, vec, arg3));
8309 unlock_iovec(vec, arg2, arg3, 1);
8311 ret = -host_to_target_errno(errno);
8315 case TARGET_NR_writev:
8317 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
8319 ret = get_errno(writev(arg1, vec, arg3));
8320 unlock_iovec(vec, arg2, arg3, 0);
8322 ret = -host_to_target_errno(errno);
8326 case TARGET_NR_getsid:
8327 ret = get_errno(getsid(arg1));
8329 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
8330 case TARGET_NR_fdatasync:
8331 ret = get_errno(fdatasync(arg1));
8334 #ifdef TARGET_NR__sysctl
8335 case TARGET_NR__sysctl:
8336 /* We don't implement this, but ENOTDIR is always a safe
8338 ret = -TARGET_ENOTDIR;
8341 case TARGET_NR_sched_getaffinity:
8343 unsigned int mask_size;
8344 unsigned long *mask;
8347 * sched_getaffinity needs multiples of ulong, so need to take
8348 * care of mismatches between target ulong and host ulong sizes.
8350 if (arg2 & (sizeof(abi_ulong) - 1)) {
8351 ret = -TARGET_EINVAL;
8354 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
8356 mask = alloca(mask_size);
8357 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
8359 if (!is_error(ret)) {
8361 /* More data returned than the caller's buffer will fit.
8362 * This only happens if sizeof(abi_long) < sizeof(long)
8363 * and the caller passed us a buffer holding an odd number
8364 * of abi_longs. If the host kernel is actually using the
8365 * extra 4 bytes then fail EINVAL; otherwise we can just
8366 * ignore them and only copy the interesting part.
8368 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
8369 if (numcpus > arg2 * 8) {
8370 ret = -TARGET_EINVAL;
8376 if (copy_to_user(arg3, mask, ret)) {
8382 case TARGET_NR_sched_setaffinity:
8384 unsigned int mask_size;
8385 unsigned long *mask;
8388 * sched_setaffinity needs multiples of ulong, so need to take
8389 * care of mismatches between target ulong and host ulong sizes.
8391 if (arg2 & (sizeof(abi_ulong) - 1)) {
8392 ret = -TARGET_EINVAL;
8395 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
8397 mask = alloca(mask_size);
8398 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
8401 memcpy(mask, p, arg2);
8402 unlock_user_struct(p, arg2, 0);
8404 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
8407 case TARGET_NR_sched_setparam:
8409 struct sched_param *target_schp;
8410 struct sched_param schp;
8413 return -TARGET_EINVAL;
8415 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
8417 schp.sched_priority = tswap32(target_schp->sched_priority);
8418 unlock_user_struct(target_schp, arg2, 0);
8419 ret = get_errno(sched_setparam(arg1, &schp));
8422 case TARGET_NR_sched_getparam:
8424 struct sched_param *target_schp;
8425 struct sched_param schp;
8428 return -TARGET_EINVAL;
8430 ret = get_errno(sched_getparam(arg1, &schp));
8431 if (!is_error(ret)) {
8432 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
8434 target_schp->sched_priority = tswap32(schp.sched_priority);
8435 unlock_user_struct(target_schp, arg2, 1);
8439 case TARGET_NR_sched_setscheduler:
8441 struct sched_param *target_schp;
8442 struct sched_param schp;
8444 return -TARGET_EINVAL;
8446 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
8448 schp.sched_priority = tswap32(target_schp->sched_priority);
8449 unlock_user_struct(target_schp, arg3, 0);
8450 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
8453 case TARGET_NR_sched_getscheduler:
8454 ret = get_errno(sched_getscheduler(arg1));
8456 case TARGET_NR_sched_yield:
8457 ret = get_errno(sched_yield());
8459 case TARGET_NR_sched_get_priority_max:
8460 ret = get_errno(sched_get_priority_max(arg1));
8462 case TARGET_NR_sched_get_priority_min:
8463 ret = get_errno(sched_get_priority_min(arg1));
8465 case TARGET_NR_sched_rr_get_interval:
8468 ret = get_errno(sched_rr_get_interval(arg1, &ts));
8469 if (!is_error(ret)) {
8470 ret = host_to_target_timespec(arg2, &ts);
8474 case TARGET_NR_nanosleep:
8476 struct timespec req, rem;
8477 target_to_host_timespec(&req, arg1);
8478 ret = get_errno(nanosleep(&req, &rem));
8479 if (is_error(ret) && arg2) {
8480 host_to_target_timespec(arg2, &rem);
8484 #ifdef TARGET_NR_query_module
8485 case TARGET_NR_query_module:
8488 #ifdef TARGET_NR_nfsservctl
8489 case TARGET_NR_nfsservctl:
8492 case TARGET_NR_prctl:
8494 case PR_GET_PDEATHSIG:
8497 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
8498 if (!is_error(ret) && arg2
8499 && put_user_ual(deathsig, arg2)) {
8507 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
8511 ret = get_errno(prctl(arg1, (unsigned long)name,
8513 unlock_user(name, arg2, 16);
8518 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
8522 ret = get_errno(prctl(arg1, (unsigned long)name,
8524 unlock_user(name, arg2, 0);
8529 /* Most prctl options have no pointer arguments */
8530 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
8534 #ifdef TARGET_NR_arch_prctl
8535 case TARGET_NR_arch_prctl:
8536 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
8537 ret = do_arch_prctl(cpu_env, arg1, arg2);
8543 #ifdef TARGET_NR_pread64
8544 case TARGET_NR_pread64:
8545 if (regpairs_aligned(cpu_env)) {
8549 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8551 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
8552 unlock_user(p, arg2, ret);
8554 case TARGET_NR_pwrite64:
8555 if (regpairs_aligned(cpu_env)) {
8559 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8561 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
8562 unlock_user(p, arg2, 0);
8565 case TARGET_NR_getcwd:
8566 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
8568 ret = get_errno(sys_getcwd1(p, arg2));
8569 unlock_user(p, arg1, ret);
8571 case TARGET_NR_capget:
8572 case TARGET_NR_capset:
8574 struct target_user_cap_header *target_header;
8575 struct target_user_cap_data *target_data = NULL;
8576 struct __user_cap_header_struct header;
8577 struct __user_cap_data_struct data[2];
8578 struct __user_cap_data_struct *dataptr = NULL;
8579 int i, target_datalen;
8582 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
8585 header.version = tswap32(target_header->version);
8586 header.pid = tswap32(target_header->pid);
8588 if (header.version != _LINUX_CAPABILITY_VERSION) {
8589 /* Version 2 and up takes pointer to two user_data structs */
8593 target_datalen = sizeof(*target_data) * data_items;
8596 if (num == TARGET_NR_capget) {
8597 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
8599 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
8602 unlock_user_struct(target_header, arg1, 0);
8606 if (num == TARGET_NR_capset) {
8607 for (i = 0; i < data_items; i++) {
8608 data[i].effective = tswap32(target_data[i].effective);
8609 data[i].permitted = tswap32(target_data[i].permitted);
8610 data[i].inheritable = tswap32(target_data[i].inheritable);
8617 if (num == TARGET_NR_capget) {
8618 ret = get_errno(capget(&header, dataptr));
8620 ret = get_errno(capset(&header, dataptr));
8623 /* The kernel always updates version for both capget and capset */
8624 target_header->version = tswap32(header.version);
8625 unlock_user_struct(target_header, arg1, 1);
8628 if (num == TARGET_NR_capget) {
8629 for (i = 0; i < data_items; i++) {
8630 target_data[i].effective = tswap32(data[i].effective);
8631 target_data[i].permitted = tswap32(data[i].permitted);
8632 target_data[i].inheritable = tswap32(data[i].inheritable);
8634 unlock_user(target_data, arg2, target_datalen);
8636 unlock_user(target_data, arg2, 0);
8641 case TARGET_NR_sigaltstack:
8642 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
8645 #ifdef CONFIG_SENDFILE
8646 case TARGET_NR_sendfile:
8651 ret = get_user_sal(off, arg3);
8652 if (is_error(ret)) {
8657 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
8658 if (!is_error(ret) && arg3) {
8659 abi_long ret2 = put_user_sal(off, arg3);
8660 if (is_error(ret2)) {
8666 #ifdef TARGET_NR_sendfile64
8667 case TARGET_NR_sendfile64:
8672 ret = get_user_s64(off, arg3);
8673 if (is_error(ret)) {
8678 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
8679 if (!is_error(ret) && arg3) {
8680 abi_long ret2 = put_user_s64(off, arg3);
8681 if (is_error(ret2)) {
8689 case TARGET_NR_sendfile:
8690 #ifdef TARGET_NR_sendfile64
8691 case TARGET_NR_sendfile64:
8696 #ifdef TARGET_NR_getpmsg
8697 case TARGET_NR_getpmsg:
8700 #ifdef TARGET_NR_putpmsg
8701 case TARGET_NR_putpmsg:
8704 #ifdef TARGET_NR_vfork
8705 case TARGET_NR_vfork:
8706 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
8710 #ifdef TARGET_NR_ugetrlimit
8711 case TARGET_NR_ugetrlimit:
8714 int resource = target_to_host_resource(arg1);
8715 ret = get_errno(getrlimit(resource, &rlim));
8716 if (!is_error(ret)) {
8717 struct target_rlimit *target_rlim;
8718 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8720 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8721 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8722 unlock_user_struct(target_rlim, arg2, 1);
8727 #ifdef TARGET_NR_truncate64
8728 case TARGET_NR_truncate64:
8729 if (!(p = lock_user_string(arg1)))
8731 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
8732 unlock_user(p, arg1, 0);
8735 #ifdef TARGET_NR_ftruncate64
8736 case TARGET_NR_ftruncate64:
8737 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
8740 #ifdef TARGET_NR_stat64
8741 case TARGET_NR_stat64:
8742 if (!(p = lock_user_string(arg1)))
8744 ret = get_errno(stat(path(p), &st));
8745 unlock_user(p, arg1, 0);
8747 ret = host_to_target_stat64(cpu_env, arg2, &st);
8750 #ifdef TARGET_NR_lstat64
8751 case TARGET_NR_lstat64:
8752 if (!(p = lock_user_string(arg1)))
8754 ret = get_errno(lstat(path(p), &st));
8755 unlock_user(p, arg1, 0);
8757 ret = host_to_target_stat64(cpu_env, arg2, &st);
8760 #ifdef TARGET_NR_fstat64
8761 case TARGET_NR_fstat64:
8762 ret = get_errno(fstat(arg1, &st));
8764 ret = host_to_target_stat64(cpu_env, arg2, &st);
8767 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
8768 #ifdef TARGET_NR_fstatat64
8769 case TARGET_NR_fstatat64:
8771 #ifdef TARGET_NR_newfstatat
8772 case TARGET_NR_newfstatat:
8774 if (!(p = lock_user_string(arg2)))
8776 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
8778 ret = host_to_target_stat64(cpu_env, arg3, &st);
8781 #ifdef TARGET_NR_lchown
8782 case TARGET_NR_lchown:
8783 if (!(p = lock_user_string(arg1)))
8785 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
8786 unlock_user(p, arg1, 0);
8789 #ifdef TARGET_NR_getuid
8790 case TARGET_NR_getuid:
8791 ret = get_errno(high2lowuid(getuid()));
8794 #ifdef TARGET_NR_getgid
8795 case TARGET_NR_getgid:
8796 ret = get_errno(high2lowgid(getgid()));
8799 #ifdef TARGET_NR_geteuid
8800 case TARGET_NR_geteuid:
8801 ret = get_errno(high2lowuid(geteuid()));
8804 #ifdef TARGET_NR_getegid
8805 case TARGET_NR_getegid:
8806 ret = get_errno(high2lowgid(getegid()));
8809 case TARGET_NR_setreuid:
8810 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
8812 case TARGET_NR_setregid:
8813 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
8815 case TARGET_NR_getgroups:
8817 int gidsetsize = arg1;
8818 target_id *target_grouplist;
8822 grouplist = alloca(gidsetsize * sizeof(gid_t));
8823 ret = get_errno(getgroups(gidsetsize, grouplist));
8824 if (gidsetsize == 0)
8826 if (!is_error(ret)) {
8827 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
8828 if (!target_grouplist)
8830 for(i = 0;i < ret; i++)
8831 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
8832 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
8836 case TARGET_NR_setgroups:
8838 int gidsetsize = arg1;
8839 target_id *target_grouplist;
8840 gid_t *grouplist = NULL;
8843 grouplist = alloca(gidsetsize * sizeof(gid_t));
8844 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
8845 if (!target_grouplist) {
8846 ret = -TARGET_EFAULT;
8849 for (i = 0; i < gidsetsize; i++) {
8850 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
8852 unlock_user(target_grouplist, arg2, 0);
8854 ret = get_errno(setgroups(gidsetsize, grouplist));
8857 case TARGET_NR_fchown:
8858 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
8860 #if defined(TARGET_NR_fchownat)
8861 case TARGET_NR_fchownat:
8862 if (!(p = lock_user_string(arg2)))
8864 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
8865 low2highgid(arg4), arg5));
8866 unlock_user(p, arg2, 0);
8869 #ifdef TARGET_NR_setresuid
8870 case TARGET_NR_setresuid:
8871 ret = get_errno(sys_setresuid(low2highuid(arg1),
8873 low2highuid(arg3)));
8876 #ifdef TARGET_NR_getresuid
8877 case TARGET_NR_getresuid:
8879 uid_t ruid, euid, suid;
8880 ret = get_errno(getresuid(&ruid, &euid, &suid));
8881 if (!is_error(ret)) {
8882 if (put_user_id(high2lowuid(ruid), arg1)
8883 || put_user_id(high2lowuid(euid), arg2)
8884 || put_user_id(high2lowuid(suid), arg3))
8890 #ifdef TARGET_NR_getresgid
8891 case TARGET_NR_setresgid:
8892 ret = get_errno(sys_setresgid(low2highgid(arg1),
8894 low2highgid(arg3)));
8897 #ifdef TARGET_NR_getresgid
8898 case TARGET_NR_getresgid:
8900 gid_t rgid, egid, sgid;
8901 ret = get_errno(getresgid(&rgid, &egid, &sgid));
8902 if (!is_error(ret)) {
8903 if (put_user_id(high2lowgid(rgid), arg1)
8904 || put_user_id(high2lowgid(egid), arg2)
8905 || put_user_id(high2lowgid(sgid), arg3))
8911 #ifdef TARGET_NR_chown
8912 case TARGET_NR_chown:
8913 if (!(p = lock_user_string(arg1)))
8915 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
8916 unlock_user(p, arg1, 0);
8919 case TARGET_NR_setuid:
8920 ret = get_errno(sys_setuid(low2highuid(arg1)));
8922 case TARGET_NR_setgid:
8923 ret = get_errno(sys_setgid(low2highgid(arg1)));
8925 case TARGET_NR_setfsuid:
8926 ret = get_errno(setfsuid(arg1));
8928 case TARGET_NR_setfsgid:
8929 ret = get_errno(setfsgid(arg1));
8932 #ifdef TARGET_NR_lchown32
8933 case TARGET_NR_lchown32:
8934 if (!(p = lock_user_string(arg1)))
8936 ret = get_errno(lchown(p, arg2, arg3));
8937 unlock_user(p, arg1, 0);
8940 #ifdef TARGET_NR_getuid32
8941 case TARGET_NR_getuid32:
8942 ret = get_errno(getuid());
8946 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
8947 /* Alpha specific */
8948 case TARGET_NR_getxuid:
8952 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
8954 ret = get_errno(getuid());
8957 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
8958 /* Alpha specific */
8959 case TARGET_NR_getxgid:
8963 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
8965 ret = get_errno(getgid());
8968 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
8969 /* Alpha specific */
8970 case TARGET_NR_osf_getsysinfo:
8971 ret = -TARGET_EOPNOTSUPP;
8973 case TARGET_GSI_IEEE_FP_CONTROL:
8975 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
8977 /* Copied from linux ieee_fpcr_to_swcr. */
8978 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
8979 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
8980 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
8981 | SWCR_TRAP_ENABLE_DZE
8982 | SWCR_TRAP_ENABLE_OVF);
8983 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
8984 | SWCR_TRAP_ENABLE_INE);
8985 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
8986 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
8988 if (put_user_u64 (swcr, arg2))
8994 /* case GSI_IEEE_STATE_AT_SIGNAL:
8995 -- Not implemented in linux kernel.
8997 -- Retrieves current unaligned access state; not much used.
8999 -- Retrieves implver information; surely not used.
9001 -- Grabs a copy of the HWRPB; surely not used.
9006 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
9007 /* Alpha specific */
9008 case TARGET_NR_osf_setsysinfo:
9009 ret = -TARGET_EOPNOTSUPP;
9011 case TARGET_SSI_IEEE_FP_CONTROL:
9013 uint64_t swcr, fpcr, orig_fpcr;
9015 if (get_user_u64 (swcr, arg2)) {
9018 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
9019 fpcr = orig_fpcr & FPCR_DYN_MASK;
9021 /* Copied from linux ieee_swcr_to_fpcr. */
9022 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
9023 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
9024 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
9025 | SWCR_TRAP_ENABLE_DZE
9026 | SWCR_TRAP_ENABLE_OVF)) << 48;
9027 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
9028 | SWCR_TRAP_ENABLE_INE)) << 57;
9029 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
9030 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
9032 cpu_alpha_store_fpcr(cpu_env, fpcr);
9037 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
9039 uint64_t exc, fpcr, orig_fpcr;
9042 if (get_user_u64(exc, arg2)) {
9046 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
9048 /* We only add to the exception status here. */
9049 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
9051 cpu_alpha_store_fpcr(cpu_env, fpcr);
9054 /* Old exceptions are not signaled. */
9055 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
9057 /* If any exceptions set by this call,
9058 and are unmasked, send a signal. */
9060 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
9061 si_code = TARGET_FPE_FLTRES;
9063 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
9064 si_code = TARGET_FPE_FLTUND;
9066 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
9067 si_code = TARGET_FPE_FLTOVF;
9069 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
9070 si_code = TARGET_FPE_FLTDIV;
9072 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
9073 si_code = TARGET_FPE_FLTINV;
9076 target_siginfo_t info;
9077 info.si_signo = SIGFPE;
9079 info.si_code = si_code;
9080 info._sifields._sigfault._addr
9081 = ((CPUArchState *)cpu_env)->pc;
9082 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
9087 /* case SSI_NVPAIRS:
9088 -- Used with SSIN_UACPROC to enable unaligned accesses.
9089 case SSI_IEEE_STATE_AT_SIGNAL:
9090 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
9091 -- Not implemented in linux kernel
9096 #ifdef TARGET_NR_osf_sigprocmask
9097 /* Alpha specific. */
9098 case TARGET_NR_osf_sigprocmask:
9102 sigset_t set, oldset;
9105 case TARGET_SIG_BLOCK:
9108 case TARGET_SIG_UNBLOCK:
9111 case TARGET_SIG_SETMASK:
9115 ret = -TARGET_EINVAL;
9119 target_to_host_old_sigset(&set, &mask);
9120 do_sigprocmask(how, &set, &oldset);
9121 host_to_target_old_sigset(&mask, &oldset);
9127 #ifdef TARGET_NR_getgid32
9128 case TARGET_NR_getgid32:
9129 ret = get_errno(getgid());
9132 #ifdef TARGET_NR_geteuid32
9133 case TARGET_NR_geteuid32:
9134 ret = get_errno(geteuid());
9137 #ifdef TARGET_NR_getegid32
9138 case TARGET_NR_getegid32:
9139 ret = get_errno(getegid());
9142 #ifdef TARGET_NR_setreuid32
9143 case TARGET_NR_setreuid32:
9144 ret = get_errno(setreuid(arg1, arg2));
9147 #ifdef TARGET_NR_setregid32
9148 case TARGET_NR_setregid32:
9149 ret = get_errno(setregid(arg1, arg2));
9152 #ifdef TARGET_NR_getgroups32
9153 case TARGET_NR_getgroups32:
9155 int gidsetsize = arg1;
9156 uint32_t *target_grouplist;
9160 grouplist = alloca(gidsetsize * sizeof(gid_t));
9161 ret = get_errno(getgroups(gidsetsize, grouplist));
9162 if (gidsetsize == 0)
9164 if (!is_error(ret)) {
9165 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
9166 if (!target_grouplist) {
9167 ret = -TARGET_EFAULT;
9170 for(i = 0;i < ret; i++)
9171 target_grouplist[i] = tswap32(grouplist[i]);
9172 unlock_user(target_grouplist, arg2, gidsetsize * 4);
9177 #ifdef TARGET_NR_setgroups32
9178 case TARGET_NR_setgroups32:
9180 int gidsetsize = arg1;
9181 uint32_t *target_grouplist;
9185 grouplist = alloca(gidsetsize * sizeof(gid_t));
9186 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
9187 if (!target_grouplist) {
9188 ret = -TARGET_EFAULT;
9191 for(i = 0;i < gidsetsize; i++)
9192 grouplist[i] = tswap32(target_grouplist[i]);
9193 unlock_user(target_grouplist, arg2, 0);
9194 ret = get_errno(setgroups(gidsetsize, grouplist));
9198 #ifdef TARGET_NR_fchown32
9199 case TARGET_NR_fchown32:
9200 ret = get_errno(fchown(arg1, arg2, arg3));
9203 #ifdef TARGET_NR_setresuid32
9204 case TARGET_NR_setresuid32:
9205 ret = get_errno(sys_setresuid(arg1, arg2, arg3));
9208 #ifdef TARGET_NR_getresuid32
9209 case TARGET_NR_getresuid32:
9211 uid_t ruid, euid, suid;
9212 ret = get_errno(getresuid(&ruid, &euid, &suid));
9213 if (!is_error(ret)) {
9214 if (put_user_u32(ruid, arg1)
9215 || put_user_u32(euid, arg2)
9216 || put_user_u32(suid, arg3))
9222 #ifdef TARGET_NR_setresgid32
9223 case TARGET_NR_setresgid32:
9224 ret = get_errno(sys_setresgid(arg1, arg2, arg3));
9227 #ifdef TARGET_NR_getresgid32
9228 case TARGET_NR_getresgid32:
9230 gid_t rgid, egid, sgid;
9231 ret = get_errno(getresgid(&rgid, &egid, &sgid));
9232 if (!is_error(ret)) {
9233 if (put_user_u32(rgid, arg1)
9234 || put_user_u32(egid, arg2)
9235 || put_user_u32(sgid, arg3))
9241 #ifdef TARGET_NR_chown32
9242 case TARGET_NR_chown32:
9243 if (!(p = lock_user_string(arg1)))
9245 ret = get_errno(chown(p, arg2, arg3));
9246 unlock_user(p, arg1, 0);
9249 #ifdef TARGET_NR_setuid32
9250 case TARGET_NR_setuid32:
9251 ret = get_errno(sys_setuid(arg1));
9254 #ifdef TARGET_NR_setgid32
9255 case TARGET_NR_setgid32:
9256 ret = get_errno(sys_setgid(arg1));
9259 #ifdef TARGET_NR_setfsuid32
9260 case TARGET_NR_setfsuid32:
9261 ret = get_errno(setfsuid(arg1));
9264 #ifdef TARGET_NR_setfsgid32
9265 case TARGET_NR_setfsgid32:
9266 ret = get_errno(setfsgid(arg1));
9270 case TARGET_NR_pivot_root:
9272 #ifdef TARGET_NR_mincore
9273 case TARGET_NR_mincore:
9276 ret = -TARGET_EFAULT;
9277 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
9279 if (!(p = lock_user_string(arg3)))
9281 ret = get_errno(mincore(a, arg2, p));
9282 unlock_user(p, arg3, ret);
9284 unlock_user(a, arg1, 0);
9288 #ifdef TARGET_NR_arm_fadvise64_64
9289 case TARGET_NR_arm_fadvise64_64:
9292 * arm_fadvise64_64 looks like fadvise64_64 but
9293 * with different argument order
9301 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
9302 #ifdef TARGET_NR_fadvise64_64
9303 case TARGET_NR_fadvise64_64:
9305 #ifdef TARGET_NR_fadvise64
9306 case TARGET_NR_fadvise64:
9310 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
9311 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
9312 case 6: arg4 = POSIX_FADV_DONTNEED; break;
9313 case 7: arg4 = POSIX_FADV_NOREUSE; break;
9317 ret = -posix_fadvise(arg1, arg2, arg3, arg4);
9320 #ifdef TARGET_NR_madvise
9321 case TARGET_NR_madvise:
9322 /* A straight passthrough may not be safe because qemu sometimes
9323 turns private file-backed mappings into anonymous mappings.
9324 This will break MADV_DONTNEED.
9325 This is a hint, so ignoring and returning success is ok. */
9329 #if TARGET_ABI_BITS == 32
9330 case TARGET_NR_fcntl64:
9334 struct target_flock64 *target_fl;
9336 struct target_eabi_flock64 *target_efl;
9339 cmd = target_to_host_fcntl_cmd(arg2);
9340 if (cmd == -TARGET_EINVAL) {
9346 case TARGET_F_GETLK64:
9348 if (((CPUARMState *)cpu_env)->eabi) {
9349 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
9351 fl.l_type = tswap16(target_efl->l_type);
9352 fl.l_whence = tswap16(target_efl->l_whence);
9353 fl.l_start = tswap64(target_efl->l_start);
9354 fl.l_len = tswap64(target_efl->l_len);
9355 fl.l_pid = tswap32(target_efl->l_pid);
9356 unlock_user_struct(target_efl, arg3, 0);
9360 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
9362 fl.l_type = tswap16(target_fl->l_type);
9363 fl.l_whence = tswap16(target_fl->l_whence);
9364 fl.l_start = tswap64(target_fl->l_start);
9365 fl.l_len = tswap64(target_fl->l_len);
9366 fl.l_pid = tswap32(target_fl->l_pid);
9367 unlock_user_struct(target_fl, arg3, 0);
9369 ret = get_errno(fcntl(arg1, cmd, &fl));
9372 if (((CPUARMState *)cpu_env)->eabi) {
9373 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
9375 target_efl->l_type = tswap16(fl.l_type);
9376 target_efl->l_whence = tswap16(fl.l_whence);
9377 target_efl->l_start = tswap64(fl.l_start);
9378 target_efl->l_len = tswap64(fl.l_len);
9379 target_efl->l_pid = tswap32(fl.l_pid);
9380 unlock_user_struct(target_efl, arg3, 1);
9384 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
9386 target_fl->l_type = tswap16(fl.l_type);
9387 target_fl->l_whence = tswap16(fl.l_whence);
9388 target_fl->l_start = tswap64(fl.l_start);
9389 target_fl->l_len = tswap64(fl.l_len);
9390 target_fl->l_pid = tswap32(fl.l_pid);
9391 unlock_user_struct(target_fl, arg3, 1);
9396 case TARGET_F_SETLK64:
9397 case TARGET_F_SETLKW64:
9399 if (((CPUARMState *)cpu_env)->eabi) {
9400 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
9402 fl.l_type = tswap16(target_efl->l_type);
9403 fl.l_whence = tswap16(target_efl->l_whence);
9404 fl.l_start = tswap64(target_efl->l_start);
9405 fl.l_len = tswap64(target_efl->l_len);
9406 fl.l_pid = tswap32(target_efl->l_pid);
9407 unlock_user_struct(target_efl, arg3, 0);
9411 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
9413 fl.l_type = tswap16(target_fl->l_type);
9414 fl.l_whence = tswap16(target_fl->l_whence);
9415 fl.l_start = tswap64(target_fl->l_start);
9416 fl.l_len = tswap64(target_fl->l_len);
9417 fl.l_pid = tswap32(target_fl->l_pid);
9418 unlock_user_struct(target_fl, arg3, 0);
9420 ret = get_errno(fcntl(arg1, cmd, &fl));
9423 ret = do_fcntl(arg1, arg2, arg3);
9429 #ifdef TARGET_NR_cacheflush
9430 case TARGET_NR_cacheflush:
9431 /* self-modifying code is handled automatically, so nothing needed */
9435 #ifdef TARGET_NR_security
9436 case TARGET_NR_security:
9439 #ifdef TARGET_NR_getpagesize
9440 case TARGET_NR_getpagesize:
9441 ret = TARGET_PAGE_SIZE;
9444 case TARGET_NR_gettid:
9445 ret = get_errno(gettid());
9447 #ifdef TARGET_NR_readahead
9448 case TARGET_NR_readahead:
9449 #if TARGET_ABI_BITS == 32
9450 if (regpairs_aligned(cpu_env)) {
9455 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
9457 ret = get_errno(readahead(arg1, arg2, arg3));
9462 #ifdef TARGET_NR_setxattr
9463 case TARGET_NR_listxattr:
9464 case TARGET_NR_llistxattr:
9468 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9470 ret = -TARGET_EFAULT;
9474 p = lock_user_string(arg1);
9476 if (num == TARGET_NR_listxattr) {
9477 ret = get_errno(listxattr(p, b, arg3));
9479 ret = get_errno(llistxattr(p, b, arg3));
9482 ret = -TARGET_EFAULT;
9484 unlock_user(p, arg1, 0);
9485 unlock_user(b, arg2, arg3);
9488 case TARGET_NR_flistxattr:
9492 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9494 ret = -TARGET_EFAULT;
9498 ret = get_errno(flistxattr(arg1, b, arg3));
9499 unlock_user(b, arg2, arg3);
9502 case TARGET_NR_setxattr:
9503 case TARGET_NR_lsetxattr:
9505 void *p, *n, *v = 0;
9507 v = lock_user(VERIFY_READ, arg3, arg4, 1);
9509 ret = -TARGET_EFAULT;
9513 p = lock_user_string(arg1);
9514 n = lock_user_string(arg2);
9516 if (num == TARGET_NR_setxattr) {
9517 ret = get_errno(setxattr(p, n, v, arg4, arg5));
9519 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
9522 ret = -TARGET_EFAULT;
9524 unlock_user(p, arg1, 0);
9525 unlock_user(n, arg2, 0);
9526 unlock_user(v, arg3, 0);
9529 case TARGET_NR_fsetxattr:
9533 v = lock_user(VERIFY_READ, arg3, arg4, 1);
9535 ret = -TARGET_EFAULT;
9539 n = lock_user_string(arg2);
9541 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
9543 ret = -TARGET_EFAULT;
9545 unlock_user(n, arg2, 0);
9546 unlock_user(v, arg3, 0);
9549 case TARGET_NR_getxattr:
9550 case TARGET_NR_lgetxattr:
9552 void *p, *n, *v = 0;
9554 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9556 ret = -TARGET_EFAULT;
9560 p = lock_user_string(arg1);
9561 n = lock_user_string(arg2);
9563 if (num == TARGET_NR_getxattr) {
9564 ret = get_errno(getxattr(p, n, v, arg4));
9566 ret = get_errno(lgetxattr(p, n, v, arg4));
9569 ret = -TARGET_EFAULT;
9571 unlock_user(p, arg1, 0);
9572 unlock_user(n, arg2, 0);
9573 unlock_user(v, arg3, arg4);
9576 case TARGET_NR_fgetxattr:
9580 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9582 ret = -TARGET_EFAULT;
9586 n = lock_user_string(arg2);
9588 ret = get_errno(fgetxattr(arg1, n, v, arg4));
9590 ret = -TARGET_EFAULT;
9592 unlock_user(n, arg2, 0);
9593 unlock_user(v, arg3, arg4);
9596 case TARGET_NR_removexattr:
9597 case TARGET_NR_lremovexattr:
9600 p = lock_user_string(arg1);
9601 n = lock_user_string(arg2);
9603 if (num == TARGET_NR_removexattr) {
9604 ret = get_errno(removexattr(p, n));
9606 ret = get_errno(lremovexattr(p, n));
9609 ret = -TARGET_EFAULT;
9611 unlock_user(p, arg1, 0);
9612 unlock_user(n, arg2, 0);
9615 case TARGET_NR_fremovexattr:
9618 n = lock_user_string(arg2);
9620 ret = get_errno(fremovexattr(arg1, n));
9622 ret = -TARGET_EFAULT;
9624 unlock_user(n, arg2, 0);
9628 #endif /* CONFIG_ATTR */
9629 #ifdef TARGET_NR_set_thread_area
9630 case TARGET_NR_set_thread_area:
9631 #if defined(TARGET_MIPS)
9632 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
9635 #elif defined(TARGET_CRIS)
9637 ret = -TARGET_EINVAL;
9639 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
9643 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
9644 ret = do_set_thread_area(cpu_env, arg1);
9646 #elif defined(TARGET_M68K)
9648 TaskState *ts = cpu->opaque;
9649 ts->tp_value = arg1;
9654 goto unimplemented_nowarn;
9657 #ifdef TARGET_NR_get_thread_area
9658 case TARGET_NR_get_thread_area:
9659 #if defined(TARGET_I386) && defined(TARGET_ABI32)
9660 ret = do_get_thread_area(cpu_env, arg1);
9662 #elif defined(TARGET_M68K)
9664 TaskState *ts = cpu->opaque;
9669 goto unimplemented_nowarn;
9672 #ifdef TARGET_NR_getdomainname
9673 case TARGET_NR_getdomainname:
9674 goto unimplemented_nowarn;
9677 #ifdef TARGET_NR_clock_gettime
9678 case TARGET_NR_clock_gettime:
9681 ret = get_errno(clock_gettime(arg1, &ts));
9682 if (!is_error(ret)) {
9683 host_to_target_timespec(arg2, &ts);
9688 #ifdef TARGET_NR_clock_getres
9689 case TARGET_NR_clock_getres:
9692 ret = get_errno(clock_getres(arg1, &ts));
9693 if (!is_error(ret)) {
9694 host_to_target_timespec(arg2, &ts);
9699 #ifdef TARGET_NR_clock_nanosleep
9700 case TARGET_NR_clock_nanosleep:
9703 target_to_host_timespec(&ts, arg3);
9704 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
9706 host_to_target_timespec(arg4, &ts);
9708 #if defined(TARGET_PPC)
9709 /* clock_nanosleep is odd in that it returns positive errno values.
9710 * On PPC, CR0 bit 3 should be set in such a situation. */
9712 ((CPUPPCState *)cpu_env)->crf[0] |= 1;
9719 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
9720 case TARGET_NR_set_tid_address:
9721 ret = get_errno(set_tid_address((int *)g2h(arg1)));
9725 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
9726 case TARGET_NR_tkill:
9727 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
9731 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
9732 case TARGET_NR_tgkill:
9733 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
9734 target_to_host_signal(arg3)));
9738 #ifdef TARGET_NR_set_robust_list
9739 case TARGET_NR_set_robust_list:
9740 case TARGET_NR_get_robust_list:
9741 /* The ABI for supporting robust futexes has userspace pass
9742 * the kernel a pointer to a linked list which is updated by
9743 * userspace after the syscall; the list is walked by the kernel
9744 * when the thread exits. Since the linked list in QEMU guest
9745 * memory isn't a valid linked list for the host and we have
9746 * no way to reliably intercept the thread-death event, we can't
9747 * support these. Silently return ENOSYS so that guest userspace
9748 * falls back to a non-robust futex implementation (which should
9749 * be OK except in the corner case of the guest crashing while
9750 * holding a mutex that is shared with another process via
9753 goto unimplemented_nowarn;
9756 #if defined(TARGET_NR_utimensat)
9757 case TARGET_NR_utimensat:
9759 struct timespec *tsp, ts[2];
9763 target_to_host_timespec(ts, arg3);
9764 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
9768 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
9770 if (!(p = lock_user_string(arg2))) {
9771 ret = -TARGET_EFAULT;
9774 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
9775 unlock_user(p, arg2, 0);
9780 case TARGET_NR_futex:
9781 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
9783 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
9784 case TARGET_NR_inotify_init:
9785 ret = get_errno(sys_inotify_init());
9788 #ifdef CONFIG_INOTIFY1
9789 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
9790 case TARGET_NR_inotify_init1:
9791 ret = get_errno(sys_inotify_init1(arg1));
9795 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
9796 case TARGET_NR_inotify_add_watch:
9797 p = lock_user_string(arg2);
9798 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
9799 unlock_user(p, arg2, 0);
9802 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
9803 case TARGET_NR_inotify_rm_watch:
9804 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
9808 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
9809 case TARGET_NR_mq_open:
9811 struct mq_attr posix_mq_attr, *attrp;
9813 p = lock_user_string(arg1 - 1);
9815 copy_from_user_mq_attr (&posix_mq_attr, arg4);
9816 attrp = &posix_mq_attr;
9820 ret = get_errno(mq_open(p, arg2, arg3, attrp));
9821 unlock_user (p, arg1, 0);
9825 case TARGET_NR_mq_unlink:
9826 p = lock_user_string(arg1 - 1);
9827 ret = get_errno(mq_unlink(p));
9828 unlock_user (p, arg1, 0);
9831 case TARGET_NR_mq_timedsend:
9835 p = lock_user (VERIFY_READ, arg2, arg3, 1);
9837 target_to_host_timespec(&ts, arg5);
9838 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
9839 host_to_target_timespec(arg5, &ts);
9842 ret = get_errno(mq_send(arg1, p, arg3, arg4));
9843 unlock_user (p, arg2, arg3);
9847 case TARGET_NR_mq_timedreceive:
9852 p = lock_user (VERIFY_READ, arg2, arg3, 1);
9854 target_to_host_timespec(&ts, arg5);
9855 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
9856 host_to_target_timespec(arg5, &ts);
9859 ret = get_errno(mq_receive(arg1, p, arg3, &prio));
9860 unlock_user (p, arg2, arg3);
9862 put_user_u32(prio, arg4);
9866 /* Not implemented for now... */
9867 /* case TARGET_NR_mq_notify: */
9870 case TARGET_NR_mq_getsetattr:
9872 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
9875 ret = mq_getattr(arg1, &posix_mq_attr_out);
9876 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
9879 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
9880 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
9887 #ifdef CONFIG_SPLICE
9888 #ifdef TARGET_NR_tee
9891 ret = get_errno(tee(arg1,arg2,arg3,arg4));
9895 #ifdef TARGET_NR_splice
9896 case TARGET_NR_splice:
9898 loff_t loff_in, loff_out;
9899 loff_t *ploff_in = NULL, *ploff_out = NULL;
9901 if (get_user_u64(loff_in, arg2)) {
9904 ploff_in = &loff_in;
9907 if (get_user_u64(loff_out, arg4)) {
9910 ploff_out = &loff_out;
9912 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
9914 if (put_user_u64(loff_in, arg2)) {
9919 if (put_user_u64(loff_out, arg4)) {
9926 #ifdef TARGET_NR_vmsplice
9927 case TARGET_NR_vmsplice:
9929 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9931 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
9932 unlock_iovec(vec, arg2, arg3, 0);
9934 ret = -host_to_target_errno(errno);
9939 #endif /* CONFIG_SPLICE */
9940 #ifdef CONFIG_EVENTFD
9941 #if defined(TARGET_NR_eventfd)
9942 case TARGET_NR_eventfd:
9943 ret = get_errno(eventfd(arg1, 0));
9944 fd_trans_unregister(ret);
9947 #if defined(TARGET_NR_eventfd2)
9948 case TARGET_NR_eventfd2:
9950 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
9951 if (arg2 & TARGET_O_NONBLOCK) {
9952 host_flags |= O_NONBLOCK;
9954 if (arg2 & TARGET_O_CLOEXEC) {
9955 host_flags |= O_CLOEXEC;
9957 ret = get_errno(eventfd(arg1, host_flags));
9958 fd_trans_unregister(ret);
9962 #endif /* CONFIG_EVENTFD */
9963 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
9964 case TARGET_NR_fallocate:
9965 #if TARGET_ABI_BITS == 32
9966 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
9967 target_offset64(arg5, arg6)));
9969 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
9973 #if defined(CONFIG_SYNC_FILE_RANGE)
9974 #if defined(TARGET_NR_sync_file_range)
9975 case TARGET_NR_sync_file_range:
9976 #if TARGET_ABI_BITS == 32
9977 #if defined(TARGET_MIPS)
9978 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
9979 target_offset64(arg5, arg6), arg7));
9981 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
9982 target_offset64(arg4, arg5), arg6));
9983 #endif /* !TARGET_MIPS */
9985 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
9989 #if defined(TARGET_NR_sync_file_range2)
9990 case TARGET_NR_sync_file_range2:
9991 /* This is like sync_file_range but the arguments are reordered */
9992 #if TARGET_ABI_BITS == 32
9993 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
9994 target_offset64(arg5, arg6), arg2));
9996 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
10001 #if defined(TARGET_NR_signalfd4)
10002 case TARGET_NR_signalfd4:
10003 ret = do_signalfd4(arg1, arg2, arg4);
10006 #if defined(TARGET_NR_signalfd)
10007 case TARGET_NR_signalfd:
10008 ret = do_signalfd4(arg1, arg2, 0);
10011 #if defined(CONFIG_EPOLL)
10012 #if defined(TARGET_NR_epoll_create)
10013 case TARGET_NR_epoll_create:
10014 ret = get_errno(epoll_create(arg1));
10017 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
10018 case TARGET_NR_epoll_create1:
10019 ret = get_errno(epoll_create1(arg1));
10022 #if defined(TARGET_NR_epoll_ctl)
10023 case TARGET_NR_epoll_ctl:
10025 struct epoll_event ep;
10026 struct epoll_event *epp = 0;
10028 struct target_epoll_event *target_ep;
10029 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
10032 ep.events = tswap32(target_ep->events);
10033 /* The epoll_data_t union is just opaque data to the kernel,
10034 * so we transfer all 64 bits across and need not worry what
10035 * actual data type it is.
10037 ep.data.u64 = tswap64(target_ep->data.u64);
10038 unlock_user_struct(target_ep, arg4, 0);
10041 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
10046 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
10047 #define IMPLEMENT_EPOLL_PWAIT
10049 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
10050 #if defined(TARGET_NR_epoll_wait)
10051 case TARGET_NR_epoll_wait:
10053 #if defined(IMPLEMENT_EPOLL_PWAIT)
10054 case TARGET_NR_epoll_pwait:
10057 struct target_epoll_event *target_ep;
10058 struct epoll_event *ep;
10060 int maxevents = arg3;
10061 int timeout = arg4;
10063 target_ep = lock_user(VERIFY_WRITE, arg2,
10064 maxevents * sizeof(struct target_epoll_event), 1);
10069 ep = alloca(maxevents * sizeof(struct epoll_event));
10072 #if defined(IMPLEMENT_EPOLL_PWAIT)
10073 case TARGET_NR_epoll_pwait:
10075 target_sigset_t *target_set;
10076 sigset_t _set, *set = &_set;
10079 target_set = lock_user(VERIFY_READ, arg5,
10080 sizeof(target_sigset_t), 1);
10082 unlock_user(target_ep, arg2, 0);
10085 target_to_host_sigset(set, target_set);
10086 unlock_user(target_set, arg5, 0);
10091 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
10095 #if defined(TARGET_NR_epoll_wait)
10096 case TARGET_NR_epoll_wait:
10097 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
10101 ret = -TARGET_ENOSYS;
10103 if (!is_error(ret)) {
10105 for (i = 0; i < ret; i++) {
10106 target_ep[i].events = tswap32(ep[i].events);
10107 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
10110 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
10115 #ifdef TARGET_NR_prlimit64
10116 case TARGET_NR_prlimit64:
10118 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
10119 struct target_rlimit64 *target_rnew, *target_rold;
10120 struct host_rlimit64 rnew, rold, *rnewp = 0;
10121 int resource = target_to_host_resource(arg2);
10123 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
10126 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
10127 rnew.rlim_max = tswap64(target_rnew->rlim_max);
10128 unlock_user_struct(target_rnew, arg3, 0);
10132 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
10133 if (!is_error(ret) && arg4) {
10134 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
10137 target_rold->rlim_cur = tswap64(rold.rlim_cur);
10138 target_rold->rlim_max = tswap64(rold.rlim_max);
10139 unlock_user_struct(target_rold, arg4, 1);
10144 #ifdef TARGET_NR_gethostname
10145 case TARGET_NR_gethostname:
10147 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10149 ret = get_errno(gethostname(name, arg2));
10150 unlock_user(name, arg1, arg2);
10152 ret = -TARGET_EFAULT;
10157 #ifdef TARGET_NR_atomic_cmpxchg_32
10158 case TARGET_NR_atomic_cmpxchg_32:
10160 /* should use start_exclusive from main.c */
10161 abi_ulong mem_value;
10162 if (get_user_u32(mem_value, arg6)) {
10163 target_siginfo_t info;
10164 info.si_signo = SIGSEGV;
10166 info.si_code = TARGET_SEGV_MAPERR;
10167 info._sifields._sigfault._addr = arg6;
10168 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
10172 if (mem_value == arg2)
10173 put_user_u32(arg1, arg6);
10178 #ifdef TARGET_NR_atomic_barrier
10179 case TARGET_NR_atomic_barrier:
10181 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
10187 #ifdef TARGET_NR_timer_create
10188 case TARGET_NR_timer_create:
10190 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
10192 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
10195 int timer_index = next_free_host_timer();
10197 if (timer_index < 0) {
10198 ret = -TARGET_EAGAIN;
10200 timer_t *phtimer = g_posix_timers + timer_index;
10203 phost_sevp = &host_sevp;
10204 ret = target_to_host_sigevent(phost_sevp, arg2);
10210 ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
10214 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
10223 #ifdef TARGET_NR_timer_settime
10224 case TARGET_NR_timer_settime:
10226 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
10227 * struct itimerspec * old_value */
10228 target_timer_t timerid = get_timer_id(arg1);
10232 } else if (arg3 == 0) {
10233 ret = -TARGET_EINVAL;
10235 timer_t htimer = g_posix_timers[timerid];
10236 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
10238 target_to_host_itimerspec(&hspec_new, arg3);
10240 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
10241 host_to_target_itimerspec(arg2, &hspec_old);
10247 #ifdef TARGET_NR_timer_gettime
10248 case TARGET_NR_timer_gettime:
10250 /* args: timer_t timerid, struct itimerspec *curr_value */
10251 target_timer_t timerid = get_timer_id(arg1);
10255 } else if (!arg2) {
10256 ret = -TARGET_EFAULT;
10258 timer_t htimer = g_posix_timers[timerid];
10259 struct itimerspec hspec;
10260 ret = get_errno(timer_gettime(htimer, &hspec));
10262 if (host_to_target_itimerspec(arg2, &hspec)) {
10263 ret = -TARGET_EFAULT;
10270 #ifdef TARGET_NR_timer_getoverrun
10271 case TARGET_NR_timer_getoverrun:
10273 /* args: timer_t timerid */
10274 target_timer_t timerid = get_timer_id(arg1);
10279 timer_t htimer = g_posix_timers[timerid];
10280 ret = get_errno(timer_getoverrun(htimer));
10282 fd_trans_unregister(ret);
10287 #ifdef TARGET_NR_timer_delete
10288 case TARGET_NR_timer_delete:
10290 /* args: timer_t timerid */
10291 target_timer_t timerid = get_timer_id(arg1);
10296 timer_t htimer = g_posix_timers[timerid];
10297 ret = get_errno(timer_delete(htimer));
10298 g_posix_timers[timerid] = 0;
10304 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
10305 case TARGET_NR_timerfd_create:
10306 ret = get_errno(timerfd_create(arg1,
10307 target_to_host_bitmask(arg2, fcntl_flags_tbl)));
10311 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
10312 case TARGET_NR_timerfd_gettime:
10314 struct itimerspec its_curr;
10316 ret = get_errno(timerfd_gettime(arg1, &its_curr));
10318 if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
10325 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
10326 case TARGET_NR_timerfd_settime:
10328 struct itimerspec its_new, its_old, *p_new;
10331 if (target_to_host_itimerspec(&its_new, arg3)) {
10339 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
10341 if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
10348 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
10349 case TARGET_NR_ioprio_get:
10350 ret = get_errno(ioprio_get(arg1, arg2));
10354 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
10355 case TARGET_NR_ioprio_set:
10356 ret = get_errno(ioprio_set(arg1, arg2, arg3));
10360 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
10361 case TARGET_NR_setns:
10362 ret = get_errno(setns(arg1, arg2));
10365 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
10366 case TARGET_NR_unshare:
10367 ret = get_errno(unshare(arg1));
10373 gemu_log("qemu: Unsupported syscall: %d\n", num);
10374 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
10375 unimplemented_nowarn:
10377 ret = -TARGET_ENOSYS;
10382 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
10385 print_syscall_ret(num, ret);
10388 ret = -TARGET_EFAULT;