4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
32 #include <sys/types.h>
38 #include <sys/mount.h>
40 #include <sys/fsuid.h>
41 #include <sys/personality.h>
42 #include <sys/prctl.h>
43 #include <sys/resource.h>
46 #include <linux/capability.h>
50 int __clone2(int (*fn)(void *), void *child_stack_base,
51 size_t stack_size, int flags, void *arg, ...);
53 #include <sys/socket.h>
57 #include <sys/times.h>
60 #include <sys/statfs.h>
62 #include <sys/sysinfo.h>
63 #include <sys/signalfd.h>
64 //#include <sys/user.h>
65 #include <netinet/ip.h>
66 #include <netinet/tcp.h>
67 #include <linux/wireless.h>
68 #include <linux/icmp.h>
69 #include "qemu-common.h"
71 #include <sys/timerfd.h>
77 #include <sys/eventfd.h>
80 #include <sys/epoll.h>
83 #include "qemu/xattr.h"
85 #ifdef CONFIG_SENDFILE
86 #include <sys/sendfile.h>
89 #define termios host_termios
90 #define winsize host_winsize
91 #define termio host_termio
92 #define sgttyb host_sgttyb /* same as target */
93 #define tchars host_tchars /* same as target */
94 #define ltchars host_ltchars /* same as target */
96 #include <linux/termios.h>
97 #include <linux/unistd.h>
98 #include <linux/cdrom.h>
99 #include <linux/hdreg.h>
100 #include <linux/soundcard.h>
101 #include <linux/kd.h>
102 #include <linux/mtio.h>
103 #include <linux/fs.h>
104 #if defined(CONFIG_FIEMAP)
105 #include <linux/fiemap.h>
107 #include <linux/fb.h>
108 #include <linux/vt.h>
109 #include <linux/dm-ioctl.h>
110 #include <linux/reboot.h>
111 #include <linux/route.h>
112 #include <linux/filter.h>
113 #include <linux/blkpg.h>
114 #include "linux_loop.h"
/* Clone flags the NPTL thread model needs forwarded to the host clone():
 * TLS setup plus parent/child tid bookkeeping. */
119 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
120 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
/* VFAT readdir ioctls defined locally rather than pulling in the whole
 * <linux/msdos_fs.h> header (see the commented-out include). */
124 //#include <linux/msdos_fs.h>
125 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
126 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
/* Generators for thin static wrappers around raw syscall(2), one per arity.
 * Each expands to "static type name(args...) { return syscall(__NR_name, ...); }".
 * NOTE(review): this extract appears to have lines elided (brace lines are
 * missing from the macro bodies) — compare against the full source before
 * editing. */
137 #define _syscall0(type,name) \
138 static type name (void) \
140 return syscall(__NR_##name); \
143 #define _syscall1(type,name,type1,arg1) \
144 static type name (type1 arg1) \
146 return syscall(__NR_##name, arg1); \
149 #define _syscall2(type,name,type1,arg1,type2,arg2) \
150 static type name (type1 arg1,type2 arg2) \
152 return syscall(__NR_##name, arg1, arg2); \
155 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
156 static type name (type1 arg1,type2 arg2,type3 arg3) \
158 return syscall(__NR_##name, arg1, arg2, arg3); \
161 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
162 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
164 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
167 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
169 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
171 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
175 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
176 type5,arg5,type6,arg6) \
177 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
180 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
/* sys_* aliases for the wrappers below, so the generated static functions
 * don't collide with the libc declarations of the same syscalls. */
184 #define __NR_sys_uname __NR_uname
185 #define __NR_sys_getcwd1 __NR_getcwd
186 #define __NR_sys_getdents __NR_getdents
187 #define __NR_sys_getdents64 __NR_getdents64
188 #define __NR_sys_getpriority __NR_getpriority
189 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
190 #define __NR_sys_syslog __NR_syslog
191 #define __NR_sys_tgkill __NR_tgkill
192 #define __NR_sys_tkill __NR_tkill
193 #define __NR_sys_futex __NR_futex
194 #define __NR_sys_inotify_init __NR_inotify_init
195 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
196 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
/* Some 64-bit hosts only provide lseek; map _llseek onto it there. */
198 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
200 #define __NR__llseek __NR_lseek
203 /* Newer kernel ports have llseek() instead of _llseek() */
204 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
205 #define TARGET_NR__llseek TARGET_NR_llseek
/* Instantiate host syscall wrappers via the _syscallN macros above.  Each is
 * guarded so it is only built when both the target ABI and the host kernel
 * provide the syscall.  NOTE(review): the fallback gettid() body is elided
 * in this extract — presumably it returns the host pid/tid; confirm against
 * the full source. */
209 _syscall0(int, gettid)
211 /* This is a replacement for the host gettid() and must return a host
213 static int gettid(void) {
217 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
218 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
220 #if !defined(__NR_getdents) || \
221 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
222 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
224 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
225 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
226 loff_t *, res, uint, wh);
228 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
229 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
230 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
231 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
233 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
234 _syscall2(int,sys_tkill,int,tid,int,sig)
236 #ifdef __NR_exit_group
237 _syscall1(int,exit_group,int,error_code)
239 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
240 _syscall1(int,set_tid_address,int *,tidptr)
242 #if defined(TARGET_NR_futex) && defined(__NR_futex)
243 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
244 const struct timespec *,timeout,int *,uaddr2,int,val3)
/* Raw affinity syscalls: the glibc wrappers hide the kernel cpumask length
 * handling, so the emulator calls the kernel interface directly. */
246 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
247 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
248 unsigned long *, user_mask_ptr);
249 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
250 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
251 unsigned long *, user_mask_ptr);
252 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
254 _syscall2(int, capget, struct __user_cap_header_struct *, header,
255 struct __user_cap_data_struct *, data);
256 _syscall2(int, capset, struct __user_cap_header_struct *, header,
257 struct __user_cap_data_struct *, data);
258 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
259 _syscall2(int, ioprio_get, int, which, int, who)
261 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
262 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
/* Translation table mapping target open(2)/fcntl(2) flag values to host
 * values: { target_mask, target_bits, host_mask, host_bits }.  Entries for
 * flags that may be absent on some hosts are #ifdef-guarded. */
265 static bitmask_transtbl fcntl_flags_tbl[] = {
266 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
267 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
268 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
269 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
270 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
271 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
272 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
273 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
274 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
275 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
276 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
277 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
278 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
279 #if defined(O_DIRECT)
280 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
282 #if defined(O_NOATIME)
283 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
285 #if defined(O_CLOEXEC)
286 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
289 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
291 /* Don't terminate the list prematurely on 64-bit host+guest. */
292 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
293 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
/* Per-fd data translation hooks: some fd types (e.g. signalfd) return
 * host-format structures that must be converted to target format on read,
 * and vice versa on write.  target_fd_trans is a dynamically grown table of
 * pointers indexed by host fd; entries are NULL for fds with no translation. */
298 typedef abi_long (*TargetFdFunc)(void *, size_t);
299 typedef struct TargetFdTrans {
300 TargetFdFunc host_to_target;
301 TargetFdFunc target_to_host;
304 static TargetFdTrans **target_fd_trans;
306 static unsigned int target_fd_max;
/* Return the host->target converter registered for fd, or NULL-equivalent
 * when none is registered (body partially elided in this extract). */
308 static TargetFdFunc fd_trans_host_to_target(int fd)
310 if (fd < target_fd_max && target_fd_trans[fd]) {
311 return target_fd_trans[fd]->host_to_target;
316 static void fd_trans_register(int fd, TargetFdTrans *trans)
320 if (fd >= target_fd_max) {
321 oldmax = target_fd_max;
322 target_fd_max = ((fd >> 6) + 1) << 6; /* by slice of 64 entries */
323 target_fd_trans = g_realloc(target_fd_trans,
324 target_fd_max * sizeof(TargetFdTrans));
325 memset((void *)(target_fd_trans + oldmax), 0,
326 (target_fd_max - oldmax) * sizeof(TargetFdTrans *));
328 target_fd_trans[fd] = trans;
/* Drop any translation hook for fd; safe to call for out-of-range fds. */
331 static void fd_trans_unregister(int fd)
333 if (fd >= 0 && fd < target_fd_max) {
334 target_fd_trans[fd] = NULL;
/* Propagate oldfd's translation hook (if any) to newfd after dup/dup2. */
338 static void fd_trans_dup(int oldfd, int newfd)
340 fd_trans_unregister(newfd);
341 if (oldfd < target_fd_max && target_fd_trans[oldfd]) {
342 fd_trans_register(newfd, target_fd_trans[oldfd]);
/* getcwd() shim returning the kernel convention: length of the path
 * including the terminating NUL, with errno set by getcwd() on failure. */
346 static int sys_getcwd1(char *buf, size_t size)
348 if (getcwd(buf, size) == NULL) {
349 /* getcwd() sets errno */
352 return strlen(buf)+1;
/* openat() shim: forward mode only when O_CREAT is set, since open(2) reads
 * the mode argument only in that case. */
355 static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode)
358 * open(2) has extra parameter 'mode' when called with
361 if ((flags & O_CREAT) != 0) {
362 return (openat(dirfd, pathname, flags, mode));
364 return (openat(dirfd, pathname, flags));
/* utimensat shim, three variants: use the libc wrapper when available
 * (CONFIG_UTIMENSAT), else call the raw syscall when the host kernel has it,
 * else a stub (elided here — presumably returns ENOSYS; confirm in full
 * source).  A NULL pathname means "operate on dirfd itself" via futimens. */
367 #ifdef TARGET_NR_utimensat
368 #ifdef CONFIG_UTIMENSAT
369 static int sys_utimensat(int dirfd, const char *pathname,
370 const struct timespec times[2], int flags)
372 if (pathname == NULL)
373 return futimens(dirfd, times);
375 return utimensat(dirfd, pathname, times, flags);
377 #elif defined(__NR_utimensat)
378 #define __NR_sys_utimensat __NR_utimensat
379 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
380 const struct timespec *,tsp,int,flags)
382 static int sys_utimensat(int dirfd, const char *pathname,
383 const struct timespec times[2], int flags)
389 #endif /* TARGET_NR_utimensat */
/* inotify shims: prefer the libc wrappers when configure detected them;
 * otherwise the TARGET_NR_inotify_* syscalls are #undef'd below so the
 * guest just sees ENOSYS — userspace usually copes without inotify. */
391 #ifdef CONFIG_INOTIFY
392 #include <sys/inotify.h>
394 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
395 static int sys_inotify_init(void)
397 return (inotify_init());
400 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
401 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
403 return (inotify_add_watch(fd, pathname, mask));
406 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
407 static int sys_inotify_rm_watch(int fd, int32_t wd)
409 return (inotify_rm_watch(fd, wd));
412 #ifdef CONFIG_INOTIFY1
413 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
414 static int sys_inotify_init1(int flags)
416 return (inotify_init1(flags));
421 /* Userspace can usually survive runtime without inotify */
422 #undef TARGET_NR_inotify_init
423 #undef TARGET_NR_inotify_init1
424 #undef TARGET_NR_inotify_add_watch
425 #undef TARGET_NR_inotify_rm_watch
426 #endif /* CONFIG_INOTIFY */
/* Raw-syscall wrappers for ppoll/pselect6/prlimit64.  When the host kernel
 * header lacks the syscall number it is defined to -1 so the wrapper
 * compiles but fails at runtime with ENOSYS. */
428 #if defined(TARGET_NR_ppoll)
430 # define __NR_ppoll -1
432 #define __NR_sys_ppoll __NR_ppoll
433 _syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
434 struct timespec *, timeout, const sigset_t *, sigmask,
438 #if defined(TARGET_NR_pselect6)
439 #ifndef __NR_pselect6
440 # define __NR_pselect6 -1
442 #define __NR_sys_pselect6 __NR_pselect6
443 _syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
444 fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
447 #if defined(TARGET_NR_prlimit64)
448 #ifndef __NR_prlimit64
449 # define __NR_prlimit64 -1
451 #define __NR_sys_prlimit64 __NR_prlimit64
452 /* The glibc rlimit structure may not be that used by the underlying syscall */
453 struct host_rlimit64 {
457 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
458 const struct host_rlimit64 *, new_limit,
459 struct host_rlimit64 *, old_limit)
463 #if defined(TARGET_NR_timer_create)
464 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
465 static timer_t g_posix_timers[32] = { 0, } ;
/* Find a free slot in g_posix_timers and claim it by storing a dummy
 * non-zero value; returns the slot index (failure path elided here).
 * FIXME in original: slot claiming is not protected by a lock, so two
 * threads creating timers concurrently could race. */
467 static inline int next_free_host_timer(void)
470 /* FIXME: Does finding the next free slot require a lock? */
471 for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
472 if (g_posix_timers[k] == 0) {
473 g_posix_timers[k] = (timer_t) 1;
481 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
/* regpairs_aligned(): per-target predicate, 1 when 64-bit syscall arguments
 * must start on an even register number (ARM EABI, MIPS, 32-bit PPC),
 * 0 otherwise. */
483 static inline int regpairs_aligned(void *cpu_env) {
484 return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
486 #elif defined(TARGET_MIPS)
487 static inline int regpairs_aligned(void *cpu_env) { return 1; }
488 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
489 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
490 * of registers which translates to the same as ARM/MIPS, because we start with
492 static inline int regpairs_aligned(void *cpu_env) { return 1; }
494 static inline int regpairs_aligned(void *cpu_env) { return 0; }
/* Host<->target errno translation.  host_to_target_errno_table[] below lists
 * only the errno values whose numbering differs between architectures; the
 * reverse table is derived from it at startup.  Untranslated values pass
 * through unchanged (the fall-through returns are elided in this extract). */
497 #define ERRNO_TABLE_SIZE 1200
499 /* target_to_host_errno_table[] is initialized from
500 * host_to_target_errno_table[] in syscall_init(). */
501 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
505 * This list is the union of errno values overridden in asm-<arch>/errno.h
506 * minus the errnos that are not actually generic to all archs.
508 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
509 [EAGAIN] = TARGET_EAGAIN,
510 [EIDRM] = TARGET_EIDRM,
511 [ECHRNG] = TARGET_ECHRNG,
512 [EL2NSYNC] = TARGET_EL2NSYNC,
513 [EL3HLT] = TARGET_EL3HLT,
514 [EL3RST] = TARGET_EL3RST,
515 [ELNRNG] = TARGET_ELNRNG,
516 [EUNATCH] = TARGET_EUNATCH,
517 [ENOCSI] = TARGET_ENOCSI,
518 [EL2HLT] = TARGET_EL2HLT,
519 [EDEADLK] = TARGET_EDEADLK,
520 [ENOLCK] = TARGET_ENOLCK,
521 [EBADE] = TARGET_EBADE,
522 [EBADR] = TARGET_EBADR,
523 [EXFULL] = TARGET_EXFULL,
524 [ENOANO] = TARGET_ENOANO,
525 [EBADRQC] = TARGET_EBADRQC,
526 [EBADSLT] = TARGET_EBADSLT,
527 [EBFONT] = TARGET_EBFONT,
528 [ENOSTR] = TARGET_ENOSTR,
529 [ENODATA] = TARGET_ENODATA,
530 [ETIME] = TARGET_ETIME,
531 [ENOSR] = TARGET_ENOSR,
532 [ENONET] = TARGET_ENONET,
533 [ENOPKG] = TARGET_ENOPKG,
534 [EREMOTE] = TARGET_EREMOTE,
535 [ENOLINK] = TARGET_ENOLINK,
536 [EADV] = TARGET_EADV,
537 [ESRMNT] = TARGET_ESRMNT,
538 [ECOMM] = TARGET_ECOMM,
539 [EPROTO] = TARGET_EPROTO,
540 [EDOTDOT] = TARGET_EDOTDOT,
541 [EMULTIHOP] = TARGET_EMULTIHOP,
542 [EBADMSG] = TARGET_EBADMSG,
543 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
544 [EOVERFLOW] = TARGET_EOVERFLOW,
545 [ENOTUNIQ] = TARGET_ENOTUNIQ,
546 [EBADFD] = TARGET_EBADFD,
547 [EREMCHG] = TARGET_EREMCHG,
548 [ELIBACC] = TARGET_ELIBACC,
549 [ELIBBAD] = TARGET_ELIBBAD,
550 [ELIBSCN] = TARGET_ELIBSCN,
551 [ELIBMAX] = TARGET_ELIBMAX,
552 [ELIBEXEC] = TARGET_ELIBEXEC,
553 [EILSEQ] = TARGET_EILSEQ,
554 [ENOSYS] = TARGET_ENOSYS,
555 [ELOOP] = TARGET_ELOOP,
556 [ERESTART] = TARGET_ERESTART,
557 [ESTRPIPE] = TARGET_ESTRPIPE,
558 [ENOTEMPTY] = TARGET_ENOTEMPTY,
559 [EUSERS] = TARGET_EUSERS,
560 [ENOTSOCK] = TARGET_ENOTSOCK,
561 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
562 [EMSGSIZE] = TARGET_EMSGSIZE,
563 [EPROTOTYPE] = TARGET_EPROTOTYPE,
564 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
565 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
566 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
567 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
568 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
569 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
570 [EADDRINUSE] = TARGET_EADDRINUSE,
571 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
572 [ENETDOWN] = TARGET_ENETDOWN,
573 [ENETUNREACH] = TARGET_ENETUNREACH,
574 [ENETRESET] = TARGET_ENETRESET,
575 [ECONNABORTED] = TARGET_ECONNABORTED,
576 [ECONNRESET] = TARGET_ECONNRESET,
577 [ENOBUFS] = TARGET_ENOBUFS,
578 [EISCONN] = TARGET_EISCONN,
579 [ENOTCONN] = TARGET_ENOTCONN,
580 [EUCLEAN] = TARGET_EUCLEAN,
581 [ENOTNAM] = TARGET_ENOTNAM,
582 [ENAVAIL] = TARGET_ENAVAIL,
583 [EISNAM] = TARGET_EISNAM,
584 [EREMOTEIO] = TARGET_EREMOTEIO,
585 [ESHUTDOWN] = TARGET_ESHUTDOWN,
586 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
587 [ETIMEDOUT] = TARGET_ETIMEDOUT,
588 [ECONNREFUSED] = TARGET_ECONNREFUSED,
589 [EHOSTDOWN] = TARGET_EHOSTDOWN,
590 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
591 [EALREADY] = TARGET_EALREADY,
592 [EINPROGRESS] = TARGET_EINPROGRESS,
593 [ESTALE] = TARGET_ESTALE,
594 [ECANCELED] = TARGET_ECANCELED,
595 [ENOMEDIUM] = TARGET_ENOMEDIUM,
596 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
598 [ENOKEY] = TARGET_ENOKEY,
601 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
604 [EKEYREVOKED] = TARGET_EKEYREVOKED,
607 [EKEYREJECTED] = TARGET_EKEYREJECTED,
610 [EOWNERDEAD] = TARGET_EOWNERDEAD,
612 #ifdef ENOTRECOVERABLE
613 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
/* Translate a host errno to the target's numbering (identity when the table
 * has no entry — fall-through return elided here). */
617 static inline int host_to_target_errno(int err)
619 if(host_to_target_errno_table[err])
620 return host_to_target_errno_table[err];
/* Reverse translation, target errno -> host errno. */
624 static inline int target_to_host_errno(int err)
626 if (target_to_host_errno_table[err])
627 return target_to_host_errno_table[err];
/* Convert a host syscall result to target convention: negative target errno
 * on failure (the success path is elided in this extract). */
631 static inline abi_long get_errno(abi_long ret)
634 return -host_to_target_errno(errno);
/* Kernel convention: values in the top 4095 of the address space are
 * negated errnos, everything else is a success value. */
639 static inline int is_error(abi_long ret)
641 return (abi_ulong)ret >= (abi_ulong)(-4096);
/* strerror() for a *target* errno value; out-of-range values are rejected
 * (rejection branch elided here). */
644 char *target_strerror(int err)
646 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
649 return strerror(target_to_host_errno(err));
/* Map a host socket type (plus SOCK_CLOEXEC/SOCK_NONBLOCK modifier bits,
 * where the host defines them) to the target's encoding.  The low nibble is
 * the base type; unknown base types pass through unchanged. */
652 static inline int host_to_target_sock_type(int host_type)
656 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
658 target_type = TARGET_SOCK_DGRAM;
661 target_type = TARGET_SOCK_STREAM;
664 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
668 #if defined(SOCK_CLOEXEC)
669 if (host_type & SOCK_CLOEXEC) {
670 target_type |= TARGET_SOCK_CLOEXEC;
674 #if defined(SOCK_NONBLOCK)
675 if (host_type & SOCK_NONBLOCK) {
676 target_type |= TARGET_SOCK_NONBLOCK;
/* Guest heap state for brk emulation:
 *   target_original_brk - initial break, lower bound for new_brk
 *   target_brk          - current break as seen by the guest
 *   brk_page            - host-page-aligned top of memory reserved so far */
683 static abi_ulong target_brk;
684 static abi_ulong target_original_brk;
685 static abi_ulong brk_page;
687 void target_set_brk(abi_ulong new_brk)
689 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
690 brk_page = HOST_PAGE_ALIGN(target_brk);
/* Debug tracing for do_brk(); disabled by default (empty expansion). */
693 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
694 #define DEBUGF_BRK(message, args...)
696 /* do_brk() must return target values and target errnos. */
/* Emulate the guest brk(2).  Cases, in order:
 *   - new_brk == 0 or below the original break: report the current break
 *   - new_brk within the already-reserved pages: just move the break,
 *     zeroing any newly exposed bytes (anonymous-mapping semantics)
 *   - otherwise mmap more memory after brk_page; "mapped but at the wrong
 *     address" is treated as out-of-memory and unmapped again.
 * NOTE(review): several lines (declarations, early returns, closing braces)
 * are elided in this extract; do not edit without the full source. */
697 abi_long do_brk(abi_ulong new_brk)
699 abi_long mapped_addr;
702 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
705 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
708 if (new_brk < target_original_brk) {
709 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
714 /* If the new brk is less than the highest page reserved to the
715 * target heap allocation, set it and we're almost done... */
716 if (new_brk <= brk_page) {
717 /* Heap contents are initialized to zero, as for anonymous
719 if (new_brk > target_brk) {
720 memset(g2h(target_brk), 0, new_brk - target_brk);
722 target_brk = new_brk;
723 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
727 /* We need to allocate more memory after the brk... Note that
728 * we don't use MAP_FIXED because that will map over the top of
729 * any existing mapping (like the one with the host libc or qemu
730 * itself); instead we treat "mapped but at wrong address" as
731 * a failure and unmap again.
733 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
734 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
735 PROT_READ|PROT_WRITE,
736 MAP_ANON|MAP_PRIVATE, 0, 0));
738 if (mapped_addr == brk_page) {
739 /* Heap contents are initialized to zero, as for anonymous
740 * mapped pages. Technically the new pages are already
741 * initialized to zero since they *are* anonymous mapped
742 * pages, however we have to take care with the contents that
743 * come from the remaining part of the previous page: it may
744 * contains garbage data due to a previous heap usage (grown
746 memset(g2h(target_brk), 0, brk_page - target_brk);
748 target_brk = new_brk;
749 brk_page = HOST_PAGE_ALIGN(target_brk);
750 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
753 } else if (mapped_addr != -1) {
754 /* Mapped but at wrong address, meaning there wasn't actually
755 * enough space for this brk.
757 target_munmap(mapped_addr, new_alloc_size);
759 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
762 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
765 #if defined(TARGET_ALPHA)
766 /* We (partially) emulate OSF/1 on Alpha, which requires we
767 return a proper errno, not an unchanged brk value. */
768 return -TARGET_ENOMEM;
770 /* For everything else, return the previous break. */
/* Read a target fd_set (an array of abi_ulong words) from guest memory into
 * a host fd_set, bit by bit.  Returns -TARGET_EFAULT when the guest buffer
 * cannot be locked. */
774 static inline abi_long copy_from_user_fdset(fd_set *fds,
775 abi_ulong target_fds_addr,
779 abi_ulong b, *target_fds;
781 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
782 if (!(target_fds = lock_user(VERIFY_READ,
784 sizeof(abi_ulong) * nw,
786 return -TARGET_EFAULT;
790 for (i = 0; i < nw; i++) {
791 /* grab the abi_ulong */
792 __get_user(b, &target_fds[i]);
793 for (j = 0; j < TARGET_ABI_BITS; j++) {
794 /* check the bit inside the abi_ulong */
801 unlock_user(target_fds, target_fds_addr, 0);
/* Wrapper used by do_select(): a zero guest address means "no fd_set here"
 * (the NULL-out of *fds_ptr is elided in this extract). */
806 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
807 abi_ulong target_fds_addr,
810 if (target_fds_addr) {
811 if (copy_from_user_fdset(fds, target_fds_addr, n))
812 return -TARGET_EFAULT;
/* Inverse of copy_from_user_fdset: write a host fd_set back to the guest,
 * packing FD_ISSET bits into abi_ulong words. */
820 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
826 abi_ulong *target_fds;
828 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
829 if (!(target_fds = lock_user(VERIFY_WRITE,
831 sizeof(abi_ulong) * nw,
833 return -TARGET_EFAULT;
836 for (i = 0; i < nw; i++) {
838 for (j = 0; j < TARGET_ABI_BITS; j++) {
839 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
842 __put_user(v, &target_fds[i]);
845 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
850 #if defined(__alpha__)
/* Rescale clock ticks from the host's HZ to the target's when they differ;
 * the 64-bit intermediate avoids overflow in the multiply. */
856 static inline abi_long host_to_target_clock_t(long ticks)
858 #if HOST_HZ == TARGET_HZ
861 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
/* Copy a host struct rusage into guest memory at target_addr, byte-swapping
 * each field to the target's endianness.  Returns -TARGET_EFAULT when the
 * guest struct cannot be locked for writing. */
865 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
866 const struct rusage *rusage)
868 struct target_rusage *target_rusage;
870 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
871 return -TARGET_EFAULT;
872 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
873 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
874 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
875 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
876 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
877 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
878 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
879 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
880 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
881 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
882 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
883 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
884 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
885 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
886 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
887 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
888 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
889 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
890 unlock_user_struct(target_rusage, target_addr, 1);
/* Convert a target rlimit value to a host rlim_t.  The target's "infinity"
 * constant maps to the host RLIM_INFINITY; any value that would not survive
 * the round-trip through rlim_t is also treated as infinity. */
895 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
897 abi_ulong target_rlim_swap;
900 target_rlim_swap = tswapal(target_rlim);
901 if (target_rlim_swap == TARGET_RLIM_INFINITY)
902 return RLIM_INFINITY;
904 result = target_rlim_swap;
905 if (target_rlim_swap != (rlim_t)result)
906 return RLIM_INFINITY;
/* Inverse conversion: host rlim_t to target value, saturating to the
 * target's infinity when the host value does not fit. */
911 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
913 abi_ulong target_rlim_swap;
916 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
917 target_rlim_swap = TARGET_RLIM_INFINITY;
919 target_rlim_swap = rlim;
920 result = tswapal(target_rlim_swap);
/* Map a target RLIMIT_* resource code to the host's numbering (default
 * pass-through case elided in this extract). */
925 static inline int target_to_host_resource(int code)
928 case TARGET_RLIMIT_AS:
930 case TARGET_RLIMIT_CORE:
932 case TARGET_RLIMIT_CPU:
934 case TARGET_RLIMIT_DATA:
936 case TARGET_RLIMIT_FSIZE:
938 case TARGET_RLIMIT_LOCKS:
940 case TARGET_RLIMIT_MEMLOCK:
941 return RLIMIT_MEMLOCK;
942 case TARGET_RLIMIT_MSGQUEUE:
943 return RLIMIT_MSGQUEUE;
944 case TARGET_RLIMIT_NICE:
946 case TARGET_RLIMIT_NOFILE:
947 return RLIMIT_NOFILE;
948 case TARGET_RLIMIT_NPROC:
950 case TARGET_RLIMIT_RSS:
952 case TARGET_RLIMIT_RTPRIO:
953 return RLIMIT_RTPRIO;
954 case TARGET_RLIMIT_SIGPENDING:
955 return RLIMIT_SIGPENDING;
956 case TARGET_RLIMIT_STACK:
/* Guest<->host copies of small time/ipc structs.  All follow the same
 * pattern: lock the guest struct, __get_user/__put_user each field (which
 * handles endianness), unlock, and return -TARGET_EFAULT when the guest
 * address cannot be locked. */
963 static inline abi_long copy_from_user_timeval(struct timeval *tv,
964 abi_ulong target_tv_addr)
966 struct target_timeval *target_tv;
968 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
969 return -TARGET_EFAULT;
971 __get_user(tv->tv_sec, &target_tv->tv_sec);
972 __get_user(tv->tv_usec, &target_tv->tv_usec);
974 unlock_user_struct(target_tv, target_tv_addr, 0);
/* Host timeval -> guest memory. */
979 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
980 const struct timeval *tv)
982 struct target_timeval *target_tv;
984 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
985 return -TARGET_EFAULT;
987 __put_user(tv->tv_sec, &target_tv->tv_sec);
988 __put_user(tv->tv_usec, &target_tv->tv_usec);
990 unlock_user_struct(target_tv, target_tv_addr, 1);
/* Guest timezone -> host struct timezone. */
995 static inline abi_long copy_from_user_timezone(struct timezone *tz,
996 abi_ulong target_tz_addr)
998 struct target_timezone *target_tz;
1000 if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1001 return -TARGET_EFAULT;
1004 __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1005 __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1007 unlock_user_struct(target_tz, target_tz_addr, 0);
/* POSIX message queue attributes, both directions; only compiled when the
 * target and host both support mq_open. */
1012 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1015 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1016 abi_ulong target_mq_attr_addr)
1018 struct target_mq_attr *target_mq_attr;
1020 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1021 target_mq_attr_addr, 1))
1022 return -TARGET_EFAULT;
1024 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1025 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1026 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1027 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1029 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1034 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1035 const struct mq_attr *attr)
1037 struct target_mq_attr *target_mq_attr;
1039 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1040 target_mq_attr_addr, 0))
1041 return -TARGET_EFAULT;
1043 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1044 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1045 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1046 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1048 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1054 #if defined(TARGET_NR_select)
1055 /* do_select() must return target values and target errnos. */
/* Emulate select(2): copy the three fd_sets and the timeout in from guest
 * memory, run the host select, then copy results back out.  A zero guest
 * address for a set or the timeout means NULL.  NOTE(review): the early
 * returns after each copy_from_user_fdset_ptr call are elided here. */
1056 static abi_long do_select(int n,
1057 abi_ulong rfd_addr, abi_ulong wfd_addr,
1058 abi_ulong efd_addr, abi_ulong target_tv_addr)
1060 fd_set rfds, wfds, efds;
1061 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1062 struct timeval tv, *tv_ptr;
1065 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1069 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1073 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1078 if (target_tv_addr) {
1079 if (copy_from_user_timeval(&tv, target_tv_addr))
1080 return -TARGET_EFAULT;
1086 ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));
1088 if (!is_error(ret)) {
1089 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1090 return -TARGET_EFAULT;
1091 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1092 return -TARGET_EFAULT;
1093 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1094 return -TARGET_EFAULT;
1096 if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
1097 return -TARGET_EFAULT;
/* pipe2(2) passthrough (the non-CONFIG_PIPE2 fallback is elided here). */
1104 static abi_long do_pipe2(int host_pipe[], int flags)
1107 return pipe2(host_pipe, flags);
/* Emulate pipe/pipe2.  Several targets return the second fd in a CPU
 * register instead of writing both fds to guest memory — but only for the
 * original pipe syscall, not pipe2, hence the is_pipe2 checks (partially
 * elided in this extract). */
1113 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1114 int flags, int is_pipe2)
1118 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1121 return get_errno(ret);
1123 /* Several targets have special calling conventions for the original
1124 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1126 #if defined(TARGET_ALPHA)
1127 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1128 return host_pipe[0];
1129 #elif defined(TARGET_MIPS)
1130 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1131 return host_pipe[0];
1132 #elif defined(TARGET_SH4)
1133 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1134 return host_pipe[0];
1135 #elif defined(TARGET_SPARC)
1136 ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1137 return host_pipe[0];
/* Generic path: write both fds into guest memory at pipedes. */
1141 if (put_user_s32(host_pipe[0], pipedes)
1142 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1143 return -TARGET_EFAULT;
1144 return get_errno(ret);
/* Convert a guest ip_mreq/ip_mreqn (multicast membership request) to the
 * host form; the ifindex field only exists in the larger mreqn variant. */
1147 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1148 abi_ulong target_addr,
1151 struct target_ip_mreqn *target_smreqn;
1153 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1155 return -TARGET_EFAULT;
1156 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1157 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1158 if (len == sizeof(struct target_ip_mreqn))
1159 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1160 unlock_user(target_smreqn, target_addr, 0);
/* Copy a guest sockaddr to a host one: fix the family's byte order, repair
 * common AF_UNIX "forgot the NUL in the length" mistakes (as the kernel
 * does), and byte-swap the AF_PACKET-specific fields. */
1165 static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
1166 abi_ulong target_addr,
1169 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1170 sa_family_t sa_family;
1171 struct target_sockaddr *target_saddr;
1173 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1175 return -TARGET_EFAULT;
1177 sa_family = tswap16(target_saddr->sa_family);
1179 /* Oops. The caller might send a incomplete sun_path; sun_path
1180 * must be terminated by \0 (see the manual page), but
1181 * unfortunately it is quite common to specify sockaddr_un
1182 * length as "strlen(x->sun_path)" while it should be
1183 * "strlen(...) + 1". We'll fix that here if needed.
1184 * Linux kernel has a similar feature.
1187 if (sa_family == AF_UNIX) {
1188 if (len < unix_maxlen && len > 0) {
1189 char *cp = (char*)target_saddr;
1191 if ( cp[len-1] && !cp[len] )
1194 if (len > unix_maxlen)
1198 memcpy(addr, target_saddr, len);
1199 addr->sa_family = sa_family;
1200 if (sa_family == AF_PACKET) {
1201 struct target_sockaddr_ll *lladdr;
1203 lladdr = (struct target_sockaddr_ll *)addr;
1204 lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1205 lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1207 unlock_user(target_saddr, target_addr, 0);
/* Inverse: copy a host sockaddr into guest memory, swapping the family. */
1212 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1213 struct sockaddr *addr,
1216 struct target_sockaddr *target_saddr;
1218 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1220 return -TARGET_EFAULT;
1221 memcpy(target_saddr, addr, len);
1222 target_saddr->sa_family = tswap16(addr->sa_family);
1223 unlock_user(target_saddr, target_addr, len);
/*
 * target_to_host_cmsg: convert the ancillary-data (control message) chain
 * of a guest msghdr into host format inside msgh->msg_control.
 * Handles SCM_RIGHTS (fd arrays) and SCM_CREDENTIALS explicitly; any other
 * payload is logged and copied verbatim.  Sets msgh->msg_controllen to the
 * total host space consumed.
 * NOTE(review): elided listing -- early-return guards, braces and the
 * final 'return 0' are not shown.
 */
1228 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1229 struct target_msghdr *target_msgh)
1231 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1232 abi_long msg_controllen;
1233 abi_ulong target_cmsg_addr;
1234 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1235 socklen_t space = 0;
1237 msg_controllen = tswapal(target_msgh->msg_controllen);
/* Too small to hold even one header: presumably bail out (elided). */
1238 if (msg_controllen < sizeof (struct target_cmsghdr))
1240 target_cmsg_addr = tswapal(target_msgh->msg_control);
1241 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1242 target_cmsg_start = target_cmsg;
1244 return -TARGET_EFAULT;
/* Walk both chains in lock-step until either runs out. */
1246 while (cmsg && target_cmsg) {
1247 void *data = CMSG_DATA(cmsg);
1248 void *target_data = TARGET_CMSG_DATA(target_cmsg);
/* Payload length = recorded cmsg_len minus the (aligned) header size. */
1250 int len = tswapal(target_cmsg->cmsg_len)
1251 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
1253 space += CMSG_SPACE(len);
1254 if (space > msgh->msg_controllen) {
1255 space -= CMSG_SPACE(len);
1256 /* This is a QEMU bug, since we allocated the payload
1257 * area ourselves (unlike overflow in host-to-target
1258 * conversion, which is just the guest giving us a buffer
1259 * that's too small). It can't happen for the payload types
1260 * we currently support; if it becomes an issue in future
1261 * we would need to improve our allocation strategy to
1262 * something more intelligent than "twice the size of the
1263 * target buffer we're reading from".
1265 gemu_log("Host cmsg overflow\n");
1269 if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1270 cmsg->cmsg_level = SOL_SOCKET;
1272 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1274 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1275 cmsg->cmsg_len = CMSG_LEN(len);
/* SCM_RIGHTS: element-wise swap of the file-descriptor array. */
1277 if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1278 int *fd = (int *)data;
1279 int *target_fd = (int *)target_data;
1280 int i, numfds = len / sizeof(int);
1282 for (i = 0; i < numfds; i++) {
1283 __get_user(fd[i], target_fd + i);
1285 } else if (cmsg->cmsg_level == SOL_SOCKET
1286 && cmsg->cmsg_type == SCM_CREDENTIALS) {
1287 struct ucred *cred = (struct ucred *)data;
1288 struct target_ucred *target_cred =
1289 (struct target_ucred *)target_data;
1291 __get_user(cred->pid, &target_cred->pid);
1292 __get_user(cred->uid, &target_cred->uid);
1293 __get_user(cred->gid, &target_cred->gid);
/* Unknown type: warn and pass the payload through unconverted. */
1295 gemu_log("Unsupported ancillary data: %d/%d\n",
1296 cmsg->cmsg_level, cmsg->cmsg_type);
1297 memcpy(data, target_data, len);
1300 cmsg = CMSG_NXTHDR(msgh, cmsg);
1301 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1304 unlock_user(target_cmsg, target_cmsg_addr, 0);
1306 msgh->msg_controllen = space;
/*
 * host_to_target_cmsg: convert host ancillary data in msgh back into the
 * guest's control buffer, truncating like Linux put_cmsg() does (whole
 * headers are never split, payloads may be) and reporting truncation via
 * MSG_CTRUNC in the guest msg_flags.  Handles SCM_RIGHTS, SO_TIMESTAMP
 * timevals and SCM_CREDENTIALS; other payloads are logged and memcpy'd.
 * NOTE(review): elided listing -- several case labels, 'break's, braces
 * and the final 'return 0' are not shown.
 */
1310 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1311 struct msghdr *msgh)
1313 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1314 abi_long msg_controllen;
1315 abi_ulong target_cmsg_addr;
1316 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1317 socklen_t space = 0;
1319 msg_controllen = tswapal(target_msgh->msg_controllen);
1320 if (msg_controllen < sizeof (struct target_cmsghdr))
1322 target_cmsg_addr = tswapal(target_msgh->msg_control);
1323 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1324 target_cmsg_start = target_cmsg;
1326 return -TARGET_EFAULT;
1328 while (cmsg && target_cmsg) {
1329 void *data = CMSG_DATA(cmsg);
1330 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1332 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
1333 int tgt_len, tgt_space;
1335 /* We never copy a half-header but may copy half-data;
1336 * this is Linux's behaviour in put_cmsg(). Note that
1337 * truncation here is a guest problem (which we report
1338 * to the guest via the CTRUNC bit), unlike truncation
1339 * in target_to_host_cmsg, which is a QEMU bug.
1341 if (msg_controllen < sizeof(struct cmsghdr)) {
1342 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1346 if (cmsg->cmsg_level == SOL_SOCKET) {
1347 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1349 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1351 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1353 tgt_len = TARGET_CMSG_LEN(len);
1355 /* Payload types which need a different size of payload on
1356 * the target must adjust tgt_len here.
1358 switch (cmsg->cmsg_level) {
1360 switch (cmsg->cmsg_type) {
/* SO_TIMESTAMP case (label elided): target timeval may differ in size. */
1362 tgt_len = sizeof(struct target_timeval);
/* Guest buffer too small for this payload: flag CTRUNC and clamp. */
1371 if (msg_controllen < tgt_len) {
1372 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1373 tgt_len = msg_controllen;
1376 /* We must now copy-and-convert len bytes of payload
1377 * into tgt_len bytes of destination space. Bear in mind
1378 * that in both source and destination we may be dealing
1379 * with a truncated value!
1381 switch (cmsg->cmsg_level) {
1383 switch (cmsg->cmsg_type) {
/* SCM_RIGHTS case (label elided): swap each fd that fits in tgt_len. */
1386 int *fd = (int *)data;
1387 int *target_fd = (int *)target_data;
1388 int i, numfds = tgt_len / sizeof(int);
1390 for (i = 0; i < numfds; i++) {
1391 __put_user(fd[i], target_fd + i);
/* SO_TIMESTAMP case (label elided): convert host timeval. */
1397 struct timeval *tv = (struct timeval *)data;
1398 struct target_timeval *target_tv =
1399 (struct target_timeval *)target_data;
/* Truncated on either side: cannot do a field-wise conversion. */
1401 if (len != sizeof(struct timeval) ||
1402 tgt_len != sizeof(struct target_timeval)) {
1406 /* copy struct timeval to target */
1407 __put_user(tv->tv_sec, &target_tv->tv_sec);
1408 __put_user(tv->tv_usec, &target_tv->tv_usec);
1411 case SCM_CREDENTIALS:
1413 struct ucred *cred = (struct ucred *)data;
1414 struct target_ucred *target_cred =
1415 (struct target_ucred *)target_data;
1417 __put_user(cred->pid, &target_cred->pid);
1418 __put_user(cred->uid, &target_cred->uid);
1419 __put_user(cred->gid, &target_cred->gid);
/* Default: warn, raw-copy what fits, zero-fill any remainder. */
1429 gemu_log("Unsupported ancillary data: %d/%d\n",
1430 cmsg->cmsg_level, cmsg->cmsg_type);
1431 memcpy(target_data, data, MIN(len, tgt_len));
1432 if (tgt_len > len) {
1433 memset(target_data + len, 0, tgt_len - len);
1437 target_cmsg->cmsg_len = tswapal(tgt_len);
1438 tgt_space = TARGET_CMSG_SPACE(len);
1439 if (msg_controllen < tgt_space) {
1440 tgt_space = msg_controllen;
1442 msg_controllen -= tgt_space;
1444 cmsg = CMSG_NXTHDR(msgh, cmsg);
1445 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1448 unlock_user(target_cmsg, target_cmsg_addr, space);
/* Report the actual control bytes written back to the guest. */
1450 target_msgh->msg_controllen = tswapal(space);
/*
 * do_setsockopt: emulate setsockopt(2) for the guest, translating option
 * levels/names and option payloads from target to host representation.
 * Returns 0 or a negative TARGET_* errno.
 * NOTE(review): elided listing -- the switch(level) dispatch lines, many
 * case labels, 'break's and closing braces are not shown; comments below
 * that name a level (TCP/IP/IPv6/RAW/SOL_SOCKET) infer it from the
 * surrounding case labels and should be confirmed against the full file.
 */
1454 /* do_setsockopt() Must return target values and target errnos. */
1455 static abi_long do_setsockopt(int sockfd, int level, int optname,
1456 abi_ulong optval_addr, socklen_t optlen)
1460 struct ip_mreqn *ip_mreq;
1461 struct ip_mreq_source *ip_mreq_source;
/* --- SOL_TCP branch (dispatch elided): plain int option. --- */
1465 /* TCP options all take an 'int' value. */
1466 if (optlen < sizeof(uint32_t))
1467 return -TARGET_EINVAL;
1469 if (get_user_u32(val, optval_addr))
1470 return -TARGET_EFAULT;
1471 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
/* --- SOL_IP branch: options accepting either a u32 or a single byte. --- */
1478 case IP_ROUTER_ALERT:
1482 case IP_MTU_DISCOVER:
1488 case IP_MULTICAST_TTL:
1489 case IP_MULTICAST_LOOP:
1491 if (optlen >= sizeof(uint32_t)) {
1492 if (get_user_u32(val, optval_addr))
1493 return -TARGET_EFAULT;
1494 } else if (optlen >= 1) {
1495 if (get_user_u8(val, optval_addr))
1496 return -TARGET_EFAULT;
1498 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1500 case IP_ADD_MEMBERSHIP:
1501 case IP_DROP_MEMBERSHIP:
/* Accept anything between ip_mreq and ip_mreqn sized payloads. */
1502 if (optlen < sizeof (struct target_ip_mreq) ||
1503 optlen > sizeof (struct target_ip_mreqn))
1504 return -TARGET_EINVAL;
1506 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1507 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1508 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1511 case IP_BLOCK_SOURCE:
1512 case IP_UNBLOCK_SOURCE:
1513 case IP_ADD_SOURCE_MEMBERSHIP:
1514 case IP_DROP_SOURCE_MEMBERSHIP:
1515 if (optlen != sizeof (struct target_ip_mreq_source))
1516 return -TARGET_EINVAL;
/* ip_mreq_source layout matches between target and host: pass through. */
1518 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1519 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1520 unlock_user (ip_mreq_source, optval_addr, 0);
/* --- SOL_IPV6 branch (dispatch elided): plain u32 options. --- */
1529 case IPV6_MTU_DISCOVER:
1532 case IPV6_RECVPKTINFO:
1534 if (optlen < sizeof(uint32_t)) {
1535 return -TARGET_EINVAL;
1537 if (get_user_u32(val, optval_addr)) {
1538 return -TARGET_EFAULT;
1540 ret = get_errno(setsockopt(sockfd, level, optname,
1541 &val, sizeof(val)));
/* --- SOL_RAW / ICMP_FILTER branch (dispatch elided). --- */
1550 /* struct icmp_filter takes an u32 value */
1551 if (optlen < sizeof(uint32_t)) {
1552 return -TARGET_EINVAL;
1555 if (get_user_u32(val, optval_addr)) {
1556 return -TARGET_EFAULT;
1558 ret = get_errno(setsockopt(sockfd, level, optname,
1559 &val, sizeof(val)));
1566 case TARGET_SOL_SOCKET:
1568 case TARGET_SO_RCVTIMEO:
/* Timeout options take a struct timeval that needs conversion. */
1572 optname = SO_RCVTIMEO;
1575 if (optlen != sizeof(struct target_timeval)) {
1576 return -TARGET_EINVAL;
1579 if (copy_from_user_timeval(&tv, optval_addr)) {
1580 return -TARGET_EFAULT;
1583 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
1587 case TARGET_SO_SNDTIMEO:
/* Shares the timeval path above via a goto (elided) -- TODO confirm. */
1588 optname = SO_SNDTIMEO;
1590 case TARGET_SO_ATTACH_FILTER:
1592 struct target_sock_fprog *tfprog;
1593 struct target_sock_filter *tfilter;
1594 struct sock_fprog fprog;
1595 struct sock_filter *filter;
1598 if (optlen != sizeof(*tfprog)) {
1599 return -TARGET_EINVAL;
1601 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
1602 return -TARGET_EFAULT;
1604 if (!lock_user_struct(VERIFY_READ, tfilter,
1605 tswapal(tfprog->filter), 0)) {
1606 unlock_user_struct(tfprog, optval_addr, 1);
1607 return -TARGET_EFAULT;
1610 fprog.len = tswap16(tfprog->len);
1611 filter = malloc(fprog.len * sizeof(*filter));
1612 if (filter == NULL) {
1613 unlock_user_struct(tfilter, tfprog->filter, 1);
1614 unlock_user_struct(tfprog, optval_addr, 1);
1615 return -TARGET_ENOMEM;
/* Byte-swap each BPF instruction into the host filter array. */
1617 for (i = 0; i < fprog.len; i++) {
1618 filter[i].code = tswap16(tfilter[i].code);
1619 filter[i].jt = tfilter[i].jt;
1620 filter[i].jf = tfilter[i].jf;
1621 filter[i].k = tswap32(tfilter[i].k);
1623 fprog.filter = filter;
1625 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
1626 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
/* free(filter) presumably happens here (elided) -- TODO confirm. */
1629 unlock_user_struct(tfilter, tfprog->filter, 1);
1630 unlock_user_struct(tfprog, optval_addr, 1);
1633 case TARGET_SO_BINDTODEVICE:
1635 char *dev_ifname, *addr_ifname;
/* Clamp the interface-name length to IFNAMSIZ-1 before copying. */
1637 if (optlen > IFNAMSIZ - 1) {
1638 optlen = IFNAMSIZ - 1;
1640 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1642 return -TARGET_EFAULT;
1644 optname = SO_BINDTODEVICE;
1645 addr_ifname = alloca(IFNAMSIZ);
1646 memcpy(addr_ifname, dev_ifname, optlen);
1647 addr_ifname[optlen] = 0;
1648 ret = get_errno(setsockopt(sockfd, level, optname, addr_ifname, optlen));
1649 unlock_user (dev_ifname, optval_addr, 0);
1652 /* Options with 'int' argument. */
1653 case TARGET_SO_DEBUG:
1656 case TARGET_SO_REUSEADDR:
1657 optname = SO_REUSEADDR;
1659 case TARGET_SO_TYPE:
1662 case TARGET_SO_ERROR:
1665 case TARGET_SO_DONTROUTE:
1666 optname = SO_DONTROUTE;
1668 case TARGET_SO_BROADCAST:
1669 optname = SO_BROADCAST;
1671 case TARGET_SO_SNDBUF:
1672 optname = SO_SNDBUF;
1674 case TARGET_SO_SNDBUFFORCE:
1675 optname = SO_SNDBUFFORCE;
1677 case TARGET_SO_RCVBUF:
1678 optname = SO_RCVBUF;
1680 case TARGET_SO_RCVBUFFORCE:
1681 optname = SO_RCVBUFFORCE;
1683 case TARGET_SO_KEEPALIVE:
1684 optname = SO_KEEPALIVE;
1686 case TARGET_SO_OOBINLINE:
1687 optname = SO_OOBINLINE;
1689 case TARGET_SO_NO_CHECK:
1690 optname = SO_NO_CHECK;
1692 case TARGET_SO_PRIORITY:
1693 optname = SO_PRIORITY;
1696 case TARGET_SO_BSDCOMPAT:
1697 optname = SO_BSDCOMPAT;
1700 case TARGET_SO_PASSCRED:
1701 optname = SO_PASSCRED;
1703 case TARGET_SO_PASSSEC:
1704 optname = SO_PASSSEC;
1706 case TARGET_SO_TIMESTAMP:
1707 optname = SO_TIMESTAMP;
1709 case TARGET_SO_RCVLOWAT:
1710 optname = SO_RCVLOWAT;
/* Common tail for all 'int' options above. */
1716 if (optlen < sizeof(uint32_t))
1717 return -TARGET_EINVAL;
1719 if (get_user_u32(val, optval_addr))
1720 return -TARGET_EFAULT;
1721 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
1725 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
1726 ret = -TARGET_ENOPROTOOPT;
/*
 * do_getsockopt: emulate getsockopt(2) for the guest, converting option
 * values and the returned optlen back to target representation.
 * Returns 0 or a negative TARGET_* errno.
 * NOTE(review): elided listing -- the switch(level) lines, some case
 * labels ('goto'/'break' targets) and closing braces are missing; branch
 * comments below are inferred from neighbouring labels -- confirm against
 * the full file.
 */
1731 /* do_getsockopt() Must return target values and target errnos. */
1732 static abi_long do_getsockopt(int sockfd, int level, int optname,
1733 abi_ulong optval_addr, abi_ulong optlen)
1740 case TARGET_SOL_SOCKET:
1743 /* These don't just return a single integer */
1744 case TARGET_SO_LINGER:
1745 case TARGET_SO_RCVTIMEO:
1746 case TARGET_SO_SNDTIMEO:
1747 case TARGET_SO_PEERNAME:
1749 case TARGET_SO_PEERCRED: {
1752 struct target_ucred *tcr;
1754 if (get_user_u32(len, optlen)) {
1755 return -TARGET_EFAULT;
/* Negative guest length (guard elided): invalid. */
1758 return -TARGET_EINVAL;
1762 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
/* Copy host ucred field-by-field into the guest structure. */
1770 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
1771 return -TARGET_EFAULT;
1773 __put_user(cr.pid, &tcr->pid);
1774 __put_user(cr.uid, &tcr->uid);
1775 __put_user(cr.gid, &tcr->gid);
1776 unlock_user_struct(tcr, optval_addr, 1);
1777 if (put_user_u32(len, optlen)) {
1778 return -TARGET_EFAULT;
1782 /* Options with 'int' argument. */
1783 case TARGET_SO_DEBUG:
1786 case TARGET_SO_REUSEADDR:
1787 optname = SO_REUSEADDR;
1789 case TARGET_SO_TYPE:
1792 case TARGET_SO_ERROR:
1795 case TARGET_SO_DONTROUTE:
1796 optname = SO_DONTROUTE;
1798 case TARGET_SO_BROADCAST:
1799 optname = SO_BROADCAST;
1801 case TARGET_SO_SNDBUF:
1802 optname = SO_SNDBUF;
1804 case TARGET_SO_RCVBUF:
1805 optname = SO_RCVBUF;
1807 case TARGET_SO_KEEPALIVE:
1808 optname = SO_KEEPALIVE;
1810 case TARGET_SO_OOBINLINE:
1811 optname = SO_OOBINLINE;
1813 case TARGET_SO_NO_CHECK:
1814 optname = SO_NO_CHECK;
1816 case TARGET_SO_PRIORITY:
1817 optname = SO_PRIORITY;
1820 case TARGET_SO_BSDCOMPAT:
1821 optname = SO_BSDCOMPAT;
1824 case TARGET_SO_PASSCRED:
1825 optname = SO_PASSCRED;
1827 case TARGET_SO_TIMESTAMP:
1828 optname = SO_TIMESTAMP;
1830 case TARGET_SO_RCVLOWAT:
1831 optname = SO_RCVLOWAT;
1833 case TARGET_SO_ACCEPTCONN:
1834 optname = SO_ACCEPTCONN;
/* Common tail for the 'int' options (labels/goto elided). */
1841 /* TCP options all take an 'int' value. */
1843 if (get_user_u32(len, optlen))
1844 return -TARGET_EFAULT;
1846 return -TARGET_EINVAL;
1848 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv))
/* SO_TYPE values differ between host and target ABIs. */;
1851 if (optname == SO_TYPE) {
1852 val = host_to_target_sock_type(val);
/* Write back either 4 bytes or 1 byte depending on guest len (elided). */
1857 if (put_user_u32(val, optval_addr))
1858 return -TARGET_EFAULT;
1860 if (put_user_u8(val, optval_addr))
1861 return -TARGET_EFAULT;
1863 if (put_user_u32(len, optlen))
1864 return -TARGET_EFAULT;
/* --- SOL_IP branch (dispatch elided): byte-or-int options. --- */
1871 case IP_ROUTER_ALERT:
1875 case IP_MTU_DISCOVER:
1881 case IP_MULTICAST_TTL:
1882 case IP_MULTICAST_LOOP:
1883 if (get_user_u32(len, optlen))
1884 return -TARGET_EFAULT;
1886 return -TARGET_EINVAL;
1888 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
/* Guest asked for <4 bytes: return a single byte if the value fits. */
1891 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
1893 if (put_user_u32(len, optlen)
1894 || put_user_u8(val, optval_addr))
1895 return -TARGET_EFAULT;
1897 if (len > sizeof(int))
1899 if (put_user_u32(len, optlen)
1900 || put_user_u32(val, optval_addr))
1901 return -TARGET_EFAULT;
1905 ret = -TARGET_ENOPROTOOPT;
1911 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1913 ret = -TARGET_EOPNOTSUPP;
/*
 * lock_iovec: build a host struct iovec array from a guest iovec array at
 * target_addr, locking each guest buffer into host memory.  Returns the
 * host array (caller frees via unlock_iovec) or NULL on error with errno
 * conventions handled by the (elided) error paths.
 * NOTE(review): elided listing -- error labels, errno assignments and the
 * success 'return vec' are not shown.
 */
1919 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
1920 int count, int copy)
1922 struct target_iovec *target_vec;
1924 abi_ulong total_len, max_len;
1927 bool bad_address = false;
/* Reject out-of-range counts, mirroring the kernel's IOV_MAX limit. */
1933 if (count < 0 || count > IOV_MAX) {
1938 vec = calloc(count, sizeof(struct iovec));
1944 target_vec = lock_user(VERIFY_READ, target_addr,
1945 count * sizeof(struct target_iovec), 1);
1946 if (target_vec == NULL) {
1951 /* ??? If host page size > target page size, this will result in a
1952 value larger than what we can actually support. */
1953 max_len = 0x7fffffff & TARGET_PAGE_MASK;
1956 for (i = 0; i < count; i++) {
1957 abi_ulong base = tswapal(target_vec[i].iov_base);
1958 abi_long len = tswapal(target_vec[i].iov_len);
/* Negative len path elided here (sets bad_address/EINVAL presumably). */
1963 } else if (len == 0) {
1964 /* Zero length pointer is ignored. */
1965 vec[i].iov_base = 0;
1967 vec[i].iov_base = lock_user(type, base, len, copy);
1968 /* If the first buffer pointer is bad, this is a fault. But
1969 * subsequent bad buffers will result in a partial write; this
1970 * is realized by filling the vector with null pointers and
1972 if (!vec[i].iov_base) {
/* Clamp the total transfer so it never exceeds max_len overall. */
1983 if (len > max_len - total_len) {
1984 len = max_len - total_len;
1987 vec[i].iov_len = len;
1991 unlock_user(target_vec, target_addr, 0);
/* Failure cleanup (label elided): unlock every buffer locked so far. */
1996 if (tswapal(target_vec[i].iov_len) > 0) {
1997 unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
2000 unlock_user(target_vec, target_addr, 0);
/*
 * unlock_iovec: release every guest buffer locked by lock_iovec.  When
 * 'copy' is nonzero the host contents are written back to guest memory
 * (the read/receive direction); otherwise they are discarded.
 * NOTE(review): elided listing -- NULL-check on target_vec and free(vec)
 * are presumably among the missing lines.
 */
2007 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2008 int count, int copy)
2010 struct target_iovec *target_vec;
2013 target_vec = lock_user(VERIFY_READ, target_addr,
2014 count * sizeof(struct target_iovec), 1);
2016 for (i = 0; i < count; i++) {
2017 abi_ulong base = tswapal(target_vec[i].iov_base);
2018 abi_long len = tswapal(target_vec[i].iov_len);
2022 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2024 unlock_user(target_vec, target_addr, 0);
/*
 * target_to_host_sock_type: translate a guest socket type (base type plus
 * SOCK_CLOEXEC/SOCK_NONBLOCK flag bits) into host values via *type.
 * Returns -TARGET_EINVAL when a requested flag cannot be represented on
 * this host; flags not supported natively may be emulated later by
 * sock_flags_fixup().
 * NOTE(review): elided listing -- 'break's, the '*type = host_type'
 * write-back and 'return 0' are not shown.
 */
2030 static inline int target_to_host_sock_type(int *type)
2033 int target_type = *type;
2035 switch (target_type & TARGET_SOCK_TYPE_MASK) {
2036 case TARGET_SOCK_DGRAM:
2037 host_type = SOCK_DGRAM;
2039 case TARGET_SOCK_STREAM:
2040 host_type = SOCK_STREAM;
/* Other base types pass through numerically unchanged. */
2043 host_type = target_type & TARGET_SOCK_TYPE_MASK;
2046 if (target_type & TARGET_SOCK_CLOEXEC) {
2047 #if defined(SOCK_CLOEXEC)
2048 host_type |= SOCK_CLOEXEC;
2050 return -TARGET_EINVAL;
2053 if (target_type & TARGET_SOCK_NONBLOCK) {
2054 #if defined(SOCK_NONBLOCK)
2055 host_type |= SOCK_NONBLOCK;
/* No SOCK_NONBLOCK and no O_NONBLOCK fallback: cannot emulate. */
2056 #elif !defined(O_NONBLOCK)
2057 return -TARGET_EINVAL;
/*
 * sock_flags_fixup: when the host lacks SOCK_NONBLOCK, apply O_NONBLOCK
 * with fcntl() after socket creation so guest semantics are preserved.
 * Returns fd (elided) on success or -TARGET_EINVAL if fcntl fails.
 */
2064 /* Try to emulate socket type flags after socket creation. */
2065 static int sock_flags_fixup(int fd, int target_type)
2067 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2068 if (target_type & TARGET_SOCK_NONBLOCK) {
2069 int flags = fcntl(fd, F_GETFL);
2070 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
/* close(fd) presumably happens before this return (elided). */
2072 return -TARGET_EINVAL;
/*
 * do_socket: emulate socket(2).  Converts the guest socket type/flags to
 * host form, rejects PF_NETLINK (unsupported here), then fixes up flags
 * that could not be passed to socket() natively.
 */
2079 /* do_socket() Must return target values and target errnos. */
2080 static abi_long do_socket(int domain, int type, int protocol)
2082 int target_type = type;
2085 ret = target_to_host_sock_type(&type);
/* Netlink sockets are not emulated: report address family unsupported. */
2090 if (domain == PF_NETLINK)
2091 return -TARGET_EAFNOSUPPORT;
2092 ret = get_errno(socket(domain, type, protocol));
/* On success, emulate SOCK_NONBLOCK etc. via fcntl when needed. */
2094 ret = sock_flags_fixup(ret, target_type);
/*
 * do_bind: emulate bind(2); converts the guest sockaddr then binds.
 * The +1 in the alloca leaves room for the AF_UNIX NUL-termination fixup
 * performed by target_to_host_sockaddr.
 */
2099 /* do_bind() Must return target values and target errnos. */
2100 static abi_long do_bind(int sockfd, abi_ulong target_addr,
2106 if ((int)addrlen < 0) {
2107 return -TARGET_EINVAL;
2110 addr = alloca(addrlen+1);
2112 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
2116 return get_errno(bind(sockfd, addr, addrlen));
/*
 * do_connect: emulate connect(2); mirrors do_bind() -- convert the guest
 * sockaddr (alloca +1 for the AF_UNIX NUL fixup) and call connect.
 */
2119 /* do_connect() Must return target values and target errnos. */
2120 static abi_long do_connect(int sockfd, abi_ulong target_addr,
2126 if ((int)addrlen < 0) {
2127 return -TARGET_EINVAL;
2130 addr = alloca(addrlen+1);
2132 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
2136 return get_errno(connect(sockfd, addr, addrlen));
/*
 * do_sendrecvmsg_locked: shared core of sendmsg/recvmsg emulation for a
 * guest msghdr that the caller has already locked.  Converts name, iovec
 * and control data; 'send' selects sendmsg() vs recvmsg().
 * NOTE(review): elided listing -- error 'goto out' paths, the NULL-vec
 * check and the function tail are not shown.
 */
2139 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
2140 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
2141 int flags, int send)
2147 abi_ulong target_vec;
2149 if (msgp->msg_name) {
2150 msg.msg_namelen = tswap32(msgp->msg_namelen);
2151 msg.msg_name = alloca(msg.msg_namelen+1);
2152 ret = target_to_host_sockaddr(msg.msg_name, tswapal(msgp->msg_name),
2158 msg.msg_name = NULL;
2159 msg.msg_namelen = 0;
/* Host control buffer is 2x the guest size: target cmsgs may expand
 * (see target_to_host_cmsg's allocation-strategy comment). */
2161 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
2162 msg.msg_control = alloca(msg.msg_controllen);
2163 msg.msg_flags = tswap32(msgp->msg_flags);
2165 count = tswapal(msgp->msg_iovlen);
2166 target_vec = tswapal(msgp->msg_iov);
2167 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
2168 target_vec, count, send);
/* lock_iovec failed: translate the host errno it left behind. */
2170 ret = -host_to_target_errno(errno);
2173 msg.msg_iovlen = count;
2177 ret = target_to_host_cmsg(&msg, msgp);
2179 ret = get_errno(sendmsg(fd, &msg, flags));
2181 ret = get_errno(recvmsg(fd, &msg, flags));
2182 if (!is_error(ret)) {
2184 ret = host_to_target_cmsg(msgp, &msg);
2185 if (!is_error(ret)) {
2186 msgp->msg_namelen = tswap32(msg.msg_namelen);
2187 if (msg.msg_name != NULL) {
2188 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
2189 msg.msg_name, msg.msg_namelen);
/* Copy back (and write to guest) only on the receive path. */
2201 unlock_iovec(vec, target_vec, count, !send);
/*
 * do_sendrecvmsg: lock the guest msghdr, run the shared locked core, then
 * unlock -- writing the struct back to the guest only on receive.
 */
2206 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
2207 int flags, int send)
2210 struct target_msghdr *msgp;
2212 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
2216 return -TARGET_EFAULT;
2218 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
2219 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
2223 #ifdef TARGET_NR_sendmmsg
2224 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
2225 * so it might not have this *mmsg-specific flag either.
2227 #ifndef MSG_WAITFORONE
2228 #define MSG_WAITFORONE 0x10000
/*
 * do_sendrecvmmsg: emulate sendmmsg/recvmmsg by looping over the guest
 * mmsghdr vector and calling do_sendrecvmsg_locked per entry.  Returns
 * the number of datagrams processed if any succeeded, else the error.
 */
2231 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
2232 unsigned int vlen, unsigned int flags,
2235 struct target_mmsghdr *mmsgp;
/* Cap vlen at UIO_MAXIOV (clamp/return elided). */
2239 if (vlen > UIO_MAXIOV) {
2243 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
2245 return -TARGET_EFAULT;
2248 for (i = 0; i < vlen; i++) {
2249 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
2250 if (is_error(ret)) {
2253 mmsgp[i].msg_len = tswap32(ret);
2254 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
2255 if (flags & MSG_WAITFORONE) {
2256 flags |= MSG_DONTWAIT;
/* Write back only the entries actually processed (i of them). */
2260 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
2262 /* Return number of datagrams sent if we sent any at all;
2263 * otherwise return the error.
2272 /* If we don't have a system accept4() then just call accept.
2273 * The callsites to do_accept4() will ensure that they don't
2274 * pass a non-zero flags argument in this config.
2276 #ifndef CONFIG_ACCEPT4
/* Compatibility shim: same signature as accept4(2), flags are ignored
 * (guaranteed zero by callers per the comment above). */
2277 static inline int accept4(int sockfd, struct sockaddr *addr,
2278 socklen_t *addrlen, int flags)
2281 return accept(sockfd, addr, addrlen);
/*
 * do_accept4: emulate accept4(2).  A NULL guest addr means the caller
 * doesn't want the peer address.  Note Linux returns EINVAL (not EFAULT)
 * for a bad addrlen pointer, which this mimics.
 */
2285 /* do_accept4() Must return target values and target errnos. */
2286 static abi_long do_accept4(int fd, abi_ulong target_addr,
2287 abi_ulong target_addrlen_addr, int flags)
/* Translate guest O_* style flags (SOCK_CLOEXEC/NONBLOCK) to host bits. */
2294 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
2296 if (target_addr == 0) {
2297 return get_errno(accept4(fd, NULL, NULL, host_flags));
2300 /* linux returns EINVAL if addrlen pointer is invalid */
2301 if (get_user_u32(addrlen, target_addrlen_addr))
2302 return -TARGET_EINVAL;
2304 if ((int)addrlen < 0) {
2305 return -TARGET_EINVAL;
2308 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2309 return -TARGET_EINVAL;
2311 addr = alloca(addrlen);
2313 ret = get_errno(accept4(fd, addr, &addrlen, host_flags));
2314 if (!is_error(ret)) {
2315 host_to_target_sockaddr(target_addr, addr, addrlen);
2316 if (put_user_u32(addrlen, target_addrlen_addr))
2317 ret = -TARGET_EFAULT;
/*
 * do_getpeername: emulate getpeername(2) -- read the guest addrlen, call
 * the host syscall into a temporary buffer, then convert the sockaddr and
 * updated length back to the guest.
 */
2322 /* do_getpeername() Must return target values and target errnos. */
2323 static abi_long do_getpeername(int fd, abi_ulong target_addr,
2324 abi_ulong target_addrlen_addr)
2330 if (get_user_u32(addrlen, target_addrlen_addr))
2331 return -TARGET_EFAULT;
2333 if ((int)addrlen < 0) {
2334 return -TARGET_EINVAL;
2337 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2338 return -TARGET_EFAULT;
2340 addr = alloca(addrlen);
2342 ret = get_errno(getpeername(fd, addr, &addrlen));
2343 if (!is_error(ret)) {
2344 host_to_target_sockaddr(target_addr, addr, addrlen);
2345 if (put_user_u32(addrlen, target_addrlen_addr))
2346 ret = -TARGET_EFAULT;
/*
 * do_getsockname: emulate getsockname(2); identical structure to
 * do_getpeername() above but queries the local address.
 */
2351 /* do_getsockname() Must return target values and target errnos. */
2352 static abi_long do_getsockname(int fd, abi_ulong target_addr,
2353 abi_ulong target_addrlen_addr)
2359 if (get_user_u32(addrlen, target_addrlen_addr))
2360 return -TARGET_EFAULT;
2362 if ((int)addrlen < 0) {
2363 return -TARGET_EINVAL;
2366 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2367 return -TARGET_EFAULT;
2369 addr = alloca(addrlen);
2371 ret = get_errno(getsockname(fd, addr, &addrlen));
2372 if (!is_error(ret)) {
2373 host_to_target_sockaddr(target_addr, addr, addrlen);
2374 if (put_user_u32(addrlen, target_addrlen_addr))
2375 ret = -TARGET_EFAULT;
/*
 * do_socketpair: emulate socketpair(2); converts the socket type, then
 * writes the two resulting fds back-to-back into guest memory.
 * NOTE(review): the return value of target_to_host_sock_type() is not
 * checked here in the visible lines.
 */
2380 /* do_socketpair() Must return target values and target errnos. */
2381 static abi_long do_socketpair(int domain, int type, int protocol,
2382 abi_ulong target_tab_addr)
2387 target_to_host_sock_type(&type);
2389 ret = get_errno(socketpair(domain, type, protocol, tab));
2390 if (!is_error(ret)) {
2391 if (put_user_s32(tab[0], target_tab_addr)
2392 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
2393 ret = -TARGET_EFAULT;
/*
 * do_sendto: emulate sendto(2)/send(2).  A zero target_addr means plain
 * send(); otherwise the guest sockaddr is converted (alloca +1 for the
 * AF_UNIX NUL fixup) and sendto() is used.
 */
2398 /* do_sendto() Must return target values and target errnos. */
2399 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
2400 abi_ulong target_addr, socklen_t addrlen)
2406 if ((int)addrlen < 0) {
2407 return -TARGET_EINVAL;
2410 host_msg = lock_user(VERIFY_READ, msg, len, 1);
2412 return -TARGET_EFAULT;
/* Destination-address branch (the 'if (target_addr)' guard is elided). */
2414 addr = alloca(addrlen+1);
2415 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
2417 unlock_user(host_msg, msg, 0);
2420 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
2422 ret = get_errno(send(fd, host_msg, len, flags));
2424 unlock_user(host_msg, msg, 0);
/*
 * do_recvfrom: emulate recvfrom(2)/recv(2).  A zero target_addr selects
 * plain recv(); otherwise the source address and its length are copied
 * back to the guest after a successful receive.
 * NOTE(review): elided listing -- 'goto fail' labels and some guards are
 * not shown.
 */
2428 /* do_recvfrom() Must return target values and target errnos. */
2429 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
2430 abi_ulong target_addr,
2431 abi_ulong target_addrlen)
2438 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
2440 return -TARGET_EFAULT;
2442 if (get_user_u32(addrlen, target_addrlen)) {
2443 ret = -TARGET_EFAULT;
2446 if ((int)addrlen < 0) {
2447 ret = -TARGET_EINVAL;
2450 addr = alloca(addrlen);
2451 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
2453 addr = NULL; /* To keep compiler quiet. */
2454 ret = get_errno(qemu_recv(fd, host_msg, len, flags));
2456 if (!is_error(ret)) {
2458 host_to_target_sockaddr(target_addr, addr, addrlen);
2459 if (put_user_u32(addrlen, target_addrlen)) {
2460 ret = -TARGET_EFAULT;
/* Success: write the received bytes back to guest memory. */
2464 unlock_user(host_msg, msg, len);
/* Failure: discard, nothing written back. */
2467 unlock_user(host_msg, msg, 0);
2472 #ifdef TARGET_NR_socketcall
/*
 * do_socketcall: demultiplex the legacy socketcall(2) syscall.  ac[] maps
 * each sub-call number to its argument count; the arguments are fetched
 * from guest memory at vptr, then dispatched to the matching do_* helper.
 */
2473 /* do_socketcall() Must return target values and target errnos. */
2474 static abi_long do_socketcall(int num, abi_ulong vptr)
2476 static const unsigned ac[] = { /* number of arguments per call */
2477 [SOCKOP_socket] = 3, /* domain, type, protocol */
2478 [SOCKOP_bind] = 3, /* sockfd, addr, addrlen */
2479 [SOCKOP_connect] = 3, /* sockfd, addr, addrlen */
2480 [SOCKOP_listen] = 2, /* sockfd, backlog */
2481 [SOCKOP_accept] = 3, /* sockfd, addr, addrlen */
2482 [SOCKOP_accept4] = 4, /* sockfd, addr, addrlen, flags */
2483 [SOCKOP_getsockname] = 3, /* sockfd, addr, addrlen */
2484 [SOCKOP_getpeername] = 3, /* sockfd, addr, addrlen */
2485 [SOCKOP_socketpair] = 4, /* domain, type, protocol, tab */
2486 [SOCKOP_send] = 4, /* sockfd, msg, len, flags */
2487 [SOCKOP_recv] = 4, /* sockfd, msg, len, flags */
2488 [SOCKOP_sendto] = 6, /* sockfd, msg, len, flags, addr, addrlen */
2489 [SOCKOP_recvfrom] = 6, /* sockfd, msg, len, flags, addr, addrlen */
2490 [SOCKOP_shutdown] = 2, /* sockfd, how */
2491 [SOCKOP_sendmsg] = 3, /* sockfd, msg, flags */
2492 [SOCKOP_recvmsg] = 3, /* sockfd, msg, flags */
2493 [SOCKOP_setsockopt] = 5, /* sockfd, level, optname, optval, optlen */
2494 [SOCKOP_getsockopt] = 5, /* sockfd, level, optname, optval, optlen */
2496 abi_long a[6]; /* max 6 args */
2498 /* first, collect the arguments in a[] according to ac[] */
2499 if (num >= 0 && num < ARRAY_SIZE(ac)) {
2501 assert(ARRAY_SIZE(a) >= ac[num]); /* ensure we have space for args */
2502 for (i = 0; i < ac[num]; ++i) {
2503 if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
2504 return -TARGET_EFAULT;
2509 /* now when we have the args, actually handle the call */
2511 case SOCKOP_socket: /* domain, type, protocol */
2512 return do_socket(a[0], a[1], a[2]);
2513 case SOCKOP_bind: /* sockfd, addr, addrlen */
2514 return do_bind(a[0], a[1], a[2]);
2515 case SOCKOP_connect: /* sockfd, addr, addrlen */
2516 return do_connect(a[0], a[1], a[2]);
2517 case SOCKOP_listen: /* sockfd, backlog */
2518 return get_errno(listen(a[0], a[1]));
2519 case SOCKOP_accept: /* sockfd, addr, addrlen */
2520 return do_accept4(a[0], a[1], a[2], 0);
2521 case SOCKOP_accept4: /* sockfd, addr, addrlen, flags */
2522 return do_accept4(a[0], a[1], a[2], a[3]);
2523 case SOCKOP_getsockname: /* sockfd, addr, addrlen */
2524 return do_getsockname(a[0], a[1], a[2]);
2525 case SOCKOP_getpeername: /* sockfd, addr, addrlen */
2526 return do_getpeername(a[0], a[1], a[2]);
2527 case SOCKOP_socketpair: /* domain, type, protocol, tab */
2528 return do_socketpair(a[0], a[1], a[2], a[3]);
2529 case SOCKOP_send: /* sockfd, msg, len, flags */
2530 return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
2531 case SOCKOP_recv: /* sockfd, msg, len, flags */
2532 return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
2533 case SOCKOP_sendto: /* sockfd, msg, len, flags, addr, addrlen */
2534 return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
2535 case SOCKOP_recvfrom: /* sockfd, msg, len, flags, addr, addrlen */
2536 return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
2537 case SOCKOP_shutdown: /* sockfd, how */
2538 return get_errno(shutdown(a[0], a[1]));
2539 case SOCKOP_sendmsg: /* sockfd, msg, flags */
2540 return do_sendrecvmsg(a[0], a[1], a[2], 1);
2541 case SOCKOP_recvmsg: /* sockfd, msg, flags */
2542 return do_sendrecvmsg(a[0], a[1], a[2], 0);
2543 case SOCKOP_setsockopt: /* sockfd, level, optname, optval, optlen */
2544 return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
2545 case SOCKOP_getsockopt: /* sockfd, level, optname, optval, optlen */
2546 return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
2548 gemu_log("Unsupported socketcall: %d\n", num);
2549 return -TARGET_ENOSYS;
/* Fixed-size table tracking guest shmat() mappings (fields elided). */
2554 #define N_SHM_REGIONS 32
2556 static struct shm_region {
2559 } shm_regions[N_SHM_REGIONS];
/* Guest-ABI layout of struct semid_ds; padding words are absent on PPC64. */
2561 struct target_semid_ds
2563 struct target_ipc_perm sem_perm;
2564 abi_ulong sem_otime;
2565 #if !defined(TARGET_PPC64)
2566 abi_ulong __unused1;
2568 abi_ulong sem_ctime;
2569 #if !defined(TARGET_PPC64)
2570 abi_ulong __unused2;
2572 abi_ulong sem_nsems;
2573 abi_ulong __unused3;
2574 abi_ulong __unused4;
/*
 * target_to_host_ipc_perm: convert the ipc_perm embedded at the start of
 * a guest semid_ds into host form.  mode/__seq are 32-bit on some targets
 * (Alpha/MIPS/PPC) and 16-bit elsewhere, hence the #ifs.
 */
2577 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
2578 abi_ulong target_addr)
2580 struct target_ipc_perm *target_ip;
2581 struct target_semid_ds *target_sd;
2583 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2584 return -TARGET_EFAULT;
2585 target_ip = &(target_sd->sem_perm);
2586 host_ip->__key = tswap32(target_ip->__key);
2587 host_ip->uid = tswap32(target_ip->uid);
2588 host_ip->gid = tswap32(target_ip->gid);
2589 host_ip->cuid = tswap32(target_ip->cuid);
2590 host_ip->cgid = tswap32(target_ip->cgid);
2591 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
2592 host_ip->mode = tswap32(target_ip->mode);
2594 host_ip->mode = tswap16(target_ip->mode);
2596 #if defined(TARGET_PPC)
2597 host_ip->__seq = tswap32(target_ip->__seq);
2599 host_ip->__seq = tswap16(target_ip->__seq);
2601 unlock_user_struct(target_sd, target_addr, 0);
/*
 * host_to_target_ipc_perm: inverse of target_to_host_ipc_perm -- write a
 * host ipc_perm into the guest semid_ds at target_addr, with the same
 * per-architecture mode/__seq width differences.
 */
2605 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
2606 struct ipc_perm *host_ip)
2608 struct target_ipc_perm *target_ip;
2609 struct target_semid_ds *target_sd;
2611 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2612 return -TARGET_EFAULT;
2613 target_ip = &(target_sd->sem_perm);
2614 target_ip->__key = tswap32(host_ip->__key);
2615 target_ip->uid = tswap32(host_ip->uid);
2616 target_ip->gid = tswap32(host_ip->gid);
2617 target_ip->cuid = tswap32(host_ip->cuid);
2618 target_ip->cgid = tswap32(host_ip->cgid);
2619 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
2620 target_ip->mode = tswap32(host_ip->mode);
2622 target_ip->mode = tswap16(host_ip->mode);
2624 #if defined(TARGET_PPC)
2625 target_ip->__seq = tswap32(host_ip->__seq);
2627 target_ip->__seq = tswap16(host_ip->__seq);
2629 unlock_user_struct(target_sd, target_addr, 1);
/*
 * target_to_host_semid_ds: convert a full guest semid_ds (perm block via
 * target_to_host_ipc_perm, plus the counters/timestamps) to host form.
 */
2633 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
2634 abi_ulong target_addr)
2636 struct target_semid_ds *target_sd;
2638 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2639 return -TARGET_EFAULT;
2640 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
2641 return -TARGET_EFAULT;
2642 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
2643 host_sd->sem_otime = tswapal(target_sd->sem_otime);
2644 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
2645 unlock_user_struct(target_sd, target_addr, 0);
/*
 * host_to_target_semid_ds: write the host semid_ds back to the guest
 * struct at target_addr (permissions via host_to_target_ipc_perm, scalar
 * fields byte-swapped with tswapal).  Returns -TARGET_EFAULT on failure.
 * NOTE(review): as in the read direction, the early return on line 2657
 * does not visibly unlock target_sd.
 */
2649 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
2650 struct semid_ds *host_sd)
2652 struct target_semid_ds *target_sd;
2654 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2655 return -TARGET_EFAULT;
2656 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
2657 return -TARGET_EFAULT;
2658 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
2659 target_sd->sem_otime = tswapal(host_sd->sem_otime);
2660 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
2661 unlock_user_struct(target_sd, target_addr, 1);
2665 struct target_seminfo {
/*
 * host_to_target_seminfo: copy a host struct seminfo (result of
 * semctl(IPC_INFO/SEM_INFO)) into the guest's target_seminfo at
 * target_addr.  __put_user handles the per-field byte swapping.
 * Returns -TARGET_EFAULT if the guest struct cannot be locked.
 */
2678 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
2679 struct seminfo *host_seminfo)
2681 struct target_seminfo *target_seminfo;
2682 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
2683 return -TARGET_EFAULT;
2684 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
2685 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
2686 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
2687 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
2688 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
2689 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
2690 __put_user(host_seminfo->semume, &target_seminfo->semume);
2691 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
2692 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
2693 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
2694 unlock_user_struct(target_seminfo, target_addr, 1);
2700 struct semid_ds *buf;
2701 unsigned short *array;
2702 struct seminfo *__buf;
2705 union target_semun {
/*
 * target_to_host_semarray: read a guest semaphore-value array (as used by
 * semctl SETALL) into a freshly malloc'd host array.  The semaphore count
 * is obtained with a host semctl(IPC_STAT) rather than trusted from the
 * guest.  On success *host_array owns the allocation; the CALLER frees it.
 * Returns a host errno, -TARGET_ENOMEM, or -TARGET_EFAULT.
 * NOTE(review): whether *host_array is freed on the -TARGET_EFAULT path
 * is not visible in this hunk -- verify no leak in the full file.
 */
2712 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
2713 abi_ulong target_addr)
2716 unsigned short *array;
2718 struct semid_ds semid_ds;
2721 semun.buf = &semid_ds;
/* Ask the host kernel how many semaphores are in the set. */
2723 ret = semctl(semid, 0, IPC_STAT, semun);
2725 return get_errno(ret);
2727 nsems = semid_ds.sem_nsems;
2729 *host_array = malloc(nsems*sizeof(unsigned short));
2731 return -TARGET_ENOMEM;
2733 array = lock_user(VERIFY_READ, target_addr,
2734 nsems*sizeof(unsigned short), 1);
2737 return -TARGET_EFAULT;
/* Byte-swap each 16-bit semaphore value from guest order. */
2740 for(i=0; i<nsems; i++) {
2741 __get_user((*host_array)[i], &array[i]);
2743 unlock_user(array, target_addr, 0);
/*
 * host_to_target_semarray: write a host semaphore-value array (result of
 * semctl GETALL) back into guest memory at target_addr, byte-swapping
 * each 16-bit value.  The element count is again taken from a host
 * semctl(IPC_STAT), not from the guest.
 * Returns a host errno or -TARGET_EFAULT.
 */
2748 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
2749 unsigned short **host_array)
2752 unsigned short *array;
2754 struct semid_ds semid_ds;
2757 semun.buf = &semid_ds;
2759 ret = semctl(semid, 0, IPC_STAT, semun);
2761 return get_errno(ret);
2763 nsems = semid_ds.sem_nsems;
2765 array = lock_user(VERIFY_WRITE, target_addr,
2766 nsems*sizeof(unsigned short), 0);
2768 return -TARGET_EFAULT;
2770 for(i=0; i<nsems; i++) {
2771 __put_user((*host_array)[i], &array[i]);
/* '1' -> copy the converted array back to guest memory. */
2774 unlock_user(array, target_addr, 1);
/*
 * do_semctl: emulate semctl(2) for the guest.  target_arg is the guest's
 * semun value; depending on cmd it is interpreted as an immediate int
 * (SETVAL), a guest pointer to a semaphore array (GETALL/SETALL), a guest
 * semid_ds pointer (IPC_STAT/IPC_SET), or a seminfo pointer (IPC_INFO).
 * Returns target errnos.
 * NOTE(review): the switch/case labels and error-path lines are elided in
 * this hunk; the per-cmd grouping below is inferred from the visible
 * helper calls -- confirm against the full file.
 */
2779 static inline abi_long do_semctl(int semid, int semnum, int cmd,
2780 abi_ulong target_arg)
2782 union target_semun target_su = { .buf = target_arg };
2784 struct semid_ds dsarg;
2785 unsigned short *array = NULL;
2786 struct seminfo seminfo;
2787 abi_long ret = -TARGET_EINVAL;
2794 /* In 64 bit cross-endian situations, we will erroneously pick up
2795 * the wrong half of the union for the "val" element. To rectify
2796 * this, the entire 8-byte structure is byteswapped, followed by
2797 * a swap of the 4 byte val field. In other cases, the data is
2798 * already in proper host byte order. */
2799 if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
2800 target_su.buf = tswapal(target_su.buf);
2801 arg.val = tswap32(target_su.val);
2803 arg.val = target_su.val;
2805 ret = get_errno(semctl(semid, semnum, cmd, arg));
/* GETALL/SETALL: shuttle the semaphore array in and back out. */
2809 err = target_to_host_semarray(semid, &array, target_su.array);
2813 ret = get_errno(semctl(semid, semnum, cmd, arg));
2814 err = host_to_target_semarray(semid, target_su.array, &array);
/* IPC_STAT/IPC_SET: convert the semid_ds both ways. */
2821 err = target_to_host_semid_ds(&dsarg, target_su.buf);
2825 ret = get_errno(semctl(semid, semnum, cmd, arg));
2826 err = host_to_target_semid_ds(target_su.buf, &dsarg);
/* IPC_INFO/SEM_INFO: host fills seminfo, copy it to the guest. */
2832 arg.__buf = &seminfo;
2833 ret = get_errno(semctl(semid, semnum, cmd, arg));
2834 err = host_to_target_seminfo(target_su.__buf, &seminfo);
/* Commands with no indirect argument (e.g. GETVAL/GETPID/IPC_RMID). */
2842 ret = get_errno(semctl(semid, semnum, cmd, NULL));
2849 struct target_sembuf {
2850 unsigned short sem_num;
/*
 * target_to_host_sembuf: convert an array of nsops guest sembuf entries
 * at target_addr into the caller-provided host_sembuf array, byte-swapping
 * sem_num/sem_op/sem_flg via __get_user.
 * Returns -TARGET_EFAULT if the guest array cannot be locked.
 */
2855 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
2856 abi_ulong target_addr,
2859 struct target_sembuf *target_sembuf;
2862 target_sembuf = lock_user(VERIFY_READ, target_addr,
2863 nsops*sizeof(struct target_sembuf), 1);
2865 return -TARGET_EFAULT;
2867 for(i=0; i<nsops; i++) {
2868 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
2869 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
2870 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
2873 unlock_user(target_sembuf, target_addr, 0);
/*
 * do_semop: emulate semop(2).  Converts the guest sembuf array then calls
 * the host semop.  Returns target errnos.
 * NOTE(review): sops is a VLA sized by the guest-supplied nsops; a huge
 * nsops could overflow the host stack before the kernel rejects it --
 * verify whether the full file bounds nsops.
 */
2878 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
2880 struct sembuf sops[nsops];
2882 if (target_to_host_sembuf(sops, ptr, nsops))
2883 return -TARGET_EFAULT;
2885 return get_errno(semop(semid, sops, nsops));
/*
 * Guest-layout view of the kernel's msqid_ds (msgctl IPC_STAT/IPC_SET).
 * On 32-bit ABIs each time_t field is followed by a padding word so the
 * layout matches the 64-bit kernel structure.
 */
2888 struct target_msqid_ds
2890 struct target_ipc_perm msg_perm;
2891 abi_ulong msg_stime;
2892 #if TARGET_ABI_BITS == 32
2893 abi_ulong __unused1;
2895 abi_ulong msg_rtime;
2896 #if TARGET_ABI_BITS == 32
2897 abi_ulong __unused2;
2899 abi_ulong msg_ctime;
2900 #if TARGET_ABI_BITS == 32
2901 abi_ulong __unused3;
2903 abi_ulong __msg_cbytes;
2905 abi_ulong msg_qbytes;
2906 abi_ulong msg_lspid;
2907 abi_ulong msg_lrpid;
2908 abi_ulong __unused4;
2909 abi_ulong __unused5;
/*
 * target_to_host_msqid_ds: convert a guest msqid_ds at target_addr into
 * the host *host_md (permissions via target_to_host_ipc_perm, remaining
 * fields byte-swapped with tswapal).  Returns -TARGET_EFAULT on failure.
 * NOTE(review): the early return on line 2920 does not visibly unlock
 * target_md -- same pattern as target_to_host_semid_ds.
 */
2912 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
2913 abi_ulong target_addr)
2915 struct target_msqid_ds *target_md;
2917 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
2918 return -TARGET_EFAULT;
2919 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
2920 return -TARGET_EFAULT;
2921 host_md->msg_stime = tswapal(target_md->msg_stime);
2922 host_md->msg_rtime = tswapal(target_md->msg_rtime);
2923 host_md->msg_ctime = tswapal(target_md->msg_ctime);
2924 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
2925 host_md->msg_qnum = tswapal(target_md->msg_qnum);
2926 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
2927 host_md->msg_lspid = tswapal(target_md->msg_lspid);
2928 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
2929 unlock_user_struct(target_md, target_addr, 0);
/*
 * host_to_target_msqid_ds: write a host msqid_ds back into the guest
 * struct at target_addr; inverse of target_to_host_msqid_ds.
 * Returns -TARGET_EFAULT if guest memory cannot be locked.
 */
2933 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
2934 struct msqid_ds *host_md)
2936 struct target_msqid_ds *target_md;
2938 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
2939 return -TARGET_EFAULT;
2940 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
2941 return -TARGET_EFAULT;
2942 target_md->msg_stime = tswapal(host_md->msg_stime);
2943 target_md->msg_rtime = tswapal(host_md->msg_rtime);
2944 target_md->msg_ctime = tswapal(host_md->msg_ctime);
2945 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
2946 target_md->msg_qnum = tswapal(host_md->msg_qnum);
2947 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
2948 target_md->msg_lspid = tswapal(host_md->msg_lspid);
2949 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
2950 unlock_user_struct(target_md, target_addr, 1);
2954 struct target_msginfo {
2962 unsigned short int msgseg;
/*
 * host_to_target_msginfo: copy a host struct msginfo (msgctl IPC_INFO /
 * MSG_INFO result) into the guest's target_msginfo at target_addr.
 * __put_user performs the byte swapping.  Returns -TARGET_EFAULT on
 * lock failure.
 */
2965 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
2966 struct msginfo *host_msginfo)
2968 struct target_msginfo *target_msginfo;
2969 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
2970 return -TARGET_EFAULT;
2971 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
2972 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
2973 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
2974 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
2975 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
2976 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
2977 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
2978 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
2979 unlock_user_struct(target_msginfo, target_addr, 1);
/*
 * do_msgctl: emulate msgctl(2).  ptr is a guest pointer whose meaning
 * depends on cmd: msqid_ds for IPC_STAT/IPC_SET, msginfo for
 * IPC_INFO/MSG_INFO, unused for IPC_RMID.  Returns target errnos.
 * NOTE(review): the case labels are elided in this hunk; grouping below
 * inferred from the visible helper calls.
 */
2983 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
2985 struct msqid_ds dsarg;
2986 struct msginfo msginfo;
2987 abi_long ret = -TARGET_EINVAL;
/* IPC_STAT/IPC_SET: full round-trip conversion of msqid_ds. */
2995 if (target_to_host_msqid_ds(&dsarg,ptr))
2996 return -TARGET_EFAULT;
2997 ret = get_errno(msgctl(msgid, cmd, &dsarg));
2998 if (host_to_target_msqid_ds(ptr,&dsarg))
2999 return -TARGET_EFAULT;
/* IPC_RMID: no indirect argument. */
3002 ret = get_errno(msgctl(msgid, cmd, NULL));
/* IPC_INFO/MSG_INFO: kernel writes a struct msginfo (cast needed as the
 * msgctl prototype takes msqid_ds*). */
3006 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
3007 if (host_to_target_msginfo(ptr, &msginfo))
3008 return -TARGET_EFAULT;
3015 struct target_msgbuf {
/*
 * do_msgsnd: emulate msgsnd(2).  Copies the guest message (mtype +
 * msgsz payload bytes) into a heap-allocated host msgbuf, swapping
 * mtype, then calls the host msgsnd.  Returns target errnos;
 * -TARGET_EINVAL for a negative msgsz.
 * NOTE(review): the free() of host_mb after the syscall is not visible
 * in this hunk -- verify there is no leak in the full file.
 */
3020 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
3021 ssize_t msgsz, int msgflg)
3023 struct target_msgbuf *target_mb;
3024 struct msgbuf *host_mb;
3028 return -TARGET_EINVAL;
3031 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
3032 return -TARGET_EFAULT;
/* +sizeof(long) covers the host mtype header before the payload. */
3033 host_mb = malloc(msgsz+sizeof(long));
3035 unlock_user_struct(target_mb, msgp, 0);
3036 return -TARGET_ENOMEM;
3038 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
/* Payload is raw bytes: no per-element swapping needed. */
3039 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
3040 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
3042 unlock_user_struct(target_mb, msgp, 0);
/*
 * do_msgrcv: emulate msgrcv(2).  Receives into a heap host buffer, then
 * copies 'ret' payload bytes plus the swapped mtype back into the guest
 * msgbuf at msgp.  Returns the received byte count or a target errno.
 * NOTE(review): g_malloc aborts on OOM rather than returning NULL, and
 * the g_free of host_mb is elided from this hunk.
 */
3047 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
3048 unsigned int msgsz, abi_long msgtyp,
3051 struct target_msgbuf *target_mb;
3053 struct msgbuf *host_mb;
3056 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
3057 return -TARGET_EFAULT;
3059 host_mb = g_malloc(msgsz+sizeof(long));
3060 ret = get_errno(msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
/* On success, lock exactly the mtext region (after the abi_ulong mtype)
 * for the 'ret' bytes actually received. */
3063 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
3064 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
3065 if (!target_mtext) {
3066 ret = -TARGET_EFAULT;
3069 memcpy(target_mb->mtext, host_mb->mtext, ret);
3070 unlock_user(target_mtext, target_mtext_addr, ret);
3073 target_mb->mtype = tswapal(host_mb->mtype);
3077 unlock_user_struct(target_mb, msgp, 1);
/*
 * target_to_host_shmid_ds: convert a guest shmid_ds at target_addr into
 * the host *host_sd (permissions via target_to_host_ipc_perm, remaining
 * fields via __get_user).  Returns -TARGET_EFAULT on access failure.
 */
3082 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
3083 abi_ulong target_addr)
3085 struct target_shmid_ds *target_sd;
3087 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3088 return -TARGET_EFAULT;
3089 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
3090 return -TARGET_EFAULT;
3091 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3092 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
3093 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3094 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3095 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3096 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3097 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3098 unlock_user_struct(target_sd, target_addr, 0);
/*
 * host_to_target_shmid_ds: write a host shmid_ds back into the guest
 * struct at target_addr; inverse of target_to_host_shmid_ds.
 * Returns -TARGET_EFAULT if guest memory cannot be locked.
 */
3102 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
3103 struct shmid_ds *host_sd)
3105 struct target_shmid_ds *target_sd;
3107 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3108 return -TARGET_EFAULT;
3109 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
3110 return -TARGET_EFAULT;
3111 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3112 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
3113 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3114 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3115 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3116 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3117 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3118 unlock_user_struct(target_sd, target_addr, 1);
3122 struct target_shminfo {
/*
 * host_to_target_shminfo: copy a host struct shminfo (shmctl IPC_INFO
 * result) into the guest target_shminfo at target_addr.
 * Returns -TARGET_EFAULT on lock failure.
 */
3130 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
3131 struct shminfo *host_shminfo)
3133 struct target_shminfo *target_shminfo;
3134 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
3135 return -TARGET_EFAULT;
3136 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
3137 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
3138 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
3139 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
3140 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
3141 unlock_user_struct(target_shminfo, target_addr, 1);
/* Guest-layout view of the kernel's struct shm_info (shmctl SHM_INFO).
 * NOTE(review): the middle members (used_ids, shm_tot, shm_rss, shm_swp)
 * are elided from this hunk. */
3145 struct target_shm_info {
3150 abi_ulong swap_attempts;
3151 abi_ulong swap_successes;
/*
 * host_to_target_shm_info: copy a host struct shm_info (shmctl SHM_INFO
 * result) into the guest target_shm_info at target_addr.
 * Returns -TARGET_EFAULT on lock failure.
 */
3154 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
3155 struct shm_info *host_shm_info)
3157 struct target_shm_info *target_shm_info;
3158 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
3159 return -TARGET_EFAULT;
3160 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
3161 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
3162 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
3163 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
3164 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
3165 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
3166 unlock_user_struct(target_shm_info, target_addr, 1);
/*
 * do_shmctl: emulate shmctl(2).  buf is a guest pointer interpreted per
 * cmd: shmid_ds for IPC_STAT/IPC_SET, shminfo for IPC_INFO, shm_info for
 * SHM_INFO, unused for IPC_RMID/SHM_LOCK/SHM_UNLOCK.  Returns target
 * errnos.  NOTE(review): case labels elided; grouping inferred from the
 * visible helper calls.
 */
3170 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
3172 struct shmid_ds dsarg;
3173 struct shminfo shminfo;
3174 struct shm_info shm_info;
3175 abi_long ret = -TARGET_EINVAL;
/* IPC_STAT/IPC_SET: round-trip the shmid_ds. */
3183 if (target_to_host_shmid_ds(&dsarg, buf))
3184 return -TARGET_EFAULT;
3185 ret = get_errno(shmctl(shmid, cmd, &dsarg));
3186 if (host_to_target_shmid_ds(buf, &dsarg))
3187 return -TARGET_EFAULT;
/* IPC_INFO: kernel fills a struct shminfo (prototype forces the cast). */
3190 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
3191 if (host_to_target_shminfo(buf, &shminfo))
3192 return -TARGET_EFAULT;
/* SHM_INFO: kernel fills a struct shm_info. */
3195 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
3196 if (host_to_target_shm_info(buf, &shm_info))
3197 return -TARGET_EFAULT;
/* Commands with no indirect argument. */
3202 ret = get_errno(shmctl(shmid, cmd, NULL));
/*
 * do_shmat: emulate shmat(2).  Attaches the host segment either at the
 * guest-requested address (translated with g2h) or, when the guest passed
 * no address, at a free spot found via mmap_find_vma.  On success the
 * guest page flags are updated and the mapping is recorded in the
 * shm_regions[] table so do_shmdt can size the detach.  Returns the guest
 * attach address or a target errno.
 * NOTE(review): several control-flow lines (if/else, braces, mmap_lock/
 * unlock, final return) are elided from this hunk.
 */
3209 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
3213 struct shmid_ds shm_info;
3216 /* find out the length of the shared memory segment */
3217 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
3218 if (is_error(ret)) {
3219 /* can't get length, bail out */
/* Guest supplied an explicit address: translate and attach there. */
3226 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
3228 abi_ulong mmap_start;
/* No address given: pick a free guest VA range of segment size. */
3230 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
3232 if (mmap_start == -1) {
3234 host_raddr = (void *)-1;
/* SHM_REMAP lets the host mapping replace the placeholder range. */
3236 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
3239 if (host_raddr == (void *)-1) {
3241 return get_errno((long)host_raddr);
3243 raddr=h2g((unsigned long)host_raddr);
/* Mark the attached range valid/readable (writable unless SHM_RDONLY). */
3245 page_set_flags(raddr, raddr + shm_info.shm_segsz,
3246 PAGE_VALID | PAGE_READ |
3247 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
/* Remember the region in the first free slot for later shmdt. */
3249 for (i = 0; i < N_SHM_REGIONS; i++) {
3250 if (shm_regions[i].start == 0) {
3251 shm_regions[i].start = raddr;
3252 shm_regions[i].size = shm_info.shm_segsz;
/*
 * do_shmdt: emulate shmdt(2).  Looks the attach address up in
 * shm_regions[] to clear the slot and the guest page flags for the
 * recorded size, then detaches the host mapping.  Returns target errnos.
 */
3262 static inline abi_long do_shmdt(abi_ulong shmaddr)
3266 for (i = 0; i < N_SHM_REGIONS; ++i) {
3267 if (shm_regions[i].start == shmaddr) {
3268 shm_regions[i].start = 0;
3269 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
3274 return get_errno(shmdt(g2h(shmaddr)));
3277 #ifdef TARGET_NR_ipc
3278 /* ??? This only works with linear mappings. */
3279 /* do_ipc() must return target values and target errnos. */
/*
 * do_ipc: demultiplexer for the legacy single-entry ipc(2) syscall used
 * by some ABIs.  The high 16 bits of 'call' carry the version, the low
 * bits select the operation (IPCOP_*); each case forwards to the
 * corresponding do_* helper above.
 * NOTE(review): most case labels, braces and break statements are elided
 * from this hunk.
 */
3280 static abi_long do_ipc(unsigned int call, abi_long first,
3281 abi_long second, abi_long third,
3282 abi_long ptr, abi_long fifth)
3287 version = call >> 16;
3292 ret = do_semop(first, ptr, second);
3296 ret = get_errno(semget(first, second, third));
3299 case IPCOP_semctl: {
3300 /* The semun argument to semctl is passed by value, so dereference the
3303 get_user_ual(atptr, ptr);
3304 ret = do_semctl(first, second, third, atptr);
3309 ret = get_errno(msgget(first, second));
3313 ret = do_msgsnd(first, ptr, second, third);
3317 ret = do_msgctl(first, second, ptr);
/* Old-style msgrcv passes msgp/msgtyp via an indirection struct. */
3324 struct target_ipc_kludge {
3329 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
3330 ret = -TARGET_EFAULT;
3334 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
3336 unlock_user_struct(tmp, ptr, 0);
/* New-style msgrcv: arguments passed directly. */
3340 ret = do_msgrcv(first, ptr, second, fifth, third);
/* shmat: the attach address is returned indirectly through 'third'. */
3349 raddr = do_shmat(first, ptr, second);
3350 if (is_error(raddr))
3351 return get_errno(raddr);
3352 if (put_user_ual(raddr, third))
3353 return -TARGET_EFAULT;
3357 ret = -TARGET_EINVAL;
3362 ret = do_shmdt(ptr);
3366 /* IPC_* flag values are the same on all linux platforms */
3367 ret = get_errno(shmget(first, second, third));
3370 /* IPC_* and SHM_* command values are the same on all linux platforms */
3372 ret = do_shmctl(first, second, ptr);
3375 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
3376 ret = -TARGET_ENOSYS;
3383 /* kernel structure types definitions */
3385 #define STRUCT(name, ...) STRUCT_ ## name,
3386 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3388 #include "syscall_types.h"
3392 #undef STRUCT_SPECIAL
3394 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3395 #define STRUCT_SPECIAL(name)
3396 #include "syscall_types.h"
3398 #undef STRUCT_SPECIAL
3400 typedef struct IOCTLEntry IOCTLEntry;
3402 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
3403 int fd, int cmd, abi_long arg);
3407 unsigned int host_cmd;
3410 do_ioctl_fn *do_ioctl;
3411 const argtype arg_type[5];
3414 #define IOC_R 0x0001
3415 #define IOC_W 0x0002
3416 #define IOC_RW (IOC_R | IOC_W)
3418 #define MAX_STRUCT_SIZE 4096
3420 #ifdef CONFIG_FIEMAP
3421 /* So fiemap access checks don't overflow on 32 bit systems.
3422 * This is very slightly smaller than the limit imposed by
3423 * the underlying kernel.
3425 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3426 / sizeof(struct fiemap_extent))
/*
 * do_ioctl_fs_ioc_fiemap: special-case handler for FS_IOC_FIEMAP.  The
 * argument is a struct fiemap followed by fm_extent_count extents filled
 * in by the kernel; the fixed buf_temp is swapped for a heap buffer when
 * the extent array doesn't fit.  Returns target errnos.
 * NOTE(review): the free of the heap 'fm' and some error-path lines are
 * elided from this hunk.
 */
3428 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
3429 int fd, int cmd, abi_long arg)
3431 /* The parameter for this ioctl is a struct fiemap followed
3432 * by an array of struct fiemap_extent whose size is set
3433 * in fiemap->fm_extent_count. The array is filled in by the
3436 int target_size_in, target_size_out;
3438 const argtype *arg_type = ie->arg_type;
3439 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
3442 int i, extent_size = thunk_type_size(extent_arg_type, 0);
3446 assert(arg_type[0] == TYPE_PTR);
3447 assert(ie->access == IOC_RW);
/* Convert the fixed-size fiemap header from guest to host. */
3449 target_size_in = thunk_type_size(arg_type, 0);
3450 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
3452 return -TARGET_EFAULT;
3454 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3455 unlock_user(argptr, arg, 0);
3456 fm = (struct fiemap *)buf_temp;
/* Reject guest-controlled counts that would overflow size math. */
3457 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
3458 return -TARGET_EINVAL;
3461 outbufsz = sizeof (*fm) +
3462 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
3464 if (outbufsz > MAX_STRUCT_SIZE) {
3465 /* We can't fit all the extents into the fixed size buffer.
3466 * Allocate one that is large enough and use it instead.
3468 fm = malloc(outbufsz);
3470 return -TARGET_ENOMEM;
3472 memcpy(fm, buf_temp, sizeof(struct fiemap));
3475 ret = get_errno(ioctl(fd, ie->host_cmd, fm));
3476 if (!is_error(ret)) {
3477 target_size_out = target_size_in;
3478 /* An extent_count of 0 means we were only counting the extents
3479 * so there are no structs to copy
3481 if (fm->fm_extent_count != 0) {
3482 target_size_out += fm->fm_mapped_extents * extent_size;
3484 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
3486 ret = -TARGET_EFAULT;
3488 /* Convert the struct fiemap */
3489 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
3490 if (fm->fm_extent_count != 0) {
3491 p = argptr + target_size_in;
3492 /* ...and then all the struct fiemap_extents */
3493 for (i = 0; i < fm->fm_mapped_extents; i++) {
3494 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
3499 unlock_user(argptr, arg, target_size_out);
/*
 * do_ioctl_ifconf: special-case handler for SIOCGIFCONF.  The guest's
 * struct ifconf embeds a pointer to an ifreq array whose element size
 * differs between guest and host, so the ifreq buffer is rebuilt on the
 * host side and each entry converted back individually.  Returns target
 * errnos.
 * NOTE(review): the free of the heap host_ifconf and some error-path
 * lines are elided from this hunk; the lock_user at line 3587 is not
 * visibly NULL-checked.
 */
3509 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
3510 int fd, int cmd, abi_long arg)
3512 const argtype *arg_type = ie->arg_type;
3516 struct ifconf *host_ifconf;
3518 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
3519 int target_ifreq_size;
3524 abi_long target_ifc_buf;
3528 assert(arg_type[0] == TYPE_PTR);
3529 assert(ie->access == IOC_RW);
/* Convert the guest struct ifconf header into buf_temp. */
3532 target_size = thunk_type_size(arg_type, 0);
3534 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3536 return -TARGET_EFAULT;
3537 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3538 unlock_user(argptr, arg, 0);
3540 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
3541 target_ifc_len = host_ifconf->ifc_len;
/* Save the guest's ifreq-array pointer before it is overwritten. */
3542 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
/* Recompute the buffer length in host ifreq units. */
3544 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
3545 nb_ifreq = target_ifc_len / target_ifreq_size;
3546 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
3548 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
3549 if (outbufsz > MAX_STRUCT_SIZE) {
3550 /* We can't fit all the extents into the fixed size buffer.
3551 * Allocate one that is large enough and use it instead.
3553 host_ifconf = malloc(outbufsz);
3555 return -TARGET_ENOMEM;
3557 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
/* The host ifreq array lives immediately after the ifconf header. */
3560 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
3562 host_ifconf->ifc_len = host_ifc_len;
3563 host_ifconf->ifc_buf = host_ifc_buf;
3565 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
3566 if (!is_error(ret)) {
3567 /* convert host ifc_len to target ifc_len */
3569 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
3570 target_ifc_len = nb_ifreq * target_ifreq_size;
3571 host_ifconf->ifc_len = target_ifc_len;
3573 /* restore target ifc_buf */
3575 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
3577 /* copy struct ifconf to target user */
3579 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3581 return -TARGET_EFAULT;
3582 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
3583 unlock_user(argptr, arg, target_size);
3585 /* copy ifreq[] to target user */
3587 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
3588 for (i = 0; i < nb_ifreq ; i++) {
3589 thunk_convert(argptr + i * target_ifreq_size,
3590 host_ifc_buf + i * sizeof(struct ifreq),
3591 ifreq_arg_type, THUNK_TARGET);
3593 unlock_user(argptr, target_ifc_buf, target_ifc_len);
/*
 * do_ioctl_dm: special-case handler for device-mapper ioctls.  The
 * argument is a struct dm_ioctl header followed by a variable-format
 * payload at data_start whose layout depends on the specific DM command,
 * so each command's payload is converted by hand in both directions.
 * A buffer of data_size*2 is used because the host may need more room
 * than the guest provided.  Returns target errnos.
 * NOTE(review): many case labels, braces, break statements and the
 * trailing cleanup (g_free(big_buf)) are elided from this hunk; several
 * lock_user results are not visibly NULL-checked.
 */
3603 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
3604 int cmd, abi_long arg)
3607 struct dm_ioctl *host_dm;
3608 abi_long guest_data;
3609 uint32_t guest_data_size;
3611 const argtype *arg_type = ie->arg_type;
3613 void *big_buf = NULL;
/* Convert the fixed dm_ioctl header from guest to host. */
3617 target_size = thunk_type_size(arg_type, 0);
3618 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3620 ret = -TARGET_EFAULT;
3623 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3624 unlock_user(argptr, arg, 0);
3626 /* buf_temp is too small, so fetch things into a bigger buffer */
3627 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
3628 memcpy(big_buf, buf_temp, target_size);
/* Guest payload sits data_start bytes past the header. */
3632 guest_data = arg + host_dm->data_start;
3633 if ((guest_data - arg) < 0) {
3637 guest_data_size = host_dm->data_size - host_dm->data_start;
3638 host_data = (char*)host_dm + host_dm->data_start;
/* --- inbound payload conversion, per DM command --- */
3640 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
3641 switch (ie->host_cmd) {
3643 case DM_LIST_DEVICES:
3646 case DM_DEV_SUSPEND:
3649 case DM_TABLE_STATUS:
3650 case DM_TABLE_CLEAR:
3652 case DM_LIST_VERSIONS:
3656 case DM_DEV_SET_GEOMETRY:
3657 /* data contains only strings */
3658 memcpy(host_data, argptr, guest_data_size);
/* DM_DEV_WAIT etc.: a leading 64-bit value needs swapping. */
3661 memcpy(host_data, argptr, guest_data_size);
3662 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
/* DM_TABLE_LOAD: an array of dm_target_spec each followed by a
 * parameter string. */
3666 void *gspec = argptr;
3667 void *cur_data = host_data;
3668 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3669 int spec_size = thunk_type_size(arg_type, 0);
3672 for (i = 0; i < host_dm->target_count; i++) {
3673 struct dm_target_spec *spec = cur_data;
3677 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
3678 slen = strlen((char*)gspec + spec_size) + 1;
3680 spec->next = sizeof(*spec) + slen;
3681 strcpy((char*)&spec[1], gspec + spec_size);
3683 cur_data += spec->next;
3688 ret = -TARGET_EINVAL;
3689 unlock_user(argptr, guest_data, 0);
3692 unlock_user(argptr, guest_data, 0);
3694 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
/* --- outbound payload conversion, per DM command --- */
3695 if (!is_error(ret)) {
3696 guest_data = arg + host_dm->data_start;
3697 guest_data_size = host_dm->data_size - host_dm->data_start;
3698 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
3699 switch (ie->host_cmd) {
3704 case DM_DEV_SUSPEND:
3707 case DM_TABLE_CLEAR:
3709 case DM_DEV_SET_GEOMETRY:
3710 /* no return data */
/* DM_LIST_DEVICES: walk the linked dm_name_list, recomputing each
 * 'next' offset for the converted (guest) layout. */
3712 case DM_LIST_DEVICES:
3714 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
3715 uint32_t remaining_data = guest_data_size;
3716 void *cur_data = argptr;
3717 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
3718 int nl_size = 12; /* can't use thunk_size due to alignment */
3721 uint32_t next = nl->next;
3723 nl->next = nl_size + (strlen(nl->name) + 1);
3725 if (remaining_data < nl->next) {
3726 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3729 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
3730 strcpy(cur_data + nl_size, nl->name);
3731 cur_data += nl->next;
3732 remaining_data -= nl->next;
3736 nl = (void*)nl + next;
/* DM_TABLE_STATUS: same walk over dm_target_spec entries. */
3741 case DM_TABLE_STATUS:
3743 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
3744 void *cur_data = argptr;
3745 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3746 int spec_size = thunk_type_size(arg_type, 0);
3749 for (i = 0; i < host_dm->target_count; i++) {
3750 uint32_t next = spec->next;
3751 int slen = strlen((char*)&spec[1]) + 1;
3752 spec->next = (cur_data - argptr) + spec_size + slen;
3753 if (guest_data_size < spec->next) {
3754 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3757 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
3758 strcpy(cur_data + spec_size, (char*)&spec[1]);
3759 cur_data = argptr + spec->next;
3760 spec = (void*)host_dm + host_dm->data_start + next;
/* DM_TARGET_MSG (presumably): count + array of 64-bit devs. */
3766 void *hdata = (void*)host_dm + host_dm->data_start;
3767 int count = *(uint32_t*)hdata;
3768 uint64_t *hdev = hdata + 8;
3769 uint64_t *gdev = argptr + 8;
3772 *(uint32_t*)argptr = tswap32(count);
3773 for (i = 0; i < count; i++) {
3774 *gdev = tswap64(*hdev);
/* DM_LIST_VERSIONS: walk the linked dm_target_versions list. */
3780 case DM_LIST_VERSIONS:
3782 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
3783 uint32_t remaining_data = guest_data_size;
3784 void *cur_data = argptr;
3785 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
3786 int vers_size = thunk_type_size(arg_type, 0);
3789 uint32_t next = vers->next;
3791 vers->next = vers_size + (strlen(vers->name) + 1);
3793 if (remaining_data < vers->next) {
3794 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3797 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
3798 strcpy(cur_data + vers_size, vers->name);
3799 cur_data += vers->next;
3800 remaining_data -= vers->next;
3804 vers = (void*)vers + next;
3809 unlock_user(argptr, guest_data, 0);
3810 ret = -TARGET_EINVAL;
3813 unlock_user(argptr, guest_data, guest_data_size);
/* Finally convert the (possibly updated) dm_ioctl header back. */
3815 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3817 ret = -TARGET_EFAULT;
3820 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3821 unlock_user(argptr, arg, target_size);
/*
 * do_ioctl_blkpg: special-case handler for BLKPG.  The argument struct
 * contains an embedded pointer to a struct blkpg_partition, so both
 * levels are converted: the outer blkpg_ioctl_arg into buf_temp, then
 * the pointed-to partition into a local copy, with the host struct's
 * data pointer redirected to that copy before issuing the ioctl.
 * Returns target errnos.
 */
3828 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
3829 int cmd, abi_long arg)
3833 const argtype *arg_type = ie->arg_type;
3834 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
3837 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
3838 struct blkpg_partition host_part;
3840 /* Read and convert blkpg */
3842 target_size = thunk_type_size(arg_type, 0);
3843 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3845 ret = -TARGET_EFAULT;
3848 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3849 unlock_user(argptr, arg, 0);
3851 switch (host_blkpg->op) {
3852 case BLKPG_ADD_PARTITION:
3853 case BLKPG_DEL_PARTITION:
3854 /* payload is struct blkpg_partition */
3857 /* Unknown opcode */
3858 ret = -TARGET_EINVAL;
3862 /* Read and convert blkpg->data */
/* After THUNK_HOST conversion, ->data still holds the guest address. */
3863 arg = (abi_long)(uintptr_t)host_blkpg->data;
3864 target_size = thunk_type_size(part_arg_type, 0);
3865 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3867 ret = -TARGET_EFAULT;
3870 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
3871 unlock_user(argptr, arg, 0);
3873 /* Swizzle the data pointer to our local copy and call! */
3874 host_blkpg->data = &host_part;
3875 ret = get_errno(ioctl(fd, ie->host_cmd, host_blkpg));
/*
 * do_ioctl_rt: special-case handler for routing-table ioctls
 * (SIOCADDRT/SIOCDELRT).  struct rtentry embeds a rt_dev string pointer,
 * so conversion walks the struct field by field via the thunk metadata
 * and, when it reaches rt_dev, locks the guest string and stores the
 * host pointer instead of thunk-converting it.  Returns target errnos.
 * NOTE(review): if the rtentry contains no rt_dev field match,
 * host_rt_dev_ptr is read uninitialized at line 3937 -- the visible
 * asserts make a rtentry with rt_dev the only expected input, but
 * confirm against the full file.
 */
3881 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
3882 int fd, int cmd, abi_long arg)
3884 const argtype *arg_type = ie->arg_type;
3885 const StructEntry *se;
3886 const argtype *field_types;
3887 const int *dst_offsets, *src_offsets;
3890 abi_ulong *target_rt_dev_ptr;
3891 unsigned long *host_rt_dev_ptr;
3895 assert(ie->access == IOC_W);
3896 assert(*arg_type == TYPE_PTR);
3898 assert(*arg_type == TYPE_STRUCT);
3899 target_size = thunk_type_size(arg_type, 0);
3900 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3902 return -TARGET_EFAULT;
3905 assert(*arg_type == (int)STRUCT_rtentry);
3906 se = struct_entries + *arg_type++;
3907 assert(se->convert[0] == NULL);
3908 /* convert struct here to be able to catch rt_dev string */
3909 field_types = se->field_types;
3910 dst_offsets = se->field_offsets[THUNK_HOST];
3911 src_offsets = se->field_offsets[THUNK_TARGET];
3912 for (i = 0; i < se->nb_fields; i++) {
3913 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
3914 assert(*field_types == TYPE_PTRVOID);
3915 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
3916 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
3917 if (*target_rt_dev_ptr != 0) {
/* Lock the guest device-name string; its host address goes
 * straight into the converted rtentry. */
3918 *host_rt_dev_ptr = (unsigned long)lock_user_string(
3919 tswapal(*target_rt_dev_ptr));
3920 if (!*host_rt_dev_ptr) {
3921 unlock_user(argptr, arg, 0);
3922 return -TARGET_EFAULT;
3925 *host_rt_dev_ptr = 0;
/* All other fields: normal thunk conversion, advancing the
 * field-type cursor as a side effect. */
3930 field_types = thunk_convert(buf_temp + dst_offsets[i],
3931 argptr + src_offsets[i],
3932 field_types, THUNK_HOST);
3934 unlock_user(argptr, arg, 0);
3936 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3937 if (*host_rt_dev_ptr != 0) {
3938 unlock_user((void *)*host_rt_dev_ptr,
3939 *target_rt_dev_ptr, 0);
/*
 * do_ioctl_kdsigaccept: special-case handler for KDSIGACCEPT.  The ioctl
 * argument is a signal number, which must be translated from the guest's
 * numbering to the host's before the call.  Returns target errnos.
 */
3944 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
3945 int fd, int cmd, abi_long arg)
3947 int sig = target_to_host_signal(arg);
3948 return get_errno(ioctl(fd, ie->host_cmd, sig));
3951 static IOCTLEntry ioctl_entries[] = {
3952 #define IOCTL(cmd, access, ...) \
3953 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
3954 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
3955 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
3960 /* ??? Implement proper locking for ioctls. */
3961 /* do_ioctl() Must return target values and target errnos. */
/*
 * do_ioctl: generic guest-ioctl dispatcher.  Looks the guest cmd up in
 * ioctl_entries[]; an entry may provide a custom do_ioctl handler,
 * otherwise the argument is converted per the entry's thunk arg_type and
 * IOC_R/IOC_W access flags.  Unknown commands return -TARGET_ENOSYS.
 * NOTE(review): the lookup-loop header, braces and final return are
 * elided from this hunk.
 */
3962 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
3964 const IOCTLEntry *ie;
3965 const argtype *arg_type;
3967 uint8_t buf_temp[MAX_STRUCT_SIZE];
/* Table is terminated by a zero target_cmd sentinel. */
3973 if (ie->target_cmd == 0) {
3974 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
3975 return -TARGET_ENOSYS;
3977 if (ie->target_cmd == cmd)
3981 arg_type = ie->arg_type;
3983 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
/* Entries with a custom handler bypass the generic thunk path. */
3986 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
3989 switch(arg_type[0]) {
/* TYPE_NULL: no argument at all. */
3992 ret = get_errno(ioctl(fd, ie->host_cmd));
/* TYPE_INT (presumably): pass the value through unchanged. */
3996 ret = get_errno(ioctl(fd, ie->host_cmd, arg));
/* TYPE_PTR: convert the pointed-to struct per access direction. */
4000 target_size = thunk_type_size(arg_type, 0);
4001 switch(ie->access) {
/* IOC_R: kernel writes; convert result back to the guest. */
4003 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
4004 if (!is_error(ret)) {
4005 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4007 return -TARGET_EFAULT;
4008 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4009 unlock_user(argptr, arg, target_size);
/* IOC_W: kernel reads; convert guest data in first. */
4013 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4015 return -TARGET_EFAULT;
4016 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4017 unlock_user(argptr, arg, 0);
4018 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
/* IOC_RW: convert in, call, convert back out. */
4022 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4024 return -TARGET_EFAULT;
4025 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4026 unlock_user(argptr, arg, 0);
4027 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
4028 if (!is_error(ret)) {
4029 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4031 return -TARGET_EFAULT;
4032 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4033 unlock_user(argptr, arg, target_size);
4039 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
4040 (long)cmd, arg_type[0]);
4041 ret = -TARGET_ENOSYS;
/* Bit translation table for termios c_iflag (input modes) between
 * target and host encodings; fields are {target_mask, target_bits,
 * host_mask, host_bits}. */
4047 static const bitmask_transtbl iflag_tbl[] = {
4048 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
4049 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
4050 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
4051 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
4052 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
4053 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
4054 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
4055 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
4056 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
4057 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
4058 { TARGET_IXON, TARGET_IXON, IXON, IXON },
4059 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
4060 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
4061 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
/* Bit translation table for termios c_oflag (output modes).  Multi-bit
 * delay fields (NLDLY, CRDLY, TABDLY, ...) get one entry per possible
 * value so the whole field is translated, not just single bits. */
4065 static const bitmask_transtbl oflag_tbl[] = {
4066 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
4067 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
4068 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
4069 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
4070 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
4071 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
4072 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
4073 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
4074 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
4075 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
4076 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
4077 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
4078 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
4079 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
4080 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
4081 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
4082 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
4083 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
4084 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
4085 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
4086 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
4087 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
4088 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
4089 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
/* Bit translation table for termios c_cflag (control modes).  The
 * CBAUD field is enumerated one entry per baud rate, CSIZE one per
 * character size. */
4093 static const bitmask_transtbl cflag_tbl[] = {
4094 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
4095 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
4096 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
4097 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
4098 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
4099 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
4100 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
4101 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
4102 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
4103 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
4104 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
4105 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
4106 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
4107 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
4108 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
4109 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
4110 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
4111 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
4112 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
4113 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
4114 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
4115 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
4116 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
4117 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
4118 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
4119 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
4120 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
4121 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
4122 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
4123 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
4124 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
/* Bit translation table for termios c_lflag (local modes). */
4128 static const bitmask_transtbl lflag_tbl[] = {
4129 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
4130 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
4131 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
4132 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
4133 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
4134 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
4135 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
4136 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
4137 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
4138 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
4139 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
4140 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
4141 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
4142 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
4143 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
/* Convert a guest struct termios into host layout: each flag word is
 * byteswapped and bit-translated via the tables above, and each
 * control character is copied to its host slot individually (the
 * TARGET_V* indices need not match the host V* indices). */
4147 static void target_to_host_termios (void *dst, const void *src)
4149 struct host_termios *host = dst;
4150 const struct target_termios *target = src;
4153 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
4155 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
4157 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
4159 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
4160 host->c_line = target->c_line;
/* Clear first so host slots without a target counterpart read as 0. */
4162 memset(host->c_cc, 0, sizeof(host->c_cc));
4163 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
4164 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
4165 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
4166 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
4167 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
4168 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
4169 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
4170 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
4171 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
4172 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
4173 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
4174 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
4175 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
4176 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
4177 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
4178 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
4179 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
/* Inverse of target_to_host_termios: convert a host struct termios
 * into the guest layout (bit-translate, then byteswap, then copy the
 * control characters slot by slot). */
4182 static void host_to_target_termios (void *dst, const void *src)
4184 struct target_termios *target = dst;
4185 const struct host_termios *host = src;
4188 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
4190 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
4192 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
4194 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
4195 target->c_line = host->c_line;
/* Clear first so target slots without a host counterpart read as 0. */
4197 memset(target->c_cc, 0, sizeof(target->c_cc));
4198 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
4199 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
4200 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
4201 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
4202 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
4203 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
4204 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
4205 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
4206 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
4207 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
4208 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
4209 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
4210 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
4211 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
4212 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
4213 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
4214 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
/* Thunk descriptor registering the termios converters above so the
 * generic ioctl path can marshal struct termios automatically. */
4217 static const StructEntry struct_termios_def = {
4218 .convert = { host_to_target_termios, target_to_host_termios },
4219 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
4220 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
/* Bit translation table for mmap() flags between target and host. */
4223 static bitmask_transtbl mmap_flags_tbl[] = {
4224 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
4225 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
4226 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
4227 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
4228 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
4229 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
4230 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
4231 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
4232 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE, MAP_NORESERVE,
4237 #if defined(TARGET_I386)
4239 /* NOTE: there is really one LDT for all the threads */
4240 static uint8_t *ldt_table;
/* modify_ldt(func=0): copy the emulated LDT out to guest memory,
 * truncated to at most bytecount bytes.  Returns the size copied or
 * -TARGET_EFAULT if the guest buffer cannot be written. */
4242 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
4249 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
4250 if (size > bytecount)
4252 p = lock_user(VERIFY_WRITE, ptr, size, 0);
4254 return -TARGET_EFAULT;
4255 /* ??? Should this by byteswapped? */
4256 memcpy(p, ldt_table, size);
4257 unlock_user(p, ptr, size);
4261 /* XXX: add locking support */
/* modify_ldt(func=1/0x11): install one LDT descriptor from the guest's
 * struct user_desc (target_modify_ldt_ldt_s).  @oldmode selects the
 * legacy flag interpretation.  Allocates the shared LDT lazily. */
4262 static abi_long write_ldt(CPUX86State *env,
4263                           abi_ulong ptr, unsigned long bytecount, int oldmode)
4265 struct target_modify_ldt_ldt_s ldt_info;
4266 struct target_modify_ldt_ldt_s *target_ldt_info;
4267 int seg_32bit, contents, read_exec_only, limit_in_pages;
4268 int seg_not_present, useable, lm;
4269 uint32_t *lp, entry_1, entry_2;
4271 if (bytecount != sizeof(ldt_info))
4272 return -TARGET_EINVAL;
4273 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
4274 return -TARGET_EFAULT;
4275 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
4276 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
4277 ldt_info.limit = tswap32(target_ldt_info->limit);
4278 ldt_info.flags = tswap32(target_ldt_info->flags);
4279 unlock_user_struct(target_ldt_info, ptr, 0);
4281 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
4282 return -TARGET_EINVAL;
/* Unpack the packed flags word (same layout as the kernel's
 * struct user_desc bitfields). */
4283 seg_32bit = ldt_info.flags & 1;
4284 contents = (ldt_info.flags >> 1) & 3;
4285 read_exec_only = (ldt_info.flags >> 3) & 1;
4286 limit_in_pages = (ldt_info.flags >> 4) & 1;
4287 seg_not_present = (ldt_info.flags >> 5) & 1;
4288 useable = (ldt_info.flags >> 6) & 1;
4292 lm = (ldt_info.flags >> 7) & 1;
4294 if (contents == 3) {
4296 return -TARGET_EINVAL;
4297 if (seg_not_present == 0)
4298 return -TARGET_EINVAL;
4300 /* allocate the LDT */
/* Lazily allocated once and shared by all threads (see note above). */
4302 env->ldt.base = target_mmap(0,
4303                             TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
4304                             PROT_READ|PROT_WRITE,
4305                             MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
4306 if (env->ldt.base == -1)
4307 return -TARGET_ENOMEM;
4308 memset(g2h(env->ldt.base), 0,
4309        TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
4310 env->ldt.limit = 0xffff;
4311 ldt_table = g2h(env->ldt.base);
4314 /* NOTE: same code as Linux kernel */
4315 /* Allow LDTs to be cleared by the user. */
4316 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
4319 read_exec_only == 1 &&
4321 limit_in_pages == 0 &&
4322 seg_not_present == 1 &&
/* Pack base/limit/flags into the two 32-bit halves of an x86
 * segment descriptor. */
4330 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4331 (ldt_info.limit & 0x0ffff);
4332 entry_2 = (ldt_info.base_addr & 0xff000000) |
4333 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
4334 (ldt_info.limit & 0xf0000) |
4335 ((read_exec_only ^ 1) << 9) |
4337 ((seg_not_present ^ 1) << 15) |
4339 (limit_in_pages << 23) |
4343 entry_2 |= (useable << 20);
4345 /* Install the new entry ... */
4347 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
4348 lp[0] = tswap32(entry_1);
4349 lp[1] = tswap32(entry_2);
4353 /* specific and weird i386 syscalls */
/* modify_ldt(2) dispatcher: func 0 reads the LDT, func 1 writes in
 * old mode, func 0x11 writes in new mode; anything else is ENOSYS. */
4354 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
4355                               unsigned long bytecount)
4361 ret = read_ldt(ptr, bytecount);
4364 ret = write_ldt(env, ptr, bytecount, 1);
4367 ret = write_ldt(env, ptr, bytecount, 0);
4370 ret = -TARGET_ENOSYS;
4376 #if defined(TARGET_I386) && defined(TARGET_ABI32)
/* set_thread_area(2): install a TLS descriptor into the emulated GDT.
 * entry_number == -1 asks us to pick a free TLS slot and report the
 * chosen index back in the guest's struct. */
4377 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
4379 uint64_t *gdt_table = g2h(env->gdt.base);
4380 struct target_modify_ldt_ldt_s ldt_info;
4381 struct target_modify_ldt_ldt_s *target_ldt_info;
4382 int seg_32bit, contents, read_exec_only, limit_in_pages;
4383 int seg_not_present, useable, lm;
4384 uint32_t *lp, entry_1, entry_2;
4387 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
4388 if (!target_ldt_info)
4389 return -TARGET_EFAULT;
4390 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
4391 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
4392 ldt_info.limit = tswap32(target_ldt_info->limit);
4393 ldt_info.flags = tswap32(target_ldt_info->flags);
4394 if (ldt_info.entry_number == -1) {
/* Scan the TLS range of the GDT for an unused (zero) descriptor. */
4395 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
4396 if (gdt_table[i] == 0) {
4397 ldt_info.entry_number = i;
4398 target_ldt_info->entry_number = tswap32(i);
4403 unlock_user_struct(target_ldt_info, ptr, 1);
4405 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
4406     ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
4407 return -TARGET_EINVAL;
/* Same flag unpacking and descriptor packing as write_ldt(). */
4408 seg_32bit = ldt_info.flags & 1;
4409 contents = (ldt_info.flags >> 1) & 3;
4410 read_exec_only = (ldt_info.flags >> 3) & 1;
4411 limit_in_pages = (ldt_info.flags >> 4) & 1;
4412 seg_not_present = (ldt_info.flags >> 5) & 1;
4413 useable = (ldt_info.flags >> 6) & 1;
4417 lm = (ldt_info.flags >> 7) & 1;
4420 if (contents == 3) {
4421 if (seg_not_present == 0)
4422 return -TARGET_EINVAL;
4425 /* NOTE: same code as Linux kernel */
4426 /* Allow LDTs to be cleared by the user. */
4427 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
4428 if ((contents == 0 &&
4429 read_exec_only == 1 &&
4431 limit_in_pages == 0 &&
4432 seg_not_present == 1 &&
4440 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4441 (ldt_info.limit & 0x0ffff);
4442 entry_2 = (ldt_info.base_addr & 0xff000000) |
4443 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
4444 (ldt_info.limit & 0xf0000) |
4445 ((read_exec_only ^ 1) << 9) |
4447 ((seg_not_present ^ 1) << 15) |
4449 (limit_in_pages << 23) |
4454 /* Install the new entry ... */
4456 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
4457 lp[0] = tswap32(entry_1);
4458 lp[1] = tswap32(entry_2);
/* get_thread_area(2): decode the GDT descriptor at the guest-supplied
 * TLS entry index back into a struct user_desc and copy it to the
 * guest.  Inverse of do_set_thread_area(). */
4462 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
4464 struct target_modify_ldt_ldt_s *target_ldt_info;
4465 uint64_t *gdt_table = g2h(env->gdt.base);
4466 uint32_t base_addr, limit, flags;
4467 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
4468 int seg_not_present, useable, lm;
4469 uint32_t *lp, entry_1, entry_2;
4471 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
4472 if (!target_ldt_info)
4473 return -TARGET_EFAULT;
4474 idx = tswap32(target_ldt_info->entry_number);
4475 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
4476     idx > TARGET_GDT_ENTRY_TLS_MAX) {
4477 unlock_user_struct(target_ldt_info, ptr, 1);
4478 return -TARGET_EINVAL;
4480 lp = (uint32_t *)(gdt_table + idx);
4481 entry_1 = tswap32(lp[0]);
4482 entry_2 = tswap32(lp[1]);
/* Unpack the descriptor fields (mirror of the packing in
 * do_set_thread_area). */
4484 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
4485 contents = (entry_2 >> 10) & 3;
4486 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
4487 seg_32bit = (entry_2 >> 22) & 1;
4488 limit_in_pages = (entry_2 >> 23) & 1;
4489 useable = (entry_2 >> 20) & 1;
4493 lm = (entry_2 >> 21) & 1;
4495 flags = (seg_32bit << 0) | (contents << 1) |
4496 (read_exec_only << 3) | (limit_in_pages << 4) |
4497 (seg_not_present << 5) | (useable << 6) | (lm << 7);
4498 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
4499 base_addr = (entry_1 >> 16) |
4500 (entry_2 & 0xff000000) |
4501 ((entry_2 & 0xff) << 16);
4502 target_ldt_info->base_addr = tswapal(base_addr);
4503 target_ldt_info->limit = tswap32(limit);
4504 target_ldt_info->flags = tswap32(flags);
4505 unlock_user_struct(target_ldt_info, ptr, 1);
4508 #endif /* TARGET_I386 && TARGET_ABI32 */
4510 #ifndef TARGET_ABI32
/* x86-64 arch_prctl(2): get/set the FS or GS segment base for the
 * guest.  SET loads a null selector and writes the base directly;
 * GET copies the current base out to guest memory. */
4511 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
4518 case TARGET_ARCH_SET_GS:
4519 case TARGET_ARCH_SET_FS:
4520 if (code == TARGET_ARCH_SET_GS)
4524 cpu_x86_load_seg(env, idx, 0);
4525 env->segs[idx].base = addr;
4527 case TARGET_ARCH_GET_GS:
4528 case TARGET_ARCH_GET_FS:
4529 if (code == TARGET_ARCH_GET_GS)
4533 val = env->segs[idx].base;
4534 if (put_user(val, addr, abi_ulong))
4535 ret = -TARGET_EFAULT;
4538 ret = -TARGET_EINVAL;
4545 #endif /* defined(TARGET_I386) */
4547 #define NEW_STACK_SIZE 0x40000
/* Serializes thread creation so parent-side setup appears atomic to
 * the child (see clone_func / do_fork). */
4550 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
4553 pthread_mutex_t mutex;
4554 pthread_cond_t cond;
4557 abi_ulong child_tidptr;
4558 abi_ulong parent_tidptr;
/* Entry point of a new guest thread created by do_fork(CLONE_VM):
 * publishes its tid, restores the signal mask, signals readiness to
 * the parent, then waits for the parent to finish TLS setup before
 * entering the guest CPU loop. */
4562 static void *clone_func(void *arg)
4564 new_thread_info *info = arg;
4569 rcu_register_thread();
4571 cpu = ENV_GET_CPU(env);
4573 ts = (TaskState *)cpu->opaque;
4574 info->tid = gettid();
4575 cpu->host_tid = info->tid;
/* CLONE_CHILD_SETTID / CLONE_PARENT_SETTID semantics. */
4577 if (info->child_tidptr)
4578 put_user_u32(info->tid, info->child_tidptr);
4579 if (info->parent_tidptr)
4580 put_user_u32(info->tid, info->parent_tidptr);
4581 /* Enable signals. */
4582 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
4583 /* Signal to the parent that we're ready. */
4584 pthread_mutex_lock(&info->mutex);
4585 pthread_cond_broadcast(&info->cond);
4586 pthread_mutex_unlock(&info->mutex);
4587 /* Wait until the parent has finished initializing the tls state. */
4588 pthread_mutex_lock(&clone_lock);
4589 pthread_mutex_unlock(&clone_lock);
4595 /* do_fork() Must return host values and target errnos (unlike most
4596    do_*() functions). */
/* Implements clone(2)/fork(2)/vfork(2) for the guest.  CLONE_VM
 * creates a new host pthread sharing the address space; without
 * CLONE_VM it falls back to a real fork().  vfork is emulated with
 * fork (CLONE_VFORK|CLONE_VM are stripped). */
4597 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
4598                    abi_ulong parent_tidptr, target_ulong newtls,
4599                    abi_ulong child_tidptr)
4601 CPUState *cpu = ENV_GET_CPU(env);
4605 CPUArchState *new_env;
4606 unsigned int nptl_flags;
4609 /* Emulate vfork() with fork() */
4610 if (flags & CLONE_VFORK)
4611 flags &= ~(CLONE_VFORK | CLONE_VM);
4613 if (flags & CLONE_VM) {
4614 TaskState *parent_ts = (TaskState *)cpu->opaque;
4615 new_thread_info info;
4616 pthread_attr_t attr;
4618 ts = g_new0(TaskState, 1);
4619 init_task_state(ts);
4620 /* we create a new CPU instance. */
4621 new_env = cpu_copy(env);
4622 /* Init regs that differ from the parent. */
4623 cpu_clone_regs(new_env, newsp);
4624 new_cpu = ENV_GET_CPU(new_env);
4625 new_cpu->opaque = ts;
4626 ts->bprm = parent_ts->bprm;
4627 ts->info = parent_ts->info;
/* The NPTL flags are handled here, not passed to the host. */
4629 flags &= ~CLONE_NPTL_FLAGS2;
4631 if (nptl_flags & CLONE_CHILD_CLEARTID) {
4632 ts->child_tidptr = child_tidptr;
4635 if (nptl_flags & CLONE_SETTLS)
4636 cpu_set_tls (new_env, newtls);
4638 /* Grab a mutex so that thread setup appears atomic. */
4639 pthread_mutex_lock(&clone_lock);
4641 memset(&info, 0, sizeof(info));
4642 pthread_mutex_init(&info.mutex, NULL);
4643 pthread_mutex_lock(&info.mutex);
4644 pthread_cond_init(&info.cond, NULL);
4646 if (nptl_flags & CLONE_CHILD_SETTID)
4647 info.child_tidptr = child_tidptr;
4648 if (nptl_flags & CLONE_PARENT_SETTID)
4649 info.parent_tidptr = parent_tidptr;
4651 ret = pthread_attr_init(&attr);
4652 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
4653 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
4654 /* It is not safe to deliver signals until the child has finished
4655    initializing, so temporarily block all signals. */
4656 sigfillset(&sigmask);
4657 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
4659 ret = pthread_create(&info.thread, &attr, clone_func, &info);
4660 /* TODO: Free new CPU state if thread creation failed. */
4662 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
4663 pthread_attr_destroy(&attr);
4665 /* Wait for the child to initialize. */
4666 pthread_cond_wait(&info.cond, &info.mutex);
4668 if (flags & CLONE_PARENT_SETTID)
4669 put_user_u32(ret, parent_tidptr);
4673 pthread_mutex_unlock(&info.mutex);
4674 pthread_cond_destroy(&info.cond);
4675 pthread_mutex_destroy(&info.mutex);
4676 pthread_mutex_unlock(&clone_lock);
4678 /* if no CLONE_VM, we consider it is a fork */
4679 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0) {
4680 return -TARGET_EINVAL;
4685 /* Child Process. */
4687 cpu_clone_regs(env, newsp);
4689 /* There is a race condition here. The parent process could
4690    theoretically read the TID in the child process before the child
4691    tid is set. This would require using either ptrace
4692    (not implemented) or having *_tidptr to point at a shared memory
4693    mapping. We can't repeat the spinlock hack used above because
4694    the child process gets its own copy of the lock. */
4695 if (flags & CLONE_CHILD_SETTID)
4696 put_user_u32(gettid(), child_tidptr)
4697 if (flags & CLONE_PARENT_SETTID)
4698 put_user_u32(gettid(), parent_tidptr);
4699 ts = (TaskState *)cpu->opaque;
4700 if (flags & CLONE_SETTLS)
4701 cpu_set_tls (env, newtls);
4702 if (flags & CLONE_CHILD_CLEARTID)
4703 ts->child_tidptr = child_tidptr;
4711 /* warning : doesn't handle linux specific flags... */
/* Map a target fcntl(2) command constant to the host constant.
 * Returns -TARGET_EINVAL for commands we do not translate.  On 32-bit
 * ABIs the *LK64 commands are also accepted. */
4712 static int target_to_host_fcntl_cmd(int cmd)
4715 case TARGET_F_DUPFD:
4716 case TARGET_F_GETFD:
4717 case TARGET_F_SETFD:
4718 case TARGET_F_GETFL:
4719 case TARGET_F_SETFL:
4721 case TARGET_F_GETLK:
4723 case TARGET_F_SETLK:
4725 case TARGET_F_SETLKW:
4727 case TARGET_F_GETOWN:
4729 case TARGET_F_SETOWN:
4731 case TARGET_F_GETSIG:
4733 case TARGET_F_SETSIG:
4735 #if TARGET_ABI_BITS == 32
4736 case TARGET_F_GETLK64:
4738 case TARGET_F_SETLK64:
4740 case TARGET_F_SETLKW64:
4743 case TARGET_F_SETLEASE:
4745 case TARGET_F_GETLEASE:
4747 #ifdef F_DUPFD_CLOEXEC
4748 case TARGET_F_DUPFD_CLOEXEC:
4749 return F_DUPFD_CLOEXEC;
4751 case TARGET_F_NOTIFY:
4754 case TARGET_F_GETOWN_EX:
4758 case TARGET_F_SETOWN_EX:
4762 return -TARGET_EINVAL;
4764 return -TARGET_EINVAL;
/* Translation table for struct flock l_type values; -1 masks mean
 * "match the exact value" rather than a bit mask. */
4767 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
4768 static const bitmask_transtbl flock_tbl[] = {
4769 TRANSTBL_CONVERT(F_RDLCK),
4770 TRANSTBL_CONVERT(F_WRLCK),
4771 TRANSTBL_CONVERT(F_UNLCK),
4772 TRANSTBL_CONVERT(F_EXLCK),
4773 TRANSTBL_CONVERT(F_SHLCK),
/* Emulate fcntl(2): translate the command, marshal struct flock /
 * flock64 / f_owner_ex arguments between guest and host layouts, and
 * convert file-status flags through fcntl_flags_tbl.  Returns target
 * errnos. */
4777 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
4780 struct target_flock *target_fl;
4781 struct flock64 fl64;
4782 struct target_flock64 *target_fl64;
4784 struct f_owner_ex fox;
4785 struct target_f_owner_ex *target_fox;
4788 int host_cmd = target_to_host_fcntl_cmd(cmd);
4790 if (host_cmd == -TARGET_EINVAL)
4794 case TARGET_F_GETLK:
/* Copy the guest's lock request in, query, then copy the result out. */
4795 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4796 return -TARGET_EFAULT;
4798 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
4799 fl.l_whence = tswap16(target_fl->l_whence);
4800 fl.l_start = tswapal(target_fl->l_start);
4801 fl.l_len = tswapal(target_fl->l_len);
4802 fl.l_pid = tswap32(target_fl->l_pid);
4803 unlock_user_struct(target_fl, arg, 0);
4804 ret = get_errno(fcntl(fd, host_cmd, &fl));
4806 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
4807 return -TARGET_EFAULT;
4809 host_to_target_bitmask(tswap16(fl.l_type), flock_tbl);
4810 target_fl->l_whence = tswap16(fl.l_whence);
4811 target_fl->l_start = tswapal(fl.l_start);
4812 target_fl->l_len = tswapal(fl.l_len);
4813 target_fl->l_pid = tswap32(fl.l_pid);
4814 unlock_user_struct(target_fl, arg, 1);
4818 case TARGET_F_SETLK:
4819 case TARGET_F_SETLKW:
4820 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4821 return -TARGET_EFAULT;
4823 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
4824 fl.l_whence = tswap16(target_fl->l_whence);
4825 fl.l_start = tswapal(target_fl->l_start);
4826 fl.l_len = tswapal(target_fl->l_len);
4827 fl.l_pid = tswap32(target_fl->l_pid);
4828 unlock_user_struct(target_fl, arg, 0);
4829 ret = get_errno(fcntl(fd, host_cmd, &fl));
4832 case TARGET_F_GETLK64:
/* 64-bit variants use struct flock64 with 64-bit start/len. */
4833 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4834 return -TARGET_EFAULT;
4836 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
4837 fl64.l_whence = tswap16(target_fl64->l_whence);
4838 fl64.l_start = tswap64(target_fl64->l_start);
4839 fl64.l_len = tswap64(target_fl64->l_len);
4840 fl64.l_pid = tswap32(target_fl64->l_pid);
4841 unlock_user_struct(target_fl64, arg, 0);
4842 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4844 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
4845 return -TARGET_EFAULT;
4846 target_fl64->l_type =
4847 host_to_target_bitmask(tswap16(fl64.l_type), flock_tbl) >> 1;
4848 target_fl64->l_whence = tswap16(fl64.l_whence);
4849 target_fl64->l_start = tswap64(fl64.l_start);
4850 target_fl64->l_len = tswap64(fl64.l_len);
4851 target_fl64->l_pid = tswap32(fl64.l_pid);
4852 unlock_user_struct(target_fl64, arg, 1);
4855 case TARGET_F_SETLK64:
4856 case TARGET_F_SETLKW64:
4857 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4858 return -TARGET_EFAULT;
4860 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
4861 fl64.l_whence = tswap16(target_fl64->l_whence);
4862 fl64.l_start = tswap64(target_fl64->l_start);
4863 fl64.l_len = tswap64(target_fl64->l_len);
4864 fl64.l_pid = tswap32(target_fl64->l_pid);
4865 unlock_user_struct(target_fl64, arg, 0);
4866 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4869 case TARGET_F_GETFL:
/* File status flags must be translated back to target bit values. */
4870 ret = get_errno(fcntl(fd, host_cmd, arg));
4872 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
4876 case TARGET_F_SETFL:
4877 ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
4881 case TARGET_F_GETOWN_EX:
4882 ret = get_errno(fcntl(fd, host_cmd, &fox));
4884 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
4885 return -TARGET_EFAULT;
4886 target_fox->type = tswap32(fox.type);
4887 target_fox->pid = tswap32(fox.pid);
4888 unlock_user_struct(target_fox, arg, 1);
4894 case TARGET_F_SETOWN_EX:
4895 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
4896 return -TARGET_EFAULT;
4897 fox.type = tswap32(target_fox->type);
4898 fox.pid = tswap32(target_fox->pid);
4899 unlock_user_struct(target_fox, arg, 0);
4900 ret = get_errno(fcntl(fd, host_cmd, &fox));
4904 case TARGET_F_SETOWN:
4905 case TARGET_F_GETOWN:
4906 case TARGET_F_SETSIG:
4907 case TARGET_F_GETSIG:
4908 case TARGET_F_SETLEASE:
4909 case TARGET_F_GETLEASE:
/* Integer-argument commands pass straight through. */
4910 ret = get_errno(fcntl(fd, host_cmd, arg));
4914 ret = get_errno(fcntl(fd, cmd, arg));
/* Helpers converting between 16-bit (legacy) and 32-bit uid/gid
 * representations.  With USE_UID16 the conversions clamp/sign-extend
 * and -1 stays -1; without it they are identity functions. */
4922 static inline int high2lowuid(int uid)
4930 static inline int high2lowgid(int gid)
4938 static inline int low2highuid(int uid)
4940 if ((int16_t)uid == -1)
4946 static inline int low2highgid(int gid)
4948 if ((int16_t)gid == -1)
4953 static inline int tswapid(int id)
4958 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
4960 #else /* !USE_UID16 */
4961 static inline int high2lowuid(int uid)
4965 static inline int high2lowgid(int gid)
4969 static inline int low2highuid(int uid)
4973 static inline int low2highgid(int gid)
4977 static inline int tswapid(int id)
4982 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
4984 #endif /* USE_UID16 */
/* One-time syscall-layer initialization: register the struct thunk
 * descriptors, build the reverse errno table, and patch computed
 * TARGET_IOC sizes into ioctl_entries (with a consistency check when
 * host and target are the same architecture). */
4986 void syscall_init(void)
4989 const argtype *arg_type;
4993 thunk_init(STRUCT_MAX);
4995 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4996 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4997 #include "syscall_types.h"
4999 #undef STRUCT_SPECIAL
5001 /* Build target_to_host_errno_table[] table from
5002  * host_to_target_errno_table[]. */
5003 for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
5004 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
5007 /* we patch the ioctl size if necessary. We rely on the fact that
5008    no ioctl has all the bits at '1' in the size field */
5010 while (ie->target_cmd != 0) {
5011 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
5012     TARGET_IOC_SIZEMASK) {
5013 arg_type = ie->arg_type;
5014 if (arg_type[0] != TYPE_PTR) {
5015 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
5020 size = thunk_type_size(arg_type, 0);
5021 ie->target_cmd = (ie->target_cmd &
5022                   ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
5023                   (size << TARGET_IOC_SIZESHIFT);
5026 /* automatic consistency check if same arch */
5027 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
5028     (defined(__x86_64__) && defined(TARGET_X86_64))
5029 if (unlikely(ie->target_cmd != ie->host_cmd)) {
5030 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
5031         ie->name, ie->target_cmd, ie->host_cmd);
5038 #if TARGET_ABI_BITS == 32
/* Reassemble a 64-bit value passed in two 32-bit syscall argument
 * registers; which half is high depends on target endianness.  On
 * 64-bit ABIs the value already fits in one register. */
5039 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
5041 #ifdef TARGET_WORDS_BIGENDIAN
5042 return ((uint64_t)word0 << 32) | word1;
5044 return ((uint64_t)word1 << 32) | word0;
5047 #else /* TARGET_ABI_BITS == 32 */
5048 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
5052 #endif /* TARGET_ABI_BITS != 32 */
5054 #ifdef TARGET_NR_truncate64
/* truncate64/ftruncate64: combine the split 64-bit length from two
 * registers (skipping a pad register on ABIs that align register
 * pairs) and call the host syscall. */
5055 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
5060 if (regpairs_aligned(cpu_env)) {
5064 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
5068 #ifdef TARGET_NR_ftruncate64
5069 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
5074 if (regpairs_aligned(cpu_env)) {
5078 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
/* Copy a struct timespec from guest memory into @host_ts,
 * byteswapping as needed.  Returns 0 or -TARGET_EFAULT. */
5082 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
5083                                                abi_ulong target_addr)
5085 struct target_timespec *target_ts;
5087 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
5088 return -TARGET_EFAULT;
5089 host_ts->tv_sec = tswapal(target_ts->tv_sec);
5090 host_ts->tv_nsec = tswapal(target_ts->tv_nsec);
5091 unlock_user_struct(target_ts, target_addr, 0);
/* Copy @host_ts out to the guest struct timespec at @target_addr,
 * byteswapping as needed.  Returns 0 or -TARGET_EFAULT. */
5095 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
5096                                                struct timespec *host_ts)
5098 struct target_timespec *target_ts;
5100 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
5101 return -TARGET_EFAULT;
5102 target_ts->tv_sec = tswapal(host_ts->tv_sec);
5103 target_ts->tv_nsec = tswapal(host_ts->tv_nsec);
5104 unlock_user_struct(target_ts, target_addr, 1);
5108 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
5109 abi_ulong target_addr)
5111 struct target_itimerspec *target_itspec;
5113 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
5114 return -TARGET_EFAULT;
5117 host_itspec->it_interval.tv_sec =
5118 tswapal(target_itspec->it_interval.tv_sec);
5119 host_itspec->it_interval.tv_nsec =
5120 tswapal(target_itspec->it_interval.tv_nsec);
5121 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
5122 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
5124 unlock_user_struct(target_itspec, target_addr, 1);
5128 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
5129 struct itimerspec *host_its)
5131 struct target_itimerspec *target_itspec;
5133 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
5134 return -TARGET_EFAULT;
5137 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
5138 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
5140 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
5141 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
5143 unlock_user_struct(target_itspec, target_addr, 0);
/* Copy a struct sigevent from guest memory into @host_sevp, mapping
 * the signal number to host numbering and byteswapping each field.
 * Returns 0 or -TARGET_EFAULT. */
5147 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
5148                                                abi_ulong target_addr)
5150 struct target_sigevent *target_sevp;
5152 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
5153 return -TARGET_EFAULT;
5156 /* This union is awkward on 64 bit systems because it has a 32 bit
5157  * integer and a pointer in it; we follow the conversion approach
5158  * used for handling sigval types in signal.c so the guest should get
5159  * the correct value back even if we did a 64 bit byteswap and it's
5160  * using the 32 bit integer.
5162 host_sevp->sigev_value.sival_ptr =
5163     (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
5164 host_sevp->sigev_signo =
5165     target_to_host_signal(tswap32(target_sevp->sigev_signo));
5166 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
5167 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
5169 unlock_user_struct(target_sevp, target_addr, 1);
5173 #if defined(TARGET_NR_mlockall)
/* Translate guest mlockall() MCL_* flag bits into the host's values.
 * Only MCL_CURRENT and MCL_FUTURE are handled; other bits are dropped. */
5174 static inline int target_to_host_mlockall_arg(int arg)
5178 if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
5179 result |= MCL_CURRENT;
5181 if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
5182 result |= MCL_FUTURE;
5188 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
/* Copy a host struct stat into the guest's stat64 layout at target_addr.
 * 32-bit ARM EABI guests use their own target_eabi_stat64 layout; all
 * other targets use target_stat64 (or target_stat when the target has no
 * separate stat64 struct).  Returns -TARGET_EFAULT on a bad guest address. */
5189 static inline abi_long host_to_target_stat64(void *cpu_env,
5190 abi_ulong target_addr,
5191 struct stat *host_st)
5193 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
5194 if (((CPUARMState *)cpu_env)->eabi) {
5195 struct target_eabi_stat64 *target_st;
5197 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
5198 return -TARGET_EFAULT;
/* Zero first so any padding/unassigned fields read as 0 in the guest. */
5199 memset(target_st, 0, sizeof(struct target_eabi_stat64));
5200 __put_user(host_st->st_dev, &target_st->st_dev);
5201 __put_user(host_st->st_ino, &target_st->st_ino);
5202 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
/* Some ABIs duplicate the inode in a second __st_ino field. */
5203 __put_user(host_st->st_ino, &target_st->__st_ino);
5205 __put_user(host_st->st_mode, &target_st->st_mode);
5206 __put_user(host_st->st_nlink, &target_st->st_nlink);
5207 __put_user(host_st->st_uid, &target_st->st_uid);
5208 __put_user(host_st->st_gid, &target_st->st_gid);
5209 __put_user(host_st->st_rdev, &target_st->st_rdev);
5210 __put_user(host_st->st_size, &target_st->st_size);
5211 __put_user(host_st->st_blksize, &target_st->st_blksize);
5212 __put_user(host_st->st_blocks, &target_st->st_blocks);
5213 __put_user(host_st->st_atime, &target_st->target_st_atime);
5214 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
5215 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
5216 unlock_user_struct(target_st, target_addr, 1);
/* Generic (non-ARM-EABI) path below mirrors the EABI path field-for-field. */
5220 #if defined(TARGET_HAS_STRUCT_STAT64)
5221 struct target_stat64 *target_st;
5223 struct target_stat *target_st;
5226 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
5227 return -TARGET_EFAULT;
5228 memset(target_st, 0, sizeof(*target_st));
5229 __put_user(host_st->st_dev, &target_st->st_dev);
5230 __put_user(host_st->st_ino, &target_st->st_ino);
5231 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
5232 __put_user(host_st->st_ino, &target_st->__st_ino);
5234 __put_user(host_st->st_mode, &target_st->st_mode);
5235 __put_user(host_st->st_nlink, &target_st->st_nlink);
5236 __put_user(host_st->st_uid, &target_st->st_uid);
5237 __put_user(host_st->st_gid, &target_st->st_gid);
5238 __put_user(host_st->st_rdev, &target_st->st_rdev);
5239 /* XXX: better use of kernel struct */
5240 __put_user(host_st->st_size, &target_st->st_size);
5241 __put_user(host_st->st_blksize, &target_st->st_blksize);
5242 __put_user(host_st->st_blocks, &target_st->st_blocks);
5243 __put_user(host_st->st_atime, &target_st->target_st_atime);
5244 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
5245 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
5246 unlock_user_struct(target_st, target_addr, 1);
5253 /* ??? Using host futex calls even when target atomic operations
5254 are not really atomic probably breaks things. However implementing
5255 futexes locally would make futexes shared between multiple processes
5256 tricky. However they're probably useless because guest atomic
5257 operations won't work either. */
/* Emulate the futex(2) syscall: translate guest address/timeout and
 * dispatch to the host sys_futex.  Unhandled base ops fall through to
 * -TARGET_ENOSYS. */
5258 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
5259 target_ulong uaddr2, int val3)
5261 struct timespec ts, *pts;
5264 /* ??? We assume FUTEX_* constants are the same on both host
and target. */
5266 #ifdef FUTEX_CMD_MASK
/* Strip FUTEX_PRIVATE_FLAG/FUTEX_CLOCK_REALTIME modifier bits for the switch. */
5267 base_op = op & FUTEX_CMD_MASK;
5273 case FUTEX_WAIT_BITSET:
/* Guest timeout struct must be byteswapped before passing to the host. */
5276 target_to_host_timespec(pts, timeout);
/* For WAIT the val is compared against guest memory, so swap it too. */
5280 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
5283 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
5285 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
5287 case FUTEX_CMP_REQUEUE:
5289 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
5290 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
5291 But the prototype takes a `struct timespec *'; insert casts
5292 to satisfy the compiler. We do not need to tswap TIMEOUT
5293 since it's not compared to guest memory. */
5294 pts = (struct timespec *)(uintptr_t) timeout;
5295 return get_errno(sys_futex(g2h(uaddr), op, val, pts,
5297 (base_op == FUTEX_CMP_REQUEUE
5301 return -TARGET_ENOSYS;
5304 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate name_to_handle_at(2): read handle_bytes from the guest handle,
 * call the host syscall into a temporary host file_handle, then copy the
 * (opaque) handle back to the guest with its header fields byteswapped,
 * and store the mount id at mount_id. */
5305 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
5306 abi_long handle, abi_long mount_id,
5309 struct file_handle *target_fh;
5310 struct file_handle *fh;
5314 unsigned int size, total_size;
/* First 32-bit field of struct file_handle is handle_bytes. */
5316 if (get_user_s32(size, handle)) {
5317 return -TARGET_EFAULT;
5320 name = lock_user_string(pathname);
5322 return -TARGET_EFAULT;
5325 total_size = sizeof(struct file_handle) + size;
5326 target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
5328 unlock_user(name, pathname, 0);
5329 return -TARGET_EFAULT;
/* Use a zeroed host-side copy so the host kernel sees native layout. */
5332 fh = g_malloc0(total_size);
5333 fh->handle_bytes = size;
5335 ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
5336 unlock_user(name, pathname, 0);
5338 /* man name_to_handle_at(2):
5339 * Other than the use of the handle_bytes field, the caller should treat
5340 * the file_handle structure as an opaque data type
*/
/* Opaque payload is copied verbatim; only the two header words are swapped. */
5343 memcpy(target_fh, fh, total_size);
5344 target_fh->handle_bytes = tswap32(fh->handle_bytes);
5345 target_fh->handle_type = tswap32(fh->handle_type);
5347 unlock_user(target_fh, handle, total_size);
5349 if (put_user_s32(mid, mount_id)) {
5350 return -TARGET_EFAULT;
5358 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate open_by_handle_at(2): copy the guest file_handle, byteswap its
 * header fields into host order, translate the open flags, and call the
 * host syscall. */
5359 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
5362 struct file_handle *target_fh;
5363 struct file_handle *fh;
5364 unsigned int size, total_size;
5367 if (get_user_s32(size, handle)) {
5368 return -TARGET_EFAULT;
5371 total_size = sizeof(struct file_handle) + size;
5372 target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
5374 return -TARGET_EFAULT;
/* Duplicate into host memory before fixing up the header endianness. */
5377 fh = g_memdup(target_fh, total_size);
5378 fh->handle_bytes = size;
5379 fh->handle_type = tswap32(target_fh->handle_type);
5381 ret = get_errno(open_by_handle_at(mount_fd, fh,
5382 target_to_host_bitmask(flags, fcntl_flags_tbl)));
5386 unlock_user(target_fh, handle, total_size);
5392 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
5394 /* signalfd siginfo conversion */
/* Convert one host signalfd_siginfo into guest byte order / guest signal
 * numbering.  Callers pass tinfo == info, i.e. the conversion is done
 * in place on the read buffer (see host_to_target_signalfd below). */
5397 host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
5398 const struct signalfd_siginfo *info)
5400 int sig = host_to_target_signal(info->ssi_signo);
5402 /* linux/signalfd.h defines a ssi_addr_lsb
5403 * not defined in sys/signalfd.h but used by some kernels
*/
5406 #ifdef BUS_MCEERR_AO
/* Reads tinfo's still-unswapped fields; valid only because tinfo == info. */
5407 if (tinfo->ssi_signo == SIGBUS &&
5408 (tinfo->ssi_code == BUS_MCEERR_AR ||
5409 tinfo->ssi_code == BUS_MCEERR_AO)) {
5410 uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
5411 uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
5412 *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
5416 tinfo->ssi_signo = tswap32(sig);
/* NOTE(review): reads tinfo->ssi_errno (the destination) rather than
 * info->ssi_errno like every other field — correct only while the two
 * pointers alias; consider info->ssi_errno for consistency. */
5417 tinfo->ssi_errno = tswap32(tinfo->ssi_errno);
5418 tinfo->ssi_code = tswap32(info->ssi_code);
5419 tinfo->ssi_pid = tswap32(info->ssi_pid);
5420 tinfo->ssi_uid = tswap32(info->ssi_uid);
5421 tinfo->ssi_fd = tswap32(info->ssi_fd);
5422 tinfo->ssi_tid = tswap32(info->ssi_tid);
5423 tinfo->ssi_band = tswap32(info->ssi_band);
5424 tinfo->ssi_overrun = tswap32(info->ssi_overrun);
5425 tinfo->ssi_trapno = tswap32(info->ssi_trapno);
5426 tinfo->ssi_status = tswap32(info->ssi_status);
5427 tinfo->ssi_int = tswap32(info->ssi_int);
5428 tinfo->ssi_ptr = tswap64(info->ssi_ptr);
5429 tinfo->ssi_utime = tswap64(info->ssi_utime);
5430 tinfo->ssi_stime = tswap64(info->ssi_stime);
5431 tinfo->ssi_addr = tswap64(info->ssi_addr);
/* fd_trans hook: convert, in place, every signalfd_siginfo record that a
 * read() from a signalfd returned in buf (len bytes). */
5434 static abi_long host_to_target_signalfd(void *buf, size_t len)
5438 for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) {
/* Same source and destination pointer: in-place conversion. */
5439 host_to_target_signalfd_siginfo(buf + i, buf + i);
/* fd translator registered for signalfd descriptors so reads are
 * converted to guest format (see do_signalfd4). */
5445 static TargetFdTrans target_signalfd_trans = {
5446 .host_to_target = host_to_target_signalfd,
/* Emulate signalfd4(2): validate flags, read the guest sigset, create the
 * host signalfd and register the signalfd fd translator on success.
 * Returns the new fd or a -TARGET_* errno. */
5449 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
5452 target_sigset_t *target_mask;
/* Only NONBLOCK/CLOEXEC are valid signalfd4 flags. */
5456 if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
5457 return -TARGET_EINVAL;
5459 if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
5460 return -TARGET_EFAULT;
5463 target_to_host_sigset(&host_mask, target_mask);
5465 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
5467 ret = get_errno(signalfd(fd, &host_mask, host_flags));
5469 fd_trans_register(ret, &target_signalfd_trans);
5472 unlock_user_struct(target_mask, mask, 0);
5478 /* Map host to target signal numbers for the wait family of syscalls.
5479 Assume all other status bits are the same. */
5480 int host_to_target_waitstatus(int status)
5482 if (WIFSIGNALED(status)) {
/* Low 7 bits hold the terminating signal; remap it, keep the rest. */
5483 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
5485 if (WIFSTOPPED(status)) {
/* Stop signal lives in bits 8-15. */
5486 return (host_to_target_signal(WSTOPSIG(status)) << 8)
/* Fake /proc/self/cmdline for the guest: copy the host file to fd but
 * drop the first NUL-terminated string (the qemu binary path) so the
 * guest sees its own argv[0] first. */
5492 static int open_self_cmdline(void *cpu_env, int fd)
5495 bool word_skipped = false;
5497 fd_orig = open("/proc/self/cmdline", O_RDONLY);
5507 nb_read = read(fd_orig, buf, sizeof(buf));
5509 fd_orig = close(fd_orig);
5511 } else if (nb_read == 0) {
5515 if (!word_skipped) {
5516 /* Skip the first string, which is the path to qemu-*-static
5517 instead of the actual command. */
5518 cp_buf = memchr(buf, 0, sizeof(buf));
5520 /* Null byte found, skip one string */
5522 nb_read -= cp_buf - buf;
5523 word_skipped = true;
5528 if (write(fd, cp_buf, nb_read) != nb_read) {
5535 return close(fd_orig);
/* Fake /proc/self/maps for the guest: parse the host maps, keep only
 * ranges that map to valid guest addresses, rewrite them as guest
 * addresses, and label the guest stack region. */
5538 static int open_self_maps(void *cpu_env, int fd)
5540 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
5541 TaskState *ts = cpu->opaque;
5547 fp = fopen("/proc/self/maps", "r");
5552 while ((read = getline(&line, &len, fp)) != -1) {
5553 int fields, dev_maj, dev_min, inode;
5554 uint64_t min, max, offset;
5555 char flag_r, flag_w, flag_x, flag_p;
5556 char path[512] = "";
/* NOTE(review): "%512s" allows 512 chars + NUL into char path[512] —
 * one byte too many; should be "%511s". */
5557 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
5558 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
5559 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
/* 10 fields = anonymous mapping (no path), 11 = file-backed. */
5561 if ((fields < 10) || (fields > 11)) {
5564 if (h2g_valid(min)) {
5565 int flags = page_get_flags(h2g(min));
/* Clamp the end to the last valid guest address. */
5566 max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX);
5567 if (page_check_range(h2g(min), max - min, flags) == -1) {
5570 if (h2g(min) == ts->info->stack_limit) {
5571 pstrcpy(path, sizeof(path), " [stack]");
5573 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
5574 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
5575 h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
5576 flag_x, flag_p, offset, dev_maj, dev_min, inode,
5577 path[0] ? " " : "", path);
/* Fake /proc/self/stat for the guest: emit 44 space-separated fields.
 * Only pid (field 0), comm (field 1) and start_stack (field 27) carry
 * real values; everything else is written as 0. */
5587 static int open_self_stat(void *cpu_env, int fd)
5589 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
5590 TaskState *ts = cpu->opaque;
5591 abi_ulong start_stack = ts->info->start_stack;
5594 for (i = 0; i < 44; i++) {
5602 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
5603 } else if (i == 1) {
/* comm: the guest's argv[0], in parentheses as the kernel does. */
5605 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
5606 } else if (i == 27) {
5609 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
/* every other field is reported as 0; last one ends the line */
5612 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
5616 if (write(fd, buf, len) != len) {
/* Fake /proc/self/auxv for the guest: copy the saved auxiliary vector
 * out of the guest stack into fd, then rewind fd to offset 0. */
5624 static int open_self_auxv(void *cpu_env, int fd)
5626 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
5627 TaskState *ts = cpu->opaque;
5628 abi_ulong auxv = ts->info->saved_auxv;
5629 abi_ulong len = ts->info->auxv_len;
/*
5633 * Auxiliary vector is stored in target process stack.
5634 * read in whole auxv vector and copy it to file
 */
5636 ptr = lock_user(VERIFY_READ, auxv, len, 0);
5640 r = write(fd, ptr, len);
5647 lseek(fd, 0, SEEK_SET);
5648 unlock_user(ptr, auxv, len);
/* Return non-zero when filename names this process's proc entry:
 * "/proc/self/<entry>" or "/proc/<getpid()>/<entry>". */
5654 static int is_proc_myself(const char *filename, const char *entry)
5656 if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
5657 filename += strlen("/proc/");
5658 if (!strncmp(filename, "self/", strlen("self/"))) {
5659 filename += strlen("self/");
5660 } else if (*filename >= '1' && *filename <= '9') {
/* Numeric pid directory: compare against our own pid. */
5662 snprintf(myself, sizeof(myself), "%d/", getpid());
5663 if (!strncmp(filename, myself, strlen(myself))) {
5664 filename += strlen(myself);
5671 if (!strcmp(filename, entry)) {
5678 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-path comparator used by the fake-open table when host and guest
 * differ in endianness. */
5679 static int is_proc(const char *filename, const char *entry)
5681 return strcmp(filename, entry) == 0;
/* Fake /proc/net/route for a cross-endian guest: copy the header line
 * verbatim, then byteswap the dest/gateway/mask address columns of each
 * route entry. */
5684 static int open_net_route(void *cpu_env, int fd)
5691 fp = fopen("/proc/net/route", "r");
/* read header */
5698 read = getline(&line, &len, fp);
5699 dprintf(fd, "%s", line);
/* read routes */
5703 while ((read = getline(&line, &len, fp)) != -1) {
5705 uint32_t dest, gw, mask;
5706 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
5707 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
5708 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
5709 &mask, &mtu, &window, &irtt);
5710 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
5711 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
5712 metric, tswap32(mask), mtu, window, irtt);
/* openat(2) emulation with special handling for proc files that must be
 * faked for the guest (maps, stat, auxv, cmdline, and /proc/net/route on
 * cross-endian hosts): the fake content is generated into a temp file
 * whose fd is returned.  All other paths go to the host sys_openat. */
5722 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
5725 const char *filename;
5726 int (*fill)(void *cpu_env, int fd);
5727 int (*cmp)(const char *s1, const char *s2);
5729 const struct fake_open *fake_open;
5730 static const struct fake_open fakes[] = {
5731 { "maps", open_self_maps, is_proc_myself },
5732 { "stat", open_self_stat, is_proc_myself },
5733 { "auxv", open_self_auxv, is_proc_myself },
5734 { "cmdline", open_self_cmdline, is_proc_myself },
5735 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
5736 { "/proc/net/route", open_net_route, is_proc },
5738 { NULL, NULL, NULL }
5741 if (is_proc_myself(pathname, "exe")) {
/* NOTE(review): AT_EXECFD of 0 is treated as "not set" here; an execfd
 * that legitimately is fd 0 would be misinterpreted — confirm intended. */
5742 int execfd = qemu_getauxval(AT_EXECFD);
5743 return execfd ? execfd : get_errno(sys_openat(dirfd, exec_path, flags, mode));
5746 for (fake_open = fakes; fake_open->filename; fake_open++) {
5747 if (fake_open->cmp(pathname, fake_open->filename)) {
5752 if (fake_open->filename) {
5754 char filename[PATH_MAX];
5757 /* create temporary file to map stat to */
5758 tmpdir = getenv("TMPDIR");
5761 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
5762 fd = mkstemp(filename);
5768 if ((r = fake_open->fill(cpu_env, fd))) {
/* Rewind so the guest reads the generated content from the start. */
5772 lseek(fd, 0, SEEK_SET);
5777 return get_errno(sys_openat(dirfd, path(pathname), flags, mode));
/* Guest-visible POSIX timer ids carry a magic tag in the upper 16 bits;
 * the lower 16 bits index g_posix_timers[]. */
5780 #define TIMER_MAGIC 0x0caf0000
5781 #define TIMER_MAGIC_MASK 0xffff0000
5783 /* Convert QEMU provided timer ID back to internal 16bit index format */
5784 static target_timer_t get_timer_id(abi_long arg)
5786 target_timer_t timerid = arg;
/* Reject ids that were not handed out by us (wrong magic tag). */
5788 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
5789 return -TARGET_EINVAL;
/* Index must fall inside the timer table. */
5794 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
5795 return -TARGET_EINVAL;
5801 /* do_syscall() should always have a single exit point at the end so
5802 that actions, such as logging of syscall results, can be performed.
5803 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
5804 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
5805 abi_long arg2, abi_long arg3, abi_long arg4,
5806 abi_long arg5, abi_long arg6, abi_long arg7,
5809 CPUState *cpu = ENV_GET_CPU(cpu_env);
5816 gemu_log("syscall %d", num);
5819 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
5822 case TARGET_NR_exit:
5823 /* In old applications this may be used to implement _exit(2).
5824 However in threaded applictions it is used for thread termination,
5825 and _exit_group is used for application termination.
5826 Do thread termination if we have more then one thread. */
5827 /* FIXME: This probably breaks if a signal arrives. We should probably
5828 be disabling signals. */
5829 if (CPU_NEXT(first_cpu)) {
5833 /* Remove the CPU from the list. */
5834 QTAILQ_REMOVE(&cpus, cpu, node);
5837 if (ts->child_tidptr) {
5838 put_user_u32(0, ts->child_tidptr);
5839 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
5843 object_unref(OBJECT(cpu));
5845 rcu_unregister_thread();
5851 gdb_exit(cpu_env, arg1);
5853 ret = 0; /* avoid warning */
5855 case TARGET_NR_read:
5859 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
5861 ret = get_errno(read(arg1, p, arg3));
5863 fd_trans_host_to_target(arg1)) {
5864 ret = fd_trans_host_to_target(arg1)(p, ret);
5866 unlock_user(p, arg2, ret);
5869 case TARGET_NR_write:
5870 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
5872 ret = get_errno(write(arg1, p, arg3));
5873 unlock_user(p, arg2, 0);
5875 #ifdef TARGET_NR_open
5876 case TARGET_NR_open:
5877 if (!(p = lock_user_string(arg1)))
5879 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
5880 target_to_host_bitmask(arg2, fcntl_flags_tbl),
5882 fd_trans_unregister(ret);
5883 unlock_user(p, arg1, 0);
5886 case TARGET_NR_openat:
5887 if (!(p = lock_user_string(arg2)))
5889 ret = get_errno(do_openat(cpu_env, arg1, p,
5890 target_to_host_bitmask(arg3, fcntl_flags_tbl),
5892 fd_trans_unregister(ret);
5893 unlock_user(p, arg2, 0);
5895 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
5896 case TARGET_NR_name_to_handle_at:
5897 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
5900 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
5901 case TARGET_NR_open_by_handle_at:
5902 ret = do_open_by_handle_at(arg1, arg2, arg3);
5903 fd_trans_unregister(ret);
5906 case TARGET_NR_close:
5907 fd_trans_unregister(arg1);
5908 ret = get_errno(close(arg1));
5913 #ifdef TARGET_NR_fork
5914 case TARGET_NR_fork:
5915 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
5918 #ifdef TARGET_NR_waitpid
5919 case TARGET_NR_waitpid:
5922 ret = get_errno(waitpid(arg1, &status, arg3));
5923 if (!is_error(ret) && arg2 && ret
5924 && put_user_s32(host_to_target_waitstatus(status), arg2))
5929 #ifdef TARGET_NR_waitid
5930 case TARGET_NR_waitid:
5934 ret = get_errno(waitid(arg1, arg2, &info, arg4));
5935 if (!is_error(ret) && arg3 && info.si_pid != 0) {
5936 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
5938 host_to_target_siginfo(p, &info);
5939 unlock_user(p, arg3, sizeof(target_siginfo_t));
5944 #ifdef TARGET_NR_creat /* not on alpha */
5945 case TARGET_NR_creat:
5946 if (!(p = lock_user_string(arg1)))
5948 ret = get_errno(creat(p, arg2));
5949 fd_trans_unregister(ret);
5950 unlock_user(p, arg1, 0);
5953 #ifdef TARGET_NR_link
5954 case TARGET_NR_link:
5957 p = lock_user_string(arg1);
5958 p2 = lock_user_string(arg2);
5960 ret = -TARGET_EFAULT;
5962 ret = get_errno(link(p, p2));
5963 unlock_user(p2, arg2, 0);
5964 unlock_user(p, arg1, 0);
5968 #if defined(TARGET_NR_linkat)
5969 case TARGET_NR_linkat:
5974 p = lock_user_string(arg2);
5975 p2 = lock_user_string(arg4);
5977 ret = -TARGET_EFAULT;
5979 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
5980 unlock_user(p, arg2, 0);
5981 unlock_user(p2, arg4, 0);
5985 #ifdef TARGET_NR_unlink
5986 case TARGET_NR_unlink:
5987 if (!(p = lock_user_string(arg1)))
5989 ret = get_errno(unlink(p));
5990 unlock_user(p, arg1, 0);
5993 #if defined(TARGET_NR_unlinkat)
5994 case TARGET_NR_unlinkat:
5995 if (!(p = lock_user_string(arg2)))
5997 ret = get_errno(unlinkat(arg1, p, arg3));
5998 unlock_user(p, arg2, 0);
6001 case TARGET_NR_execve:
6003 char **argp, **envp;
6006 abi_ulong guest_argp;
6007 abi_ulong guest_envp;
6014 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
6015 if (get_user_ual(addr, gp))
6023 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
6024 if (get_user_ual(addr, gp))
6031 argp = alloca((argc + 1) * sizeof(void *));
6032 envp = alloca((envc + 1) * sizeof(void *));
6034 for (gp = guest_argp, q = argp; gp;
6035 gp += sizeof(abi_ulong), q++) {
6036 if (get_user_ual(addr, gp))
6040 if (!(*q = lock_user_string(addr)))
6042 total_size += strlen(*q) + 1;
6046 for (gp = guest_envp, q = envp; gp;
6047 gp += sizeof(abi_ulong), q++) {
6048 if (get_user_ual(addr, gp))
6052 if (!(*q = lock_user_string(addr)))
6054 total_size += strlen(*q) + 1;
6058 if (!(p = lock_user_string(arg1)))
6060 ret = get_errno(execve(p, argp, envp));
6061 unlock_user(p, arg1, 0);
6066 ret = -TARGET_EFAULT;
6069 for (gp = guest_argp, q = argp; *q;
6070 gp += sizeof(abi_ulong), q++) {
6071 if (get_user_ual(addr, gp)
6074 unlock_user(*q, addr, 0);
6076 for (gp = guest_envp, q = envp; *q;
6077 gp += sizeof(abi_ulong), q++) {
6078 if (get_user_ual(addr, gp)
6081 unlock_user(*q, addr, 0);
6085 case TARGET_NR_chdir:
6086 if (!(p = lock_user_string(arg1)))
6088 ret = get_errno(chdir(p));
6089 unlock_user(p, arg1, 0);
6091 #ifdef TARGET_NR_time
6092 case TARGET_NR_time:
6095 ret = get_errno(time(&host_time));
6098 && put_user_sal(host_time, arg1))
6103 #ifdef TARGET_NR_mknod
6104 case TARGET_NR_mknod:
6105 if (!(p = lock_user_string(arg1)))
6107 ret = get_errno(mknod(p, arg2, arg3));
6108 unlock_user(p, arg1, 0);
6111 #if defined(TARGET_NR_mknodat)
6112 case TARGET_NR_mknodat:
6113 if (!(p = lock_user_string(arg2)))
6115 ret = get_errno(mknodat(arg1, p, arg3, arg4));
6116 unlock_user(p, arg2, 0);
6119 #ifdef TARGET_NR_chmod
6120 case TARGET_NR_chmod:
6121 if (!(p = lock_user_string(arg1)))
6123 ret = get_errno(chmod(p, arg2));
6124 unlock_user(p, arg1, 0);
6127 #ifdef TARGET_NR_break
6128 case TARGET_NR_break:
6131 #ifdef TARGET_NR_oldstat
6132 case TARGET_NR_oldstat:
6135 case TARGET_NR_lseek:
6136 ret = get_errno(lseek(arg1, arg2, arg3));
6138 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
6139 /* Alpha specific */
6140 case TARGET_NR_getxpid:
6141 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
6142 ret = get_errno(getpid());
6145 #ifdef TARGET_NR_getpid
6146 case TARGET_NR_getpid:
6147 ret = get_errno(getpid());
6150 case TARGET_NR_mount:
6152 /* need to look at the data field */
6156 p = lock_user_string(arg1);
6164 p2 = lock_user_string(arg2);
6167 unlock_user(p, arg1, 0);
6173 p3 = lock_user_string(arg3);
6176 unlock_user(p, arg1, 0);
6178 unlock_user(p2, arg2, 0);
6185 /* FIXME - arg5 should be locked, but it isn't clear how to
6186 * do that since it's not guaranteed to be a NULL-terminated
6190 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
6192 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
6194 ret = get_errno(ret);
6197 unlock_user(p, arg1, 0);
6199 unlock_user(p2, arg2, 0);
6201 unlock_user(p3, arg3, 0);
6205 #ifdef TARGET_NR_umount
6206 case TARGET_NR_umount:
6207 if (!(p = lock_user_string(arg1)))
6209 ret = get_errno(umount(p));
6210 unlock_user(p, arg1, 0);
6213 #ifdef TARGET_NR_stime /* not on alpha */
6214 case TARGET_NR_stime:
6217 if (get_user_sal(host_time, arg1))
6219 ret = get_errno(stime(&host_time));
6223 case TARGET_NR_ptrace:
6225 #ifdef TARGET_NR_alarm /* not on alpha */
6226 case TARGET_NR_alarm:
6230 #ifdef TARGET_NR_oldfstat
6231 case TARGET_NR_oldfstat:
6234 #ifdef TARGET_NR_pause /* not on alpha */
6235 case TARGET_NR_pause:
6236 ret = get_errno(pause());
6239 #ifdef TARGET_NR_utime
6240 case TARGET_NR_utime:
6242 struct utimbuf tbuf, *host_tbuf;
6243 struct target_utimbuf *target_tbuf;
6245 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
6247 tbuf.actime = tswapal(target_tbuf->actime);
6248 tbuf.modtime = tswapal(target_tbuf->modtime);
6249 unlock_user_struct(target_tbuf, arg2, 0);
6254 if (!(p = lock_user_string(arg1)))
6256 ret = get_errno(utime(p, host_tbuf));
6257 unlock_user(p, arg1, 0);
6261 #ifdef TARGET_NR_utimes
6262 case TARGET_NR_utimes:
6264 struct timeval *tvp, tv[2];
6266 if (copy_from_user_timeval(&tv[0], arg2)
6267 || copy_from_user_timeval(&tv[1],
6268 arg2 + sizeof(struct target_timeval)))
6274 if (!(p = lock_user_string(arg1)))
6276 ret = get_errno(utimes(p, tvp));
6277 unlock_user(p, arg1, 0);
6281 #if defined(TARGET_NR_futimesat)
6282 case TARGET_NR_futimesat:
6284 struct timeval *tvp, tv[2];
6286 if (copy_from_user_timeval(&tv[0], arg3)
6287 || copy_from_user_timeval(&tv[1],
6288 arg3 + sizeof(struct target_timeval)))
6294 if (!(p = lock_user_string(arg2)))
6296 ret = get_errno(futimesat(arg1, path(p), tvp));
6297 unlock_user(p, arg2, 0);
6301 #ifdef TARGET_NR_stty
6302 case TARGET_NR_stty:
6305 #ifdef TARGET_NR_gtty
6306 case TARGET_NR_gtty:
6309 #ifdef TARGET_NR_access
6310 case TARGET_NR_access:
6311 if (!(p = lock_user_string(arg1)))
6313 ret = get_errno(access(path(p), arg2));
6314 unlock_user(p, arg1, 0);
6317 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
6318 case TARGET_NR_faccessat:
6319 if (!(p = lock_user_string(arg2)))
6321 ret = get_errno(faccessat(arg1, p, arg3, 0));
6322 unlock_user(p, arg2, 0);
6325 #ifdef TARGET_NR_nice /* not on alpha */
6326 case TARGET_NR_nice:
6327 ret = get_errno(nice(arg1));
6330 #ifdef TARGET_NR_ftime
6331 case TARGET_NR_ftime:
6334 case TARGET_NR_sync:
6338 case TARGET_NR_kill:
6339 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
6341 #ifdef TARGET_NR_rename
6342 case TARGET_NR_rename:
6345 p = lock_user_string(arg1);
6346 p2 = lock_user_string(arg2);
6348 ret = -TARGET_EFAULT;
6350 ret = get_errno(rename(p, p2));
6351 unlock_user(p2, arg2, 0);
6352 unlock_user(p, arg1, 0);
6356 #if defined(TARGET_NR_renameat)
6357 case TARGET_NR_renameat:
6360 p = lock_user_string(arg2);
6361 p2 = lock_user_string(arg4);
6363 ret = -TARGET_EFAULT;
6365 ret = get_errno(renameat(arg1, p, arg3, p2));
6366 unlock_user(p2, arg4, 0);
6367 unlock_user(p, arg2, 0);
6371 #ifdef TARGET_NR_mkdir
6372 case TARGET_NR_mkdir:
6373 if (!(p = lock_user_string(arg1)))
6375 ret = get_errno(mkdir(p, arg2));
6376 unlock_user(p, arg1, 0);
6379 #if defined(TARGET_NR_mkdirat)
6380 case TARGET_NR_mkdirat:
6381 if (!(p = lock_user_string(arg2)))
6383 ret = get_errno(mkdirat(arg1, p, arg3));
6384 unlock_user(p, arg2, 0);
6387 #ifdef TARGET_NR_rmdir
6388 case TARGET_NR_rmdir:
6389 if (!(p = lock_user_string(arg1)))
6391 ret = get_errno(rmdir(p));
6392 unlock_user(p, arg1, 0);
6396 ret = get_errno(dup(arg1));
6398 fd_trans_dup(arg1, ret);
6401 #ifdef TARGET_NR_pipe
6402 case TARGET_NR_pipe:
6403 ret = do_pipe(cpu_env, arg1, 0, 0);
6406 #ifdef TARGET_NR_pipe2
6407 case TARGET_NR_pipe2:
6408 ret = do_pipe(cpu_env, arg1,
6409 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
6412 case TARGET_NR_times:
6414 struct target_tms *tmsp;
6416 ret = get_errno(times(&tms));
6418 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
6421 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
6422 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
6423 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
6424 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
6427 ret = host_to_target_clock_t(ret);
6430 #ifdef TARGET_NR_prof
6431 case TARGET_NR_prof:
6434 #ifdef TARGET_NR_signal
6435 case TARGET_NR_signal:
6438 case TARGET_NR_acct:
6440 ret = get_errno(acct(NULL));
6442 if (!(p = lock_user_string(arg1)))
6444 ret = get_errno(acct(path(p)));
6445 unlock_user(p, arg1, 0);
6448 #ifdef TARGET_NR_umount2
6449 case TARGET_NR_umount2:
6450 if (!(p = lock_user_string(arg1)))
6452 ret = get_errno(umount2(p, arg2));
6453 unlock_user(p, arg1, 0);
6456 #ifdef TARGET_NR_lock
6457 case TARGET_NR_lock:
6460 case TARGET_NR_ioctl:
6461 ret = do_ioctl(arg1, arg2, arg3);
6463 case TARGET_NR_fcntl:
6464 ret = do_fcntl(arg1, arg2, arg3);
6466 #ifdef TARGET_NR_mpx
6470 case TARGET_NR_setpgid:
6471 ret = get_errno(setpgid(arg1, arg2));
6473 #ifdef TARGET_NR_ulimit
6474 case TARGET_NR_ulimit:
6477 #ifdef TARGET_NR_oldolduname
6478 case TARGET_NR_oldolduname:
6481 case TARGET_NR_umask:
6482 ret = get_errno(umask(arg1));
6484 case TARGET_NR_chroot:
6485 if (!(p = lock_user_string(arg1)))
6487 ret = get_errno(chroot(p));
6488 unlock_user(p, arg1, 0);
6490 #ifdef TARGET_NR_ustat
6491 case TARGET_NR_ustat:
6494 #ifdef TARGET_NR_dup2
6495 case TARGET_NR_dup2:
6496 ret = get_errno(dup2(arg1, arg2));
6498 fd_trans_dup(arg1, arg2);
6502 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
6503 case TARGET_NR_dup3:
6504 ret = get_errno(dup3(arg1, arg2, arg3));
6506 fd_trans_dup(arg1, arg2);
6510 #ifdef TARGET_NR_getppid /* not on alpha */
6511 case TARGET_NR_getppid:
6512 ret = get_errno(getppid());
6515 #ifdef TARGET_NR_getpgrp
6516 case TARGET_NR_getpgrp:
6517 ret = get_errno(getpgrp());
6520 case TARGET_NR_setsid:
6521 ret = get_errno(setsid());
6523 #ifdef TARGET_NR_sigaction
6524 case TARGET_NR_sigaction:
6526 #if defined(TARGET_ALPHA)
6527 struct target_sigaction act, oact, *pact = 0;
6528 struct target_old_sigaction *old_act;
6530 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
6532 act._sa_handler = old_act->_sa_handler;
6533 target_siginitset(&act.sa_mask, old_act->sa_mask);
6534 act.sa_flags = old_act->sa_flags;
6535 act.sa_restorer = 0;
6536 unlock_user_struct(old_act, arg2, 0);
6539 ret = get_errno(do_sigaction(arg1, pact, &oact));
6540 if (!is_error(ret) && arg3) {
6541 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
6543 old_act->_sa_handler = oact._sa_handler;
6544 old_act->sa_mask = oact.sa_mask.sig[0];
6545 old_act->sa_flags = oact.sa_flags;
6546 unlock_user_struct(old_act, arg3, 1);
6548 #elif defined(TARGET_MIPS)
6549 struct target_sigaction act, oact, *pact, *old_act;
6552 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
6554 act._sa_handler = old_act->_sa_handler;
6555 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
6556 act.sa_flags = old_act->sa_flags;
6557 unlock_user_struct(old_act, arg2, 0);
6563 ret = get_errno(do_sigaction(arg1, pact, &oact));
6565 if (!is_error(ret) && arg3) {
6566 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
6568 old_act->_sa_handler = oact._sa_handler;
6569 old_act->sa_flags = oact.sa_flags;
6570 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
6571 old_act->sa_mask.sig[1] = 0;
6572 old_act->sa_mask.sig[2] = 0;
6573 old_act->sa_mask.sig[3] = 0;
6574 unlock_user_struct(old_act, arg3, 1);
6577 struct target_old_sigaction *old_act;
6578 struct target_sigaction act, oact, *pact;
6580 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
6582 act._sa_handler = old_act->_sa_handler;
6583 target_siginitset(&act.sa_mask, old_act->sa_mask);
6584 act.sa_flags = old_act->sa_flags;
6585 act.sa_restorer = old_act->sa_restorer;
6586 unlock_user_struct(old_act, arg2, 0);
6591 ret = get_errno(do_sigaction(arg1, pact, &oact));
6592 if (!is_error(ret) && arg3) {
6593 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
6595 old_act->_sa_handler = oact._sa_handler;
6596 old_act->sa_mask = oact.sa_mask.sig[0];
6597 old_act->sa_flags = oact.sa_flags;
6598 old_act->sa_restorer = oact.sa_restorer;
6599 unlock_user_struct(old_act, arg3, 1);
6605 case TARGET_NR_rt_sigaction:
6607 #if defined(TARGET_ALPHA)
6608 struct target_sigaction act, oact, *pact = 0;
6609 struct target_rt_sigaction *rt_act;
6610 /* ??? arg4 == sizeof(sigset_t). */
6612 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
6614 act._sa_handler = rt_act->_sa_handler;
6615 act.sa_mask = rt_act->sa_mask;
6616 act.sa_flags = rt_act->sa_flags;
6617 act.sa_restorer = arg5;
6618 unlock_user_struct(rt_act, arg2, 0);
6621 ret = get_errno(do_sigaction(arg1, pact, &oact));
6622 if (!is_error(ret) && arg3) {
6623 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
6625 rt_act->_sa_handler = oact._sa_handler;
6626 rt_act->sa_mask = oact.sa_mask;
6627 rt_act->sa_flags = oact.sa_flags;
6628 unlock_user_struct(rt_act, arg3, 1);
6631 struct target_sigaction *act;
6632 struct target_sigaction *oact;
6635 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
6640 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
6641 ret = -TARGET_EFAULT;
6642 goto rt_sigaction_fail;
6646 ret = get_errno(do_sigaction(arg1, act, oact));
6649 unlock_user_struct(act, arg2, 0);
6651 unlock_user_struct(oact, arg3, 1);
6655 #ifdef TARGET_NR_sgetmask /* not on alpha */
6656 case TARGET_NR_sgetmask:
6659 abi_ulong target_set;
6660 do_sigprocmask(0, NULL, &cur_set);
6661 host_to_target_old_sigset(&target_set, &cur_set);
6666 #ifdef TARGET_NR_ssetmask /* not on alpha */
6667 case TARGET_NR_ssetmask:
6669 sigset_t set, oset, cur_set;
6670 abi_ulong target_set = arg1;
6671 do_sigprocmask(0, NULL, &cur_set);
6672 target_to_host_old_sigset(&set, &target_set);
6673 sigorset(&set, &set, &cur_set);
6674 do_sigprocmask(SIG_SETMASK, &set, &oset);
6675 host_to_target_old_sigset(&target_set, &oset);
6680 #ifdef TARGET_NR_sigprocmask
6681 case TARGET_NR_sigprocmask:
6683 #if defined(TARGET_ALPHA)
6684 sigset_t set, oldset;
6689 case TARGET_SIG_BLOCK:
6692 case TARGET_SIG_UNBLOCK:
6695 case TARGET_SIG_SETMASK:
6699 ret = -TARGET_EINVAL;
6703 target_to_host_old_sigset(&set, &mask);
6705 ret = get_errno(do_sigprocmask(how, &set, &oldset));
6706 if (!is_error(ret)) {
6707 host_to_target_old_sigset(&mask, &oldset);
6709 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
6712 sigset_t set, oldset, *set_ptr;
6717 case TARGET_SIG_BLOCK:
6720 case TARGET_SIG_UNBLOCK:
6723 case TARGET_SIG_SETMASK:
6727 ret = -TARGET_EINVAL;
6730 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
6732 target_to_host_old_sigset(&set, p);
6733 unlock_user(p, arg2, 0);
6739 ret = get_errno(do_sigprocmask(how, set_ptr, &oldset));
6740 if (!is_error(ret) && arg3) {
6741 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
6743 host_to_target_old_sigset(p, &oldset);
6744 unlock_user(p, arg3, sizeof(target_sigset_t));
6750 case TARGET_NR_rt_sigprocmask:
6753 sigset_t set, oldset, *set_ptr;
6757 case TARGET_SIG_BLOCK:
6760 case TARGET_SIG_UNBLOCK:
6763 case TARGET_SIG_SETMASK:
6767 ret = -TARGET_EINVAL;
6770 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
6772 target_to_host_sigset(&set, p);
6773 unlock_user(p, arg2, 0);
6779 ret = get_errno(do_sigprocmask(how, set_ptr, &oldset));
6780 if (!is_error(ret) && arg3) {
6781 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
6783 host_to_target_sigset(p, &oldset);
6784 unlock_user(p, arg3, sizeof(target_sigset_t));
6788 #ifdef TARGET_NR_sigpending
6789 case TARGET_NR_sigpending:
6792 ret = get_errno(sigpending(&set));
6793 if (!is_error(ret)) {
6794 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6796 host_to_target_old_sigset(p, &set);
6797 unlock_user(p, arg1, sizeof(target_sigset_t));
6802 case TARGET_NR_rt_sigpending:
6805 ret = get_errno(sigpending(&set));
6806 if (!is_error(ret)) {
6807 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6809 host_to_target_sigset(p, &set);
6810 unlock_user(p, arg1, sizeof(target_sigset_t));
6814 #ifdef TARGET_NR_sigsuspend
6815 case TARGET_NR_sigsuspend:
6818 #if defined(TARGET_ALPHA)
6819 abi_ulong mask = arg1;
6820 target_to_host_old_sigset(&set, &mask);
6822 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6824 target_to_host_old_sigset(&set, p);
6825 unlock_user(p, arg1, 0);
6827 ret = get_errno(sigsuspend(&set));
6831 case TARGET_NR_rt_sigsuspend:
6834 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6836 target_to_host_sigset(&set, p);
6837 unlock_user(p, arg1, 0);
6838 ret = get_errno(sigsuspend(&set));
6841 case TARGET_NR_rt_sigtimedwait:
6844 struct timespec uts, *puts;
6847 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6849 target_to_host_sigset(&set, p);
6850 unlock_user(p, arg1, 0);
6853 target_to_host_timespec(puts, arg3);
6857 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
6858 if (!is_error(ret)) {
6860 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
6865 host_to_target_siginfo(p, &uinfo);
6866 unlock_user(p, arg2, sizeof(target_siginfo_t));
6868 ret = host_to_target_signal(ret);
6872 case TARGET_NR_rt_sigqueueinfo:
6875 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1)))
6877 target_to_host_siginfo(&uinfo, p);
6878 unlock_user(p, arg1, 0);
6879 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
6882 #ifdef TARGET_NR_sigreturn
6883 case TARGET_NR_sigreturn:
6884 /* NOTE: ret is eax, so not transcoding must be done */
6885 ret = do_sigreturn(cpu_env);
6888 case TARGET_NR_rt_sigreturn:
6889 /* NOTE: ret is eax, so not transcoding must be done */
6890 ret = do_rt_sigreturn(cpu_env);
6892 case TARGET_NR_sethostname:
6893 if (!(p = lock_user_string(arg1)))
6895 ret = get_errno(sethostname(p, arg2));
6896 unlock_user(p, arg1, 0);
6898 case TARGET_NR_setrlimit:
6900 int resource = target_to_host_resource(arg1);
6901 struct target_rlimit *target_rlim;
6903 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
6905 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
6906 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
6907 unlock_user_struct(target_rlim, arg2, 0);
6908 ret = get_errno(setrlimit(resource, &rlim));
6911 case TARGET_NR_getrlimit:
6913 int resource = target_to_host_resource(arg1);
6914 struct target_rlimit *target_rlim;
6917 ret = get_errno(getrlimit(resource, &rlim));
6918 if (!is_error(ret)) {
6919 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
6921 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
6922 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
6923 unlock_user_struct(target_rlim, arg2, 1);
6927 case TARGET_NR_getrusage:
6929 struct rusage rusage;
6930 ret = get_errno(getrusage(arg1, &rusage));
6931 if (!is_error(ret)) {
6932 ret = host_to_target_rusage(arg2, &rusage);
6936 case TARGET_NR_gettimeofday:
6939 ret = get_errno(gettimeofday(&tv, NULL));
6940 if (!is_error(ret)) {
6941 if (copy_to_user_timeval(arg1, &tv))
6946 case TARGET_NR_settimeofday:
6948 struct timeval tv, *ptv = NULL;
6949 struct timezone tz, *ptz = NULL;
6952 if (copy_from_user_timeval(&tv, arg1)) {
6959 if (copy_from_user_timezone(&tz, arg2)) {
6965 ret = get_errno(settimeofday(ptv, ptz));
6968 #if defined(TARGET_NR_select)
6969 case TARGET_NR_select:
6970 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
6971 ret = do_select(arg1, arg2, arg3, arg4, arg5);
6974 struct target_sel_arg_struct *sel;
6975 abi_ulong inp, outp, exp, tvp;
6978 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
6980 nsel = tswapal(sel->n);
6981 inp = tswapal(sel->inp);
6982 outp = tswapal(sel->outp);
6983 exp = tswapal(sel->exp);
6984 tvp = tswapal(sel->tvp);
6985 unlock_user_struct(sel, arg1, 0);
6986 ret = do_select(nsel, inp, outp, exp, tvp);
6991 #ifdef TARGET_NR_pselect6
6992 case TARGET_NR_pselect6:
6994 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
6995 fd_set rfds, wfds, efds;
6996 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
6997 struct timespec ts, *ts_ptr;
7000 * The 6th arg is actually two args smashed together,
7001 * so we cannot use the C library.
7009 abi_ulong arg_sigset, arg_sigsize, *arg7;
7010 target_sigset_t *target_sigset;
7018 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
7022 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
7026 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
7032 * This takes a timespec, and not a timeval, so we cannot
7033 * use the do_select() helper ...
7036 if (target_to_host_timespec(&ts, ts_addr)) {
7044 /* Extract the two packed args for the sigset */
7047 sig.size = _NSIG / 8;
7049 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
7053 arg_sigset = tswapal(arg7[0]);
7054 arg_sigsize = tswapal(arg7[1]);
7055 unlock_user(arg7, arg6, 0);
7059 if (arg_sigsize != sizeof(*target_sigset)) {
7060 /* Like the kernel, we enforce correct size sigsets */
7061 ret = -TARGET_EINVAL;
7064 target_sigset = lock_user(VERIFY_READ, arg_sigset,
7065 sizeof(*target_sigset), 1);
7066 if (!target_sigset) {
7069 target_to_host_sigset(&set, target_sigset);
7070 unlock_user(target_sigset, arg_sigset, 0);
7078 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
7081 if (!is_error(ret)) {
7082 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
7084 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
7086 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
7089 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
7095 #ifdef TARGET_NR_symlink
7096 case TARGET_NR_symlink:
7099 p = lock_user_string(arg1);
7100 p2 = lock_user_string(arg2);
7102 ret = -TARGET_EFAULT;
7104 ret = get_errno(symlink(p, p2));
7105 unlock_user(p2, arg2, 0);
7106 unlock_user(p, arg1, 0);
7110 #if defined(TARGET_NR_symlinkat)
7111 case TARGET_NR_symlinkat:
7114 p = lock_user_string(arg1);
7115 p2 = lock_user_string(arg3);
7117 ret = -TARGET_EFAULT;
7119 ret = get_errno(symlinkat(p, arg2, p2));
7120 unlock_user(p2, arg3, 0);
7121 unlock_user(p, arg1, 0);
7125 #ifdef TARGET_NR_oldlstat
7126 case TARGET_NR_oldlstat:
7129 #ifdef TARGET_NR_readlink
7130 case TARGET_NR_readlink:
7133 p = lock_user_string(arg1);
7134 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
7136 ret = -TARGET_EFAULT;
7138 /* Short circuit this for the magic exe check. */
7139 ret = -TARGET_EINVAL;
7140 } else if (is_proc_myself((const char *)p, "exe")) {
7141 char real[PATH_MAX], *temp;
7142 temp = realpath(exec_path, real);
7143 /* Return value is # of bytes that we wrote to the buffer. */
7145 ret = get_errno(-1);
7147 /* Don't worry about sign mismatch as earlier mapping
7148 * logic would have thrown a bad address error. */
7149 ret = MIN(strlen(real), arg3);
7150 /* We cannot NUL terminate the string. */
7151 memcpy(p2, real, ret);
7154 ret = get_errno(readlink(path(p), p2, arg3));
7156 unlock_user(p2, arg2, ret);
7157 unlock_user(p, arg1, 0);
7161 #if defined(TARGET_NR_readlinkat)
7162 case TARGET_NR_readlinkat:
7165 p = lock_user_string(arg2);
7166 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
7168 ret = -TARGET_EFAULT;
7169 } else if (is_proc_myself((const char *)p, "exe")) {
7170 char real[PATH_MAX], *temp;
7171 temp = realpath(exec_path, real);
7172 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
7173 snprintf((char *)p2, arg4, "%s", real);
7175 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
7177 unlock_user(p2, arg3, ret);
7178 unlock_user(p, arg2, 0);
7182 #ifdef TARGET_NR_uselib
7183 case TARGET_NR_uselib:
7186 #ifdef TARGET_NR_swapon
7187 case TARGET_NR_swapon:
7188 if (!(p = lock_user_string(arg1)))
7190 ret = get_errno(swapon(p, arg2));
7191 unlock_user(p, arg1, 0);
7194 case TARGET_NR_reboot:
7195 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
7196 /* arg4 must be ignored in all other cases */
7197 p = lock_user_string(arg4);
7201 ret = get_errno(reboot(arg1, arg2, arg3, p));
7202 unlock_user(p, arg4, 0);
7204 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
7207 #ifdef TARGET_NR_readdir
7208 case TARGET_NR_readdir:
7211 #ifdef TARGET_NR_mmap
7212 case TARGET_NR_mmap:
7213 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7214 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
7215 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
7216 || defined(TARGET_S390X)
7219 abi_ulong v1, v2, v3, v4, v5, v6;
7220 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
7228 unlock_user(v, arg1, 0);
7229 ret = get_errno(target_mmap(v1, v2, v3,
7230 target_to_host_bitmask(v4, mmap_flags_tbl),
7234 ret = get_errno(target_mmap(arg1, arg2, arg3,
7235 target_to_host_bitmask(arg4, mmap_flags_tbl),
7241 #ifdef TARGET_NR_mmap2
7242 case TARGET_NR_mmap2:
7244 #define MMAP_SHIFT 12
7246 ret = get_errno(target_mmap(arg1, arg2, arg3,
7247 target_to_host_bitmask(arg4, mmap_flags_tbl),
7249 arg6 << MMAP_SHIFT));
7252 case TARGET_NR_munmap:
7253 ret = get_errno(target_munmap(arg1, arg2));
7255 case TARGET_NR_mprotect:
7257 TaskState *ts = cpu->opaque;
7258 /* Special hack to detect libc making the stack executable. */
7259 if ((arg3 & PROT_GROWSDOWN)
7260 && arg1 >= ts->info->stack_limit
7261 && arg1 <= ts->info->start_stack) {
7262 arg3 &= ~PROT_GROWSDOWN;
7263 arg2 = arg2 + arg1 - ts->info->stack_limit;
7264 arg1 = ts->info->stack_limit;
7267 ret = get_errno(target_mprotect(arg1, arg2, arg3));
7269 #ifdef TARGET_NR_mremap
7270 case TARGET_NR_mremap:
7271 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
7274 /* ??? msync/mlock/munlock are broken for softmmu. */
7275 #ifdef TARGET_NR_msync
7276 case TARGET_NR_msync:
7277 ret = get_errno(msync(g2h(arg1), arg2, arg3));
7280 #ifdef TARGET_NR_mlock
7281 case TARGET_NR_mlock:
7282 ret = get_errno(mlock(g2h(arg1), arg2));
7285 #ifdef TARGET_NR_munlock
7286 case TARGET_NR_munlock:
7287 ret = get_errno(munlock(g2h(arg1), arg2));
7290 #ifdef TARGET_NR_mlockall
7291 case TARGET_NR_mlockall:
7292 ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
7295 #ifdef TARGET_NR_munlockall
7296 case TARGET_NR_munlockall:
7297 ret = get_errno(munlockall());
7300 case TARGET_NR_truncate:
7301 if (!(p = lock_user_string(arg1)))
7303 ret = get_errno(truncate(p, arg2));
7304 unlock_user(p, arg1, 0);
7306 case TARGET_NR_ftruncate:
7307 ret = get_errno(ftruncate(arg1, arg2));
7309 case TARGET_NR_fchmod:
7310 ret = get_errno(fchmod(arg1, arg2));
7312 #if defined(TARGET_NR_fchmodat)
7313 case TARGET_NR_fchmodat:
7314 if (!(p = lock_user_string(arg2)))
7316 ret = get_errno(fchmodat(arg1, p, arg3, 0));
7317 unlock_user(p, arg2, 0);
7320 case TARGET_NR_getpriority:
7321 /* Note that negative values are valid for getpriority, so we must
7322 differentiate based on errno settings. */
7324 ret = getpriority(arg1, arg2);
7325 if (ret == -1 && errno != 0) {
7326 ret = -host_to_target_errno(errno);
7330 /* Return value is the unbiased priority. Signal no error. */
7331 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
7333 /* Return value is a biased priority to avoid negative numbers. */
7337 case TARGET_NR_setpriority:
7338 ret = get_errno(setpriority(arg1, arg2, arg3));
7340 #ifdef TARGET_NR_profil
7341 case TARGET_NR_profil:
7344 case TARGET_NR_statfs:
7345 if (!(p = lock_user_string(arg1)))
7347 ret = get_errno(statfs(path(p), &stfs));
7348 unlock_user(p, arg1, 0);
7350 if (!is_error(ret)) {
7351 struct target_statfs *target_stfs;
7353 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
7355 __put_user(stfs.f_type, &target_stfs->f_type);
7356 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
7357 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
7358 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
7359 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
7360 __put_user(stfs.f_files, &target_stfs->f_files);
7361 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
7362 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
7363 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
7364 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
7365 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
7366 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
7367 unlock_user_struct(target_stfs, arg2, 1);
7370 case TARGET_NR_fstatfs:
7371 ret = get_errno(fstatfs(arg1, &stfs));
7372 goto convert_statfs;
7373 #ifdef TARGET_NR_statfs64
7374 case TARGET_NR_statfs64:
7375 if (!(p = lock_user_string(arg1)))
7377 ret = get_errno(statfs(path(p), &stfs));
7378 unlock_user(p, arg1, 0);
7380 if (!is_error(ret)) {
7381 struct target_statfs64 *target_stfs;
7383 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
7385 __put_user(stfs.f_type, &target_stfs->f_type);
7386 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
7387 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
7388 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
7389 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
7390 __put_user(stfs.f_files, &target_stfs->f_files);
7391 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
7392 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
7393 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
7394 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
7395 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
7396 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
7397 unlock_user_struct(target_stfs, arg3, 1);
7400 case TARGET_NR_fstatfs64:
7401 ret = get_errno(fstatfs(arg1, &stfs));
7402 goto convert_statfs64;
7404 #ifdef TARGET_NR_ioperm
7405 case TARGET_NR_ioperm:
7408 #ifdef TARGET_NR_socketcall
7409 case TARGET_NR_socketcall:
7410 ret = do_socketcall(arg1, arg2);
7413 #ifdef TARGET_NR_accept
7414 case TARGET_NR_accept:
7415 ret = do_accept4(arg1, arg2, arg3, 0);
7418 #ifdef TARGET_NR_accept4
7419 case TARGET_NR_accept4:
7420 #ifdef CONFIG_ACCEPT4
7421 ret = do_accept4(arg1, arg2, arg3, arg4);
7427 #ifdef TARGET_NR_bind
7428 case TARGET_NR_bind:
7429 ret = do_bind(arg1, arg2, arg3);
7432 #ifdef TARGET_NR_connect
7433 case TARGET_NR_connect:
7434 ret = do_connect(arg1, arg2, arg3);
7437 #ifdef TARGET_NR_getpeername
7438 case TARGET_NR_getpeername:
7439 ret = do_getpeername(arg1, arg2, arg3);
7442 #ifdef TARGET_NR_getsockname
7443 case TARGET_NR_getsockname:
7444 ret = do_getsockname(arg1, arg2, arg3);
7447 #ifdef TARGET_NR_getsockopt
7448 case TARGET_NR_getsockopt:
7449 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
7452 #ifdef TARGET_NR_listen
7453 case TARGET_NR_listen:
7454 ret = get_errno(listen(arg1, arg2));
7457 #ifdef TARGET_NR_recv
7458 case TARGET_NR_recv:
7459 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
7462 #ifdef TARGET_NR_recvfrom
7463 case TARGET_NR_recvfrom:
7464 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
7467 #ifdef TARGET_NR_recvmsg
7468 case TARGET_NR_recvmsg:
7469 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
7472 #ifdef TARGET_NR_send
7473 case TARGET_NR_send:
7474 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
7477 #ifdef TARGET_NR_sendmsg
7478 case TARGET_NR_sendmsg:
7479 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
7482 #ifdef TARGET_NR_sendmmsg
7483 case TARGET_NR_sendmmsg:
7484 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
7486 case TARGET_NR_recvmmsg:
7487 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
7490 #ifdef TARGET_NR_sendto
7491 case TARGET_NR_sendto:
7492 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
7495 #ifdef TARGET_NR_shutdown
7496 case TARGET_NR_shutdown:
7497 ret = get_errno(shutdown(arg1, arg2));
7500 #ifdef TARGET_NR_socket
7501 case TARGET_NR_socket:
7502 ret = do_socket(arg1, arg2, arg3);
7503 fd_trans_unregister(ret);
7506 #ifdef TARGET_NR_socketpair
7507 case TARGET_NR_socketpair:
7508 ret = do_socketpair(arg1, arg2, arg3, arg4);
7511 #ifdef TARGET_NR_setsockopt
7512 case TARGET_NR_setsockopt:
7513 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
7517 case TARGET_NR_syslog:
7518 if (!(p = lock_user_string(arg2)))
7520 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
7521 unlock_user(p, arg2, 0);
7524 case TARGET_NR_setitimer:
7526 struct itimerval value, ovalue, *pvalue;
7530 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
7531 || copy_from_user_timeval(&pvalue->it_value,
7532 arg2 + sizeof(struct target_timeval)))
7537 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
7538 if (!is_error(ret) && arg3) {
7539 if (copy_to_user_timeval(arg3,
7540 &ovalue.it_interval)
7541 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
7547 case TARGET_NR_getitimer:
7549 struct itimerval value;
7551 ret = get_errno(getitimer(arg1, &value));
7552 if (!is_error(ret) && arg2) {
7553 if (copy_to_user_timeval(arg2,
7555 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
7561 #ifdef TARGET_NR_stat
7562 case TARGET_NR_stat:
7563 if (!(p = lock_user_string(arg1)))
7565 ret = get_errno(stat(path(p), &st));
7566 unlock_user(p, arg1, 0);
7569 #ifdef TARGET_NR_lstat
7570 case TARGET_NR_lstat:
7571 if (!(p = lock_user_string(arg1)))
7573 ret = get_errno(lstat(path(p), &st));
7574 unlock_user(p, arg1, 0);
7577 case TARGET_NR_fstat:
7579 ret = get_errno(fstat(arg1, &st));
7580 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
7583 if (!is_error(ret)) {
7584 struct target_stat *target_st;
7586 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
7588 memset(target_st, 0, sizeof(*target_st));
7589 __put_user(st.st_dev, &target_st->st_dev);
7590 __put_user(st.st_ino, &target_st->st_ino);
7591 __put_user(st.st_mode, &target_st->st_mode);
7592 __put_user(st.st_uid, &target_st->st_uid);
7593 __put_user(st.st_gid, &target_st->st_gid);
7594 __put_user(st.st_nlink, &target_st->st_nlink);
7595 __put_user(st.st_rdev, &target_st->st_rdev);
7596 __put_user(st.st_size, &target_st->st_size);
7597 __put_user(st.st_blksize, &target_st->st_blksize);
7598 __put_user(st.st_blocks, &target_st->st_blocks);
7599 __put_user(st.st_atime, &target_st->target_st_atime);
7600 __put_user(st.st_mtime, &target_st->target_st_mtime);
7601 __put_user(st.st_ctime, &target_st->target_st_ctime);
7602 unlock_user_struct(target_st, arg2, 1);
7606 #ifdef TARGET_NR_olduname
7607 case TARGET_NR_olduname:
7610 #ifdef TARGET_NR_iopl
7611 case TARGET_NR_iopl:
7614 case TARGET_NR_vhangup:
7615 ret = get_errno(vhangup());
7617 #ifdef TARGET_NR_idle
7618 case TARGET_NR_idle:
7621 #ifdef TARGET_NR_syscall
7622 case TARGET_NR_syscall:
7623 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
7624 arg6, arg7, arg8, 0);
7627 case TARGET_NR_wait4:
7630 abi_long status_ptr = arg2;
7631 struct rusage rusage, *rusage_ptr;
7632 abi_ulong target_rusage = arg4;
7633 abi_long rusage_err;
7635 rusage_ptr = &rusage;
7638 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
7639 if (!is_error(ret)) {
7640 if (status_ptr && ret) {
7641 status = host_to_target_waitstatus(status);
7642 if (put_user_s32(status, status_ptr))
7645 if (target_rusage) {
7646 rusage_err = host_to_target_rusage(target_rusage, &rusage);
7654 #ifdef TARGET_NR_swapoff
7655 case TARGET_NR_swapoff:
7656 if (!(p = lock_user_string(arg1)))
7658 ret = get_errno(swapoff(p));
7659 unlock_user(p, arg1, 0);
7662 case TARGET_NR_sysinfo:
7664 struct target_sysinfo *target_value;
7665 struct sysinfo value;
7666 ret = get_errno(sysinfo(&value));
7667 if (!is_error(ret) && arg1)
7669 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
7671 __put_user(value.uptime, &target_value->uptime);
7672 __put_user(value.loads[0], &target_value->loads[0]);
7673 __put_user(value.loads[1], &target_value->loads[1]);
7674 __put_user(value.loads[2], &target_value->loads[2]);
7675 __put_user(value.totalram, &target_value->totalram);
7676 __put_user(value.freeram, &target_value->freeram);
7677 __put_user(value.sharedram, &target_value->sharedram);
7678 __put_user(value.bufferram, &target_value->bufferram);
7679 __put_user(value.totalswap, &target_value->totalswap);
7680 __put_user(value.freeswap, &target_value->freeswap);
7681 __put_user(value.procs, &target_value->procs);
7682 __put_user(value.totalhigh, &target_value->totalhigh);
7683 __put_user(value.freehigh, &target_value->freehigh);
7684 __put_user(value.mem_unit, &target_value->mem_unit);
7685 unlock_user_struct(target_value, arg1, 1);
7689 #ifdef TARGET_NR_ipc
7691 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
7694 #ifdef TARGET_NR_semget
7695 case TARGET_NR_semget:
7696 ret = get_errno(semget(arg1, arg2, arg3));
7699 #ifdef TARGET_NR_semop
7700 case TARGET_NR_semop:
7701 ret = do_semop(arg1, arg2, arg3);
7704 #ifdef TARGET_NR_semctl
7705 case TARGET_NR_semctl:
7706 ret = do_semctl(arg1, arg2, arg3, arg4);
7709 #ifdef TARGET_NR_msgctl
7710 case TARGET_NR_msgctl:
7711 ret = do_msgctl(arg1, arg2, arg3);
7714 #ifdef TARGET_NR_msgget
7715 case TARGET_NR_msgget:
7716 ret = get_errno(msgget(arg1, arg2));
7719 #ifdef TARGET_NR_msgrcv
7720 case TARGET_NR_msgrcv:
7721 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
7724 #ifdef TARGET_NR_msgsnd
7725 case TARGET_NR_msgsnd:
7726 ret = do_msgsnd(arg1, arg2, arg3, arg4);
7729 #ifdef TARGET_NR_shmget
7730 case TARGET_NR_shmget:
7731 ret = get_errno(shmget(arg1, arg2, arg3));
7734 #ifdef TARGET_NR_shmctl
7735 case TARGET_NR_shmctl:
7736 ret = do_shmctl(arg1, arg2, arg3);
7739 #ifdef TARGET_NR_shmat
7740 case TARGET_NR_shmat:
7741 ret = do_shmat(arg1, arg2, arg3);
7744 #ifdef TARGET_NR_shmdt
7745 case TARGET_NR_shmdt:
7746 ret = do_shmdt(arg1);
7749 case TARGET_NR_fsync:
7750 ret = get_errno(fsync(arg1));
7752 case TARGET_NR_clone:
7753 /* Linux manages to have three different orderings for its
7754 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
7755 * match the kernel's CONFIG_CLONE_* settings.
7756 * Microblaze is further special in that it uses a sixth
7757 * implicit argument to clone for the TLS pointer.
7759 #if defined(TARGET_MICROBLAZE)
7760 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
7761 #elif defined(TARGET_CLONE_BACKWARDS)
7762 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
7763 #elif defined(TARGET_CLONE_BACKWARDS2)
7764 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
7766 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
7769 #ifdef __NR_exit_group
7770 /* new thread calls */
7771 case TARGET_NR_exit_group:
7775 gdb_exit(cpu_env, arg1);
7776 ret = get_errno(exit_group(arg1));
7779 case TARGET_NR_setdomainname:
7780 if (!(p = lock_user_string(arg1)))
7782 ret = get_errno(setdomainname(p, arg2));
7783 unlock_user(p, arg1, 0);
7785 case TARGET_NR_uname:
7786 /* no need to transcode because we use the linux syscall */
7788 struct new_utsname * buf;
7790 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
7792 ret = get_errno(sys_uname(buf));
7793 if (!is_error(ret)) {
7794 /* Overrite the native machine name with whatever is being
7796 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
7797 /* Allow the user to override the reported release. */
7798 if (qemu_uname_release && *qemu_uname_release)
7799 strcpy (buf->release, qemu_uname_release);
7801 unlock_user_struct(buf, arg1, 1);
7805 case TARGET_NR_modify_ldt:
7806 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
7808 #if !defined(TARGET_X86_64)
7809 case TARGET_NR_vm86old:
7811 case TARGET_NR_vm86:
7812 ret = do_vm86(cpu_env, arg1, arg2);
7816 case TARGET_NR_adjtimex:
7818 #ifdef TARGET_NR_create_module
7819 case TARGET_NR_create_module:
7821 case TARGET_NR_init_module:
7822 case TARGET_NR_delete_module:
7823 #ifdef TARGET_NR_get_kernel_syms
7824 case TARGET_NR_get_kernel_syms:
7827 case TARGET_NR_quotactl:
7829 case TARGET_NR_getpgid:
7830 ret = get_errno(getpgid(arg1));
7832 case TARGET_NR_fchdir:
7833 ret = get_errno(fchdir(arg1));
7835 #ifdef TARGET_NR_bdflush /* not on x86_64 */
7836 case TARGET_NR_bdflush:
7839 #ifdef TARGET_NR_sysfs
7840 case TARGET_NR_sysfs:
7843 case TARGET_NR_personality:
7844 ret = get_errno(personality(arg1));
7846 #ifdef TARGET_NR_afs_syscall
7847 case TARGET_NR_afs_syscall:
7850 #ifdef TARGET_NR__llseek /* Not on alpha */
7851 case TARGET_NR__llseek:
7854 #if !defined(__NR_llseek)
7855 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
7857 ret = get_errno(res);
7862 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
7864 if ((ret == 0) && put_user_s64(res, arg4)) {
7870 #ifdef TARGET_NR_getdents
7871 case TARGET_NR_getdents:
7872 #ifdef __NR_getdents
7873 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
7875 struct target_dirent *target_dirp;
7876 struct linux_dirent *dirp;
7877 abi_long count = arg3;
7879 dirp = malloc(count);
7881 ret = -TARGET_ENOMEM;
7885 ret = get_errno(sys_getdents(arg1, dirp, count));
7886 if (!is_error(ret)) {
7887 struct linux_dirent *de;
7888 struct target_dirent *tde;
7890 int reclen, treclen;
7891 int count1, tnamelen;
7895 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7899 reclen = de->d_reclen;
7900 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
7901 assert(tnamelen >= 0);
7902 treclen = tnamelen + offsetof(struct target_dirent, d_name);
7903 assert(count1 + treclen <= count);
7904 tde->d_reclen = tswap16(treclen);
7905 tde->d_ino = tswapal(de->d_ino);
7906 tde->d_off = tswapal(de->d_off);
7907 memcpy(tde->d_name, de->d_name, tnamelen);
7908 de = (struct linux_dirent *)((char *)de + reclen);
7910 tde = (struct target_dirent *)((char *)tde + treclen);
7914 unlock_user(target_dirp, arg2, ret);
7920 struct linux_dirent *dirp;
7921 abi_long count = arg3;
7923 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7925 ret = get_errno(sys_getdents(arg1, dirp, count));
7926 if (!is_error(ret)) {
7927 struct linux_dirent *de;
7932 reclen = de->d_reclen;
7935 de->d_reclen = tswap16(reclen);
7936 tswapls(&de->d_ino);
7937 tswapls(&de->d_off);
7938 de = (struct linux_dirent *)((char *)de + reclen);
7942 unlock_user(dirp, arg2, ret);
7946 /* Implement getdents in terms of getdents64 */
7948 struct linux_dirent64 *dirp;
7949 abi_long count = arg3;
7951 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
7955 ret = get_errno(sys_getdents64(arg1, dirp, count));
7956 if (!is_error(ret)) {
7957 /* Convert the dirent64 structs to target dirent. We do this
7958 * in-place, since we can guarantee that a target_dirent is no
7959 * larger than a dirent64; however this means we have to be
7960 * careful to read everything before writing in the new format.
7962 struct linux_dirent64 *de;
7963 struct target_dirent *tde;
7968 tde = (struct target_dirent *)dirp;
7970 int namelen, treclen;
7971 int reclen = de->d_reclen;
7972 uint64_t ino = de->d_ino;
7973 int64_t off = de->d_off;
7974 uint8_t type = de->d_type;
7976 namelen = strlen(de->d_name);
7977 treclen = offsetof(struct target_dirent, d_name)
7979 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
7981 memmove(tde->d_name, de->d_name, namelen + 1);
7982 tde->d_ino = tswapal(ino);
7983 tde->d_off = tswapal(off);
7984 tde->d_reclen = tswap16(treclen);
7985 /* The target_dirent type is in what was formerly a padding
7986 * byte at the end of the structure:
7988 *(((char *)tde) + treclen - 1) = type;
7990 de = (struct linux_dirent64 *)((char *)de + reclen);
7991 tde = (struct target_dirent *)((char *)tde + treclen);
7997 unlock_user(dirp, arg2, ret);
8001 #endif /* TARGET_NR_getdents */
8002 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8003 case TARGET_NR_getdents64:
8005 struct linux_dirent64 *dirp;
8006 abi_long count = arg3;
8007 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
8009 ret = get_errno(sys_getdents64(arg1, dirp, count));
8010 if (!is_error(ret)) {
8011 struct linux_dirent64 *de;
8016 reclen = de->d_reclen;
8019 de->d_reclen = tswap16(reclen);
8020 tswap64s((uint64_t *)&de->d_ino);
8021 tswap64s((uint64_t *)&de->d_off);
8022 de = (struct linux_dirent64 *)((char *)de + reclen);
8026 unlock_user(dirp, arg2, ret);
8029 #endif /* TARGET_NR_getdents64 */
8030 #if defined(TARGET_NR__newselect)
8031 case TARGET_NR__newselect:
8032 ret = do_select(arg1, arg2, arg3, arg4, arg5);
8035 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
8036 # ifdef TARGET_NR_poll
8037 case TARGET_NR_poll:
8039 # ifdef TARGET_NR_ppoll
8040 case TARGET_NR_ppoll:
8043 struct target_pollfd *target_pfd;
8044 unsigned int nfds = arg2;
8049 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
8053 pfd = alloca(sizeof(struct pollfd) * nfds);
8054 for(i = 0; i < nfds; i++) {
8055 pfd[i].fd = tswap32(target_pfd[i].fd);
8056 pfd[i].events = tswap16(target_pfd[i].events);
8059 # ifdef TARGET_NR_ppoll
8060 if (num == TARGET_NR_ppoll) {
8061 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
8062 target_sigset_t *target_set;
8063 sigset_t _set, *set = &_set;
8066 if (target_to_host_timespec(timeout_ts, arg3)) {
8067 unlock_user(target_pfd, arg1, 0);
8075 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
8077 unlock_user(target_pfd, arg1, 0);
8080 target_to_host_sigset(set, target_set);
8085 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));
8087 if (!is_error(ret) && arg3) {
8088 host_to_target_timespec(arg3, timeout_ts);
8091 unlock_user(target_set, arg4, 0);
8095 ret = get_errno(poll(pfd, nfds, timeout));
8097 if (!is_error(ret)) {
8098 for(i = 0; i < nfds; i++) {
8099 target_pfd[i].revents = tswap16(pfd[i].revents);
8102 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
8106 case TARGET_NR_flock:
8107 /* NOTE: the flock constant seems to be the same for every
8109 ret = get_errno(flock(arg1, arg2));
8111 case TARGET_NR_readv:
8113 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
8115 ret = get_errno(readv(arg1, vec, arg3));
8116 unlock_iovec(vec, arg2, arg3, 1);
8118 ret = -host_to_target_errno(errno);
8122 case TARGET_NR_writev:
8124 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
8126 ret = get_errno(writev(arg1, vec, arg3));
8127 unlock_iovec(vec, arg2, arg3, 0);
8129 ret = -host_to_target_errno(errno);
8133 case TARGET_NR_getsid:
8134 ret = get_errno(getsid(arg1));
8136 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
8137 case TARGET_NR_fdatasync:
8138 ret = get_errno(fdatasync(arg1));
8141 #ifdef TARGET_NR__sysctl
8142 case TARGET_NR__sysctl:
8143 /* We don't implement this, but ENOTDIR is always a safe
8145 ret = -TARGET_ENOTDIR;
8148 case TARGET_NR_sched_getaffinity:
8150 unsigned int mask_size;
8151 unsigned long *mask;
8154 * sched_getaffinity needs multiples of ulong, so need to take
8155 * care of mismatches between target ulong and host ulong sizes.
8157 if (arg2 & (sizeof(abi_ulong) - 1)) {
8158 ret = -TARGET_EINVAL;
8161 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
8163 mask = alloca(mask_size);
8164 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
8166 if (!is_error(ret)) {
8168 /* More data returned than the caller's buffer will fit.
8169 * This only happens if sizeof(abi_long) < sizeof(long)
8170 * and the caller passed us a buffer holding an odd number
8171 * of abi_longs. If the host kernel is actually using the
8172 * extra 4 bytes then fail EINVAL; otherwise we can just
8173 * ignore them and only copy the interesting part.
8175 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
8176 if (numcpus > arg2 * 8) {
8177 ret = -TARGET_EINVAL;
8183 if (copy_to_user(arg3, mask, ret)) {
8189 case TARGET_NR_sched_setaffinity:
8191 unsigned int mask_size;
8192 unsigned long *mask;
8195 * sched_setaffinity needs multiples of ulong, so need to take
8196 * care of mismatches between target ulong and host ulong sizes.
8198 if (arg2 & (sizeof(abi_ulong) - 1)) {
8199 ret = -TARGET_EINVAL;
8202 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
8204 mask = alloca(mask_size);
8205 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
8208 memcpy(mask, p, arg2);
8209 unlock_user_struct(p, arg2, 0);
8211 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
8214 case TARGET_NR_sched_setparam:
8216 struct sched_param *target_schp;
8217 struct sched_param schp;
8220 return -TARGET_EINVAL;
8222 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
8224 schp.sched_priority = tswap32(target_schp->sched_priority);
8225 unlock_user_struct(target_schp, arg2, 0);
8226 ret = get_errno(sched_setparam(arg1, &schp));
8229 case TARGET_NR_sched_getparam:
8231 struct sched_param *target_schp;
8232 struct sched_param schp;
8235 return -TARGET_EINVAL;
8237 ret = get_errno(sched_getparam(arg1, &schp));
8238 if (!is_error(ret)) {
8239 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
8241 target_schp->sched_priority = tswap32(schp.sched_priority);
8242 unlock_user_struct(target_schp, arg2, 1);
8246 case TARGET_NR_sched_setscheduler:
8248 struct sched_param *target_schp;
8249 struct sched_param schp;
8251 return -TARGET_EINVAL;
8253 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
8255 schp.sched_priority = tswap32(target_schp->sched_priority);
8256 unlock_user_struct(target_schp, arg3, 0);
8257 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
8260 case TARGET_NR_sched_getscheduler:
8261 ret = get_errno(sched_getscheduler(arg1));
8263 case TARGET_NR_sched_yield:
8264 ret = get_errno(sched_yield());
8266 case TARGET_NR_sched_get_priority_max:
8267 ret = get_errno(sched_get_priority_max(arg1));
8269 case TARGET_NR_sched_get_priority_min:
8270 ret = get_errno(sched_get_priority_min(arg1));
8272 case TARGET_NR_sched_rr_get_interval:
8275 ret = get_errno(sched_rr_get_interval(arg1, &ts));
8276 if (!is_error(ret)) {
8277 ret = host_to_target_timespec(arg2, &ts);
8281 case TARGET_NR_nanosleep:
8283 struct timespec req, rem;
8284 target_to_host_timespec(&req, arg1);
8285 ret = get_errno(nanosleep(&req, &rem));
8286 if (is_error(ret) && arg2) {
8287 host_to_target_timespec(arg2, &rem);
8291 #ifdef TARGET_NR_query_module
8292 case TARGET_NR_query_module:
8295 #ifdef TARGET_NR_nfsservctl
8296 case TARGET_NR_nfsservctl:
8299 case TARGET_NR_prctl:
8301 case PR_GET_PDEATHSIG:
8304 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
8305 if (!is_error(ret) && arg2
8306 && put_user_ual(deathsig, arg2)) {
8314 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
8318 ret = get_errno(prctl(arg1, (unsigned long)name,
8320 unlock_user(name, arg2, 16);
8325 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
8329 ret = get_errno(prctl(arg1, (unsigned long)name,
8331 unlock_user(name, arg2, 0);
8336 /* Most prctl options have no pointer arguments */
8337 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
8341 #ifdef TARGET_NR_arch_prctl
8342 case TARGET_NR_arch_prctl:
8343 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
8344 ret = do_arch_prctl(cpu_env, arg1, arg2);
8350 #ifdef TARGET_NR_pread64
8351 case TARGET_NR_pread64:
8352 if (regpairs_aligned(cpu_env)) {
8356 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8358 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
8359 unlock_user(p, arg2, ret);
8361 case TARGET_NR_pwrite64:
8362 if (regpairs_aligned(cpu_env)) {
8366 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8368 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
8369 unlock_user(p, arg2, 0);
8372 case TARGET_NR_getcwd:
8373 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
8375 ret = get_errno(sys_getcwd1(p, arg2));
8376 unlock_user(p, arg1, ret);
8378 case TARGET_NR_capget:
8379 case TARGET_NR_capset:
8381 struct target_user_cap_header *target_header;
8382 struct target_user_cap_data *target_data = NULL;
8383 struct __user_cap_header_struct header;
8384 struct __user_cap_data_struct data[2];
8385 struct __user_cap_data_struct *dataptr = NULL;
8386 int i, target_datalen;
8389 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
8392 header.version = tswap32(target_header->version);
8393 header.pid = tswap32(target_header->pid);
8395 if (header.version != _LINUX_CAPABILITY_VERSION) {
8396 /* Version 2 and up takes pointer to two user_data structs */
8400 target_datalen = sizeof(*target_data) * data_items;
8403 if (num == TARGET_NR_capget) {
8404 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
8406 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
8409 unlock_user_struct(target_header, arg1, 0);
8413 if (num == TARGET_NR_capset) {
8414 for (i = 0; i < data_items; i++) {
8415 data[i].effective = tswap32(target_data[i].effective);
8416 data[i].permitted = tswap32(target_data[i].permitted);
8417 data[i].inheritable = tswap32(target_data[i].inheritable);
8424 if (num == TARGET_NR_capget) {
8425 ret = get_errno(capget(&header, dataptr));
8427 ret = get_errno(capset(&header, dataptr));
8430 /* The kernel always updates version for both capget and capset */
8431 target_header->version = tswap32(header.version);
8432 unlock_user_struct(target_header, arg1, 1);
8435 if (num == TARGET_NR_capget) {
8436 for (i = 0; i < data_items; i++) {
8437 target_data[i].effective = tswap32(data[i].effective);
8438 target_data[i].permitted = tswap32(data[i].permitted);
8439 target_data[i].inheritable = tswap32(data[i].inheritable);
8441 unlock_user(target_data, arg2, target_datalen);
8443 unlock_user(target_data, arg2, 0);
8448 case TARGET_NR_sigaltstack:
8449 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
8450 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
8451 defined(TARGET_M68K) || defined(TARGET_S390X) || defined(TARGET_OPENRISC)
8452 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
8458 #ifdef CONFIG_SENDFILE
8459 case TARGET_NR_sendfile:
8464 ret = get_user_sal(off, arg3);
8465 if (is_error(ret)) {
8470 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
8471 if (!is_error(ret) && arg3) {
8472 abi_long ret2 = put_user_sal(off, arg3);
8473 if (is_error(ret2)) {
8479 #ifdef TARGET_NR_sendfile64
8480 case TARGET_NR_sendfile64:
8485 ret = get_user_s64(off, arg3);
8486 if (is_error(ret)) {
8491 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
8492 if (!is_error(ret) && arg3) {
8493 abi_long ret2 = put_user_s64(off, arg3);
8494 if (is_error(ret2)) {
8502 case TARGET_NR_sendfile:
8503 #ifdef TARGET_NR_sendfile64
8504 case TARGET_NR_sendfile64:
8509 #ifdef TARGET_NR_getpmsg
8510 case TARGET_NR_getpmsg:
8513 #ifdef TARGET_NR_putpmsg
8514 case TARGET_NR_putpmsg:
8517 #ifdef TARGET_NR_vfork
8518 case TARGET_NR_vfork:
8519 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
8523 #ifdef TARGET_NR_ugetrlimit
8524 case TARGET_NR_ugetrlimit:
8527 int resource = target_to_host_resource(arg1);
8528 ret = get_errno(getrlimit(resource, &rlim));
8529 if (!is_error(ret)) {
8530 struct target_rlimit *target_rlim;
8531 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8533 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8534 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8535 unlock_user_struct(target_rlim, arg2, 1);
8540 #ifdef TARGET_NR_truncate64
8541 case TARGET_NR_truncate64:
8542 if (!(p = lock_user_string(arg1)))
8544 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
8545 unlock_user(p, arg1, 0);
8548 #ifdef TARGET_NR_ftruncate64
8549 case TARGET_NR_ftruncate64:
8550 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
8553 #ifdef TARGET_NR_stat64
8554 case TARGET_NR_stat64:
8555 if (!(p = lock_user_string(arg1)))
8557 ret = get_errno(stat(path(p), &st));
8558 unlock_user(p, arg1, 0);
8560 ret = host_to_target_stat64(cpu_env, arg2, &st);
8563 #ifdef TARGET_NR_lstat64
8564 case TARGET_NR_lstat64:
8565 if (!(p = lock_user_string(arg1)))
8567 ret = get_errno(lstat(path(p), &st));
8568 unlock_user(p, arg1, 0);
8570 ret = host_to_target_stat64(cpu_env, arg2, &st);
8573 #ifdef TARGET_NR_fstat64
8574 case TARGET_NR_fstat64:
8575 ret = get_errno(fstat(arg1, &st));
8577 ret = host_to_target_stat64(cpu_env, arg2, &st);
8580 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
8581 #ifdef TARGET_NR_fstatat64
8582 case TARGET_NR_fstatat64:
8584 #ifdef TARGET_NR_newfstatat
8585 case TARGET_NR_newfstatat:
8587 if (!(p = lock_user_string(arg2)))
8589 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
8591 ret = host_to_target_stat64(cpu_env, arg3, &st);
8594 #ifdef TARGET_NR_lchown
8595 case TARGET_NR_lchown:
8596 if (!(p = lock_user_string(arg1)))
8598 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
8599 unlock_user(p, arg1, 0);
8602 #ifdef TARGET_NR_getuid
8603 case TARGET_NR_getuid:
8604 ret = get_errno(high2lowuid(getuid()));
8607 #ifdef TARGET_NR_getgid
8608 case TARGET_NR_getgid:
8609 ret = get_errno(high2lowgid(getgid()));
8612 #ifdef TARGET_NR_geteuid
8613 case TARGET_NR_geteuid:
8614 ret = get_errno(high2lowuid(geteuid()));
8617 #ifdef TARGET_NR_getegid
8618 case TARGET_NR_getegid:
8619 ret = get_errno(high2lowgid(getegid()));
8622 case TARGET_NR_setreuid:
8623 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
8625 case TARGET_NR_setregid:
8626 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
8628 case TARGET_NR_getgroups:
8630 int gidsetsize = arg1;
8631 target_id *target_grouplist;
8635 grouplist = alloca(gidsetsize * sizeof(gid_t));
8636 ret = get_errno(getgroups(gidsetsize, grouplist));
8637 if (gidsetsize == 0)
8639 if (!is_error(ret)) {
8640 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
8641 if (!target_grouplist)
8643 for(i = 0;i < ret; i++)
8644 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
8645 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
8649 case TARGET_NR_setgroups:
8651 int gidsetsize = arg1;
8652 target_id *target_grouplist;
8653 gid_t *grouplist = NULL;
8656 grouplist = alloca(gidsetsize * sizeof(gid_t));
8657 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
8658 if (!target_grouplist) {
8659 ret = -TARGET_EFAULT;
8662 for (i = 0; i < gidsetsize; i++) {
8663 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
8665 unlock_user(target_grouplist, arg2, 0);
8667 ret = get_errno(setgroups(gidsetsize, grouplist));
8670 case TARGET_NR_fchown:
8671 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
8673 #if defined(TARGET_NR_fchownat)
8674 case TARGET_NR_fchownat:
8675 if (!(p = lock_user_string(arg2)))
8677 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
8678 low2highgid(arg4), arg5));
8679 unlock_user(p, arg2, 0);
8682 #ifdef TARGET_NR_setresuid
8683 case TARGET_NR_setresuid:
8684 ret = get_errno(setresuid(low2highuid(arg1),
8686 low2highuid(arg3)));
8689 #ifdef TARGET_NR_getresuid
8690 case TARGET_NR_getresuid:
8692 uid_t ruid, euid, suid;
8693 ret = get_errno(getresuid(&ruid, &euid, &suid));
8694 if (!is_error(ret)) {
8695 if (put_user_id(high2lowuid(ruid), arg1)
8696 || put_user_id(high2lowuid(euid), arg2)
8697 || put_user_id(high2lowuid(suid), arg3))
8703 #ifdef TARGET_NR_getresgid
8704 case TARGET_NR_setresgid:
8705 ret = get_errno(setresgid(low2highgid(arg1),
8707 low2highgid(arg3)));
8710 #ifdef TARGET_NR_getresgid
8711 case TARGET_NR_getresgid:
8713 gid_t rgid, egid, sgid;
8714 ret = get_errno(getresgid(&rgid, &egid, &sgid));
8715 if (!is_error(ret)) {
8716 if (put_user_id(high2lowgid(rgid), arg1)
8717 || put_user_id(high2lowgid(egid), arg2)
8718 || put_user_id(high2lowgid(sgid), arg3))
8724 #ifdef TARGET_NR_chown
8725 case TARGET_NR_chown:
8726 if (!(p = lock_user_string(arg1)))
8728 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
8729 unlock_user(p, arg1, 0);
8732 case TARGET_NR_setuid:
8733 ret = get_errno(setuid(low2highuid(arg1)));
8735 case TARGET_NR_setgid:
8736 ret = get_errno(setgid(low2highgid(arg1)));
8738 case TARGET_NR_setfsuid:
8739 ret = get_errno(setfsuid(arg1));
8741 case TARGET_NR_setfsgid:
8742 ret = get_errno(setfsgid(arg1));
8745 #ifdef TARGET_NR_lchown32
8746 case TARGET_NR_lchown32:
8747 if (!(p = lock_user_string(arg1)))
8749 ret = get_errno(lchown(p, arg2, arg3));
8750 unlock_user(p, arg1, 0);
8753 #ifdef TARGET_NR_getuid32
8754 case TARGET_NR_getuid32:
8755 ret = get_errno(getuid());
8759 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
8760 /* Alpha specific */
8761 case TARGET_NR_getxuid:
8765 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
8767 ret = get_errno(getuid());
8770 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
8771 /* Alpha specific */
8772 case TARGET_NR_getxgid:
8776 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
8778 ret = get_errno(getgid());
8781 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
8782 /* Alpha specific */
8783 case TARGET_NR_osf_getsysinfo:
8784 ret = -TARGET_EOPNOTSUPP;
8786 case TARGET_GSI_IEEE_FP_CONTROL:
8788 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
8790 /* Copied from linux ieee_fpcr_to_swcr. */
8791 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
8792 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
8793 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
8794 | SWCR_TRAP_ENABLE_DZE
8795 | SWCR_TRAP_ENABLE_OVF);
8796 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
8797 | SWCR_TRAP_ENABLE_INE);
8798 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
8799 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
8801 if (put_user_u64 (swcr, arg2))
8807 /* case GSI_IEEE_STATE_AT_SIGNAL:
8808 -- Not implemented in linux kernel.
8810 -- Retrieves current unaligned access state; not much used.
8812 -- Retrieves implver information; surely not used.
8814 -- Grabs a copy of the HWRPB; surely not used.
8819 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
8820 /* Alpha specific */
8821 case TARGET_NR_osf_setsysinfo:
8822 ret = -TARGET_EOPNOTSUPP;
8824 case TARGET_SSI_IEEE_FP_CONTROL:
8826 uint64_t swcr, fpcr, orig_fpcr;
8828 if (get_user_u64 (swcr, arg2)) {
8831 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
8832 fpcr = orig_fpcr & FPCR_DYN_MASK;
8834 /* Copied from linux ieee_swcr_to_fpcr. */
8835 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
8836 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
8837 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
8838 | SWCR_TRAP_ENABLE_DZE
8839 | SWCR_TRAP_ENABLE_OVF)) << 48;
8840 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
8841 | SWCR_TRAP_ENABLE_INE)) << 57;
8842 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
8843 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
8845 cpu_alpha_store_fpcr(cpu_env, fpcr);
8850 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
8852 uint64_t exc, fpcr, orig_fpcr;
8855 if (get_user_u64(exc, arg2)) {
8859 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
8861 /* We only add to the exception status here. */
8862 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
8864 cpu_alpha_store_fpcr(cpu_env, fpcr);
8867 /* Old exceptions are not signaled. */
8868 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
8870 /* If any exceptions set by this call,
8871 and are unmasked, send a signal. */
8873 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
8874 si_code = TARGET_FPE_FLTRES;
8876 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
8877 si_code = TARGET_FPE_FLTUND;
8879 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
8880 si_code = TARGET_FPE_FLTOVF;
8882 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
8883 si_code = TARGET_FPE_FLTDIV;
8885 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
8886 si_code = TARGET_FPE_FLTINV;
8889 target_siginfo_t info;
8890 info.si_signo = SIGFPE;
8892 info.si_code = si_code;
8893 info._sifields._sigfault._addr
8894 = ((CPUArchState *)cpu_env)->pc;
8895 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
8900 /* case SSI_NVPAIRS:
8901 -- Used with SSIN_UACPROC to enable unaligned accesses.
8902 case SSI_IEEE_STATE_AT_SIGNAL:
8903 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
8904 -- Not implemented in linux kernel
8909 #ifdef TARGET_NR_osf_sigprocmask
8910 /* Alpha specific. */
8911 case TARGET_NR_osf_sigprocmask:
8915 sigset_t set, oldset;
8918 case TARGET_SIG_BLOCK:
8921 case TARGET_SIG_UNBLOCK:
8924 case TARGET_SIG_SETMASK:
8928 ret = -TARGET_EINVAL;
8932 target_to_host_old_sigset(&set, &mask);
8933 do_sigprocmask(how, &set, &oldset);
8934 host_to_target_old_sigset(&mask, &oldset);
8940 #ifdef TARGET_NR_getgid32
8941 case TARGET_NR_getgid32:
8942 ret = get_errno(getgid());
8945 #ifdef TARGET_NR_geteuid32
8946 case TARGET_NR_geteuid32:
8947 ret = get_errno(geteuid());
8950 #ifdef TARGET_NR_getegid32
8951 case TARGET_NR_getegid32:
8952 ret = get_errno(getegid());
8955 #ifdef TARGET_NR_setreuid32
8956 case TARGET_NR_setreuid32:
8957 ret = get_errno(setreuid(arg1, arg2));
8960 #ifdef TARGET_NR_setregid32
8961 case TARGET_NR_setregid32:
8962 ret = get_errno(setregid(arg1, arg2));
8965 #ifdef TARGET_NR_getgroups32
8966 case TARGET_NR_getgroups32:
8968 int gidsetsize = arg1;
8969 uint32_t *target_grouplist;
8973 grouplist = alloca(gidsetsize * sizeof(gid_t));
8974 ret = get_errno(getgroups(gidsetsize, grouplist));
8975 if (gidsetsize == 0)
8977 if (!is_error(ret)) {
8978 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
8979 if (!target_grouplist) {
8980 ret = -TARGET_EFAULT;
8983 for(i = 0;i < ret; i++)
8984 target_grouplist[i] = tswap32(grouplist[i]);
8985 unlock_user(target_grouplist, arg2, gidsetsize * 4);
8990 #ifdef TARGET_NR_setgroups32
8991 case TARGET_NR_setgroups32:
8993 int gidsetsize = arg1;
8994 uint32_t *target_grouplist;
8998 grouplist = alloca(gidsetsize * sizeof(gid_t));
8999 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
9000 if (!target_grouplist) {
9001 ret = -TARGET_EFAULT;
9004 for(i = 0;i < gidsetsize; i++)
9005 grouplist[i] = tswap32(target_grouplist[i]);
9006 unlock_user(target_grouplist, arg2, 0);
9007 ret = get_errno(setgroups(gidsetsize, grouplist));
9011 #ifdef TARGET_NR_fchown32
9012 case TARGET_NR_fchown32:
9013 ret = get_errno(fchown(arg1, arg2, arg3));
9016 #ifdef TARGET_NR_setresuid32
9017 case TARGET_NR_setresuid32:
9018 ret = get_errno(setresuid(arg1, arg2, arg3));
9021 #ifdef TARGET_NR_getresuid32
9022 case TARGET_NR_getresuid32:
9024 uid_t ruid, euid, suid;
9025 ret = get_errno(getresuid(&ruid, &euid, &suid));
9026 if (!is_error(ret)) {
9027 if (put_user_u32(ruid, arg1)
9028 || put_user_u32(euid, arg2)
9029 || put_user_u32(suid, arg3))
9035 #ifdef TARGET_NR_setresgid32
9036 case TARGET_NR_setresgid32:
9037 ret = get_errno(setresgid(arg1, arg2, arg3));
9040 #ifdef TARGET_NR_getresgid32
9041 case TARGET_NR_getresgid32:
9043 gid_t rgid, egid, sgid;
9044 ret = get_errno(getresgid(&rgid, &egid, &sgid));
9045 if (!is_error(ret)) {
9046 if (put_user_u32(rgid, arg1)
9047 || put_user_u32(egid, arg2)
9048 || put_user_u32(sgid, arg3))
9054 #ifdef TARGET_NR_chown32
9055 case TARGET_NR_chown32:
9056 if (!(p = lock_user_string(arg1)))
9058 ret = get_errno(chown(p, arg2, arg3));
9059 unlock_user(p, arg1, 0);
9062 #ifdef TARGET_NR_setuid32
9063 case TARGET_NR_setuid32:
9064 ret = get_errno(setuid(arg1));
9067 #ifdef TARGET_NR_setgid32
9068 case TARGET_NR_setgid32:
9069 ret = get_errno(setgid(arg1));
9072 #ifdef TARGET_NR_setfsuid32
9073 case TARGET_NR_setfsuid32:
9074 ret = get_errno(setfsuid(arg1));
9077 #ifdef TARGET_NR_setfsgid32
9078 case TARGET_NR_setfsgid32:
9079 ret = get_errno(setfsgid(arg1));
9083 case TARGET_NR_pivot_root:
9085 #ifdef TARGET_NR_mincore
9086 case TARGET_NR_mincore:
9089 ret = -TARGET_EFAULT;
9090 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
9092 if (!(p = lock_user_string(arg3)))
9094 ret = get_errno(mincore(a, arg2, p));
9095 unlock_user(p, arg3, ret);
9097 unlock_user(a, arg1, 0);
9101 #ifdef TARGET_NR_arm_fadvise64_64
9102 case TARGET_NR_arm_fadvise64_64:
9105 * arm_fadvise64_64 looks like fadvise64_64 but
9106 * with different argument order
9114 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
9115 #ifdef TARGET_NR_fadvise64_64
9116 case TARGET_NR_fadvise64_64:
9118 #ifdef TARGET_NR_fadvise64
9119 case TARGET_NR_fadvise64:
9123 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
9124 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
9125 case 6: arg4 = POSIX_FADV_DONTNEED; break;
9126 case 7: arg4 = POSIX_FADV_NOREUSE; break;
9130 ret = -posix_fadvise(arg1, arg2, arg3, arg4);
9133 #ifdef TARGET_NR_madvise
9134 case TARGET_NR_madvise:
9135 /* A straight passthrough may not be safe because qemu sometimes
9136 turns private file-backed mappings into anonymous mappings.
9137 This will break MADV_DONTNEED.
9138 This is a hint, so ignoring and returning success is ok. */
9142 #if TARGET_ABI_BITS == 32
9143 case TARGET_NR_fcntl64:
9147 struct target_flock64 *target_fl;
9149 struct target_eabi_flock64 *target_efl;
9152 cmd = target_to_host_fcntl_cmd(arg2);
9153 if (cmd == -TARGET_EINVAL) {
9159 case TARGET_F_GETLK64:
9161 if (((CPUARMState *)cpu_env)->eabi) {
9162 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
9164 fl.l_type = tswap16(target_efl->l_type);
9165 fl.l_whence = tswap16(target_efl->l_whence);
9166 fl.l_start = tswap64(target_efl->l_start);
9167 fl.l_len = tswap64(target_efl->l_len);
9168 fl.l_pid = tswap32(target_efl->l_pid);
9169 unlock_user_struct(target_efl, arg3, 0);
9173 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
9175 fl.l_type = tswap16(target_fl->l_type);
9176 fl.l_whence = tswap16(target_fl->l_whence);
9177 fl.l_start = tswap64(target_fl->l_start);
9178 fl.l_len = tswap64(target_fl->l_len);
9179 fl.l_pid = tswap32(target_fl->l_pid);
9180 unlock_user_struct(target_fl, arg3, 0);
9182 ret = get_errno(fcntl(arg1, cmd, &fl));
9185 if (((CPUARMState *)cpu_env)->eabi) {
9186 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
9188 target_efl->l_type = tswap16(fl.l_type);
9189 target_efl->l_whence = tswap16(fl.l_whence);
9190 target_efl->l_start = tswap64(fl.l_start);
9191 target_efl->l_len = tswap64(fl.l_len);
9192 target_efl->l_pid = tswap32(fl.l_pid);
9193 unlock_user_struct(target_efl, arg3, 1);
9197 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
9199 target_fl->l_type = tswap16(fl.l_type);
9200 target_fl->l_whence = tswap16(fl.l_whence);
9201 target_fl->l_start = tswap64(fl.l_start);
9202 target_fl->l_len = tswap64(fl.l_len);
9203 target_fl->l_pid = tswap32(fl.l_pid);
9204 unlock_user_struct(target_fl, arg3, 1);
9209 case TARGET_F_SETLK64:
9210 case TARGET_F_SETLKW64:
9212 if (((CPUARMState *)cpu_env)->eabi) {
9213 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
9215 fl.l_type = tswap16(target_efl->l_type);
9216 fl.l_whence = tswap16(target_efl->l_whence);
9217 fl.l_start = tswap64(target_efl->l_start);
9218 fl.l_len = tswap64(target_efl->l_len);
9219 fl.l_pid = tswap32(target_efl->l_pid);
9220 unlock_user_struct(target_efl, arg3, 0);
9224 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
9226 fl.l_type = tswap16(target_fl->l_type);
9227 fl.l_whence = tswap16(target_fl->l_whence);
9228 fl.l_start = tswap64(target_fl->l_start);
9229 fl.l_len = tswap64(target_fl->l_len);
9230 fl.l_pid = tswap32(target_fl->l_pid);
9231 unlock_user_struct(target_fl, arg3, 0);
9233 ret = get_errno(fcntl(arg1, cmd, &fl));
9236 ret = do_fcntl(arg1, arg2, arg3);
9242 #ifdef TARGET_NR_cacheflush
9243 case TARGET_NR_cacheflush:
9244 /* self-modifying code is handled automatically, so nothing needed */
9248 #ifdef TARGET_NR_security
9249 case TARGET_NR_security:
9252 #ifdef TARGET_NR_getpagesize
9253 case TARGET_NR_getpagesize:
9254 ret = TARGET_PAGE_SIZE;
9257 case TARGET_NR_gettid:
9258 ret = get_errno(gettid());
9260 #ifdef TARGET_NR_readahead
9261 case TARGET_NR_readahead:
9262 #if TARGET_ABI_BITS == 32
9263 if (regpairs_aligned(cpu_env)) {
9268 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
9270 ret = get_errno(readahead(arg1, arg2, arg3));
9275 #ifdef TARGET_NR_setxattr
9276 case TARGET_NR_listxattr:
9277 case TARGET_NR_llistxattr:
9281 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9283 ret = -TARGET_EFAULT;
9287 p = lock_user_string(arg1);
9289 if (num == TARGET_NR_listxattr) {
9290 ret = get_errno(listxattr(p, b, arg3));
9292 ret = get_errno(llistxattr(p, b, arg3));
9295 ret = -TARGET_EFAULT;
9297 unlock_user(p, arg1, 0);
9298 unlock_user(b, arg2, arg3);
9301 case TARGET_NR_flistxattr:
9305 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9307 ret = -TARGET_EFAULT;
9311 ret = get_errno(flistxattr(arg1, b, arg3));
9312 unlock_user(b, arg2, arg3);
9315 case TARGET_NR_setxattr:
9316 case TARGET_NR_lsetxattr:
9318 void *p, *n, *v = 0;
9320 v = lock_user(VERIFY_READ, arg3, arg4, 1);
9322 ret = -TARGET_EFAULT;
9326 p = lock_user_string(arg1);
9327 n = lock_user_string(arg2);
9329 if (num == TARGET_NR_setxattr) {
9330 ret = get_errno(setxattr(p, n, v, arg4, arg5));
9332 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
9335 ret = -TARGET_EFAULT;
9337 unlock_user(p, arg1, 0);
9338 unlock_user(n, arg2, 0);
9339 unlock_user(v, arg3, 0);
9342 case TARGET_NR_fsetxattr:
9346 v = lock_user(VERIFY_READ, arg3, arg4, 1);
9348 ret = -TARGET_EFAULT;
9352 n = lock_user_string(arg2);
9354 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
9356 ret = -TARGET_EFAULT;
9358 unlock_user(n, arg2, 0);
9359 unlock_user(v, arg3, 0);
9362 case TARGET_NR_getxattr:
9363 case TARGET_NR_lgetxattr:
9365 void *p, *n, *v = 0;
9367 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9369 ret = -TARGET_EFAULT;
9373 p = lock_user_string(arg1);
9374 n = lock_user_string(arg2);
9376 if (num == TARGET_NR_getxattr) {
9377 ret = get_errno(getxattr(p, n, v, arg4));
9379 ret = get_errno(lgetxattr(p, n, v, arg4));
9382 ret = -TARGET_EFAULT;
9384 unlock_user(p, arg1, 0);
9385 unlock_user(n, arg2, 0);
9386 unlock_user(v, arg3, arg4);
9389 case TARGET_NR_fgetxattr:
9393 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9395 ret = -TARGET_EFAULT;
9399 n = lock_user_string(arg2);
9401 ret = get_errno(fgetxattr(arg1, n, v, arg4));
9403 ret = -TARGET_EFAULT;
9405 unlock_user(n, arg2, 0);
9406 unlock_user(v, arg3, arg4);
9409 case TARGET_NR_removexattr:
9410 case TARGET_NR_lremovexattr:
9413 p = lock_user_string(arg1);
9414 n = lock_user_string(arg2);
9416 if (num == TARGET_NR_removexattr) {
9417 ret = get_errno(removexattr(p, n));
9419 ret = get_errno(lremovexattr(p, n));
9422 ret = -TARGET_EFAULT;
9424 unlock_user(p, arg1, 0);
9425 unlock_user(n, arg2, 0);
9428 case TARGET_NR_fremovexattr:
9431 n = lock_user_string(arg2);
9433 ret = get_errno(fremovexattr(arg1, n));
9435 ret = -TARGET_EFAULT;
9437 unlock_user(n, arg2, 0);
9441 #endif /* CONFIG_ATTR */
9442 #ifdef TARGET_NR_set_thread_area
9443 case TARGET_NR_set_thread_area:
9444 #if defined(TARGET_MIPS)
9445 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
9448 #elif defined(TARGET_CRIS)
9450 ret = -TARGET_EINVAL;
9452 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
9456 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
9457 ret = do_set_thread_area(cpu_env, arg1);
9459 #elif defined(TARGET_M68K)
9461 TaskState *ts = cpu->opaque;
9462 ts->tp_value = arg1;
9467 goto unimplemented_nowarn;
9470 #ifdef TARGET_NR_get_thread_area
9471 case TARGET_NR_get_thread_area:
9472 #if defined(TARGET_I386) && defined(TARGET_ABI32)
9473 ret = do_get_thread_area(cpu_env, arg1);
/* Interior of QEMU linux-user do_syscall(): the tail of the big switch (num)
 * that translates guest syscalls to host syscalls.  The leading number on
 * each line is the original file's line number; gaps in that numbering show
 * that intermediate lines (braces, break statements, #endif, error paths)
 * are elided from this extract.  Common conventions visible here:
 *   - argN      : raw guest syscall arguments (abi_long)
 *   - get_errno : converts a host -1/errno result into a negative errno
 *   - lock_user*/unlock_user* : map/unmap guest memory for host access
 *   - target_to_host_* / host_to_target_* : struct layout + byteswap shims
 */
/* TaskState lookup per-arch; the M68K branch reads cpu->opaque. */
9475 #elif defined(TARGET_M68K)
9477 TaskState *ts = cpu->opaque;
9482 goto unimplemented_nowarn;
/* getdomainname: deliberately unimplemented, silently (no warning log). */
9485 #ifdef TARGET_NR_getdomainname
9486 case TARGET_NR_getdomainname:
9487 goto unimplemented_nowarn;
/* clock_gettime(clk_id=arg1, tp=arg2): run on host, then copy the
 * struct timespec back out to guest memory on success. */
9490 #ifdef TARGET_NR_clock_gettime
9491 case TARGET_NR_clock_gettime:
9494 ret = get_errno(clock_gettime(arg1, &ts));
9495 if (!is_error(ret)) {
9496 host_to_target_timespec(arg2, &ts);
/* clock_getres(clk_id=arg1, res=arg2): same pattern as clock_gettime. */
9501 #ifdef TARGET_NR_clock_getres
9502 case TARGET_NR_clock_getres:
9505 ret = get_errno(clock_getres(arg1, &ts));
9506 if (!is_error(ret)) {
9507 host_to_target_timespec(arg2, &ts);
/* clock_nanosleep(clk=arg1, flags=arg2, req=arg3, rem=arg4): the same
 * host ts buffer is reused for the remaining time when arg4 is non-NULL. */
9512 #ifdef TARGET_NR_clock_nanosleep
9513 case TARGET_NR_clock_nanosleep:
9516 target_to_host_timespec(&ts, arg3);
9517 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
9519 host_to_target_timespec(arg4, &ts);
9521 #if defined(TARGET_PPC)
9522 /* clock_nanosleep is odd in that it returns positive errno values.
9523 * On PPC, CR0 bit 3 should be set in such a situation. */
9525 ((CPUPPCState *)cpu_env)->crf[0] |= 1;
/* set_tid_address: arg1 is a guest pointer; g2h() converts it to the
 * host address before handing it to the kernel. */
9532 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
9533 case TARGET_NR_set_tid_address:
9534 ret = get_errno(set_tid_address((int *)g2h(arg1)));
/* tkill/tgkill: thread-directed signals; the signal number must be
 * remapped from the guest's numbering to the host's. */
9538 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
9539 case TARGET_NR_tkill:
9540 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
9544 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
9545 case TARGET_NR_tgkill:
9546 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
9547 target_to_host_signal(arg3)));
/* Robust futex lists: intentionally ENOSYS — rationale in the original
 * comment below. */
9551 #ifdef TARGET_NR_set_robust_list
9552 case TARGET_NR_set_robust_list:
9553 case TARGET_NR_get_robust_list:
9554 /* The ABI for supporting robust futexes has userspace pass
9555 * the kernel a pointer to a linked list which is updated by
9556 * userspace after the syscall; the list is walked by the kernel
9557 * when the thread exits. Since the linked list in QEMU guest
9558 * memory isn't a valid linked list for the host and we have
9559 * no way to reliably intercept the thread-death event, we can't
9560 * support these. Silently return ENOSYS so that guest userspace
9561 * falls back to a non-robust futex implementation (which should
9562 * be OK except in the corner case of the guest crashing while
9563 * holding a mutex that is shared with another process via
9566 goto unimplemented_nowarn;
/* utimensat(dirfd=arg1, path=arg2, times=arg3, flags=arg4): arg3 points
 * at an array of two target_timespec; a NULL path is passed through
 * (the kernel accepts it to operate on dirfd itself). */
9569 #if defined(TARGET_NR_utimensat)
9570 case TARGET_NR_utimensat:
9572 struct timespec *tsp, ts[2];
9576 target_to_host_timespec(ts, arg3);
9577 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
9581 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
9583 if (!(p = lock_user_string(arg2))) {
9584 ret = -TARGET_EFAULT;
9587 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
9588 unlock_user(p, arg2, 0);
/* futex: fully delegated to the do_futex() helper. */
9593 case TARGET_NR_futex:
9594 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
/* inotify family: thin pass-throughs; add_watch needs the pathname
 * mapped from guest memory (and run through path() rewriting). */
9596 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
9597 case TARGET_NR_inotify_init:
9598 ret = get_errno(sys_inotify_init());
9601 #ifdef CONFIG_INOTIFY1
9602 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
9603 case TARGET_NR_inotify_init1:
9604 ret = get_errno(sys_inotify_init1(arg1));
9608 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
9609 case TARGET_NR_inotify_add_watch:
9610 p = lock_user_string(arg2);
9611 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
9612 unlock_user(p, arg2, 0);
9615 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
9616 case TARGET_NR_inotify_rm_watch:
9617 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
/* POSIX message queues.  NOTE(review): lock_user_string(arg1 - 1) is
 * unusual — it maps from one byte before the name pointer; presumably a
 * quirk around mq name handling, but verify against the elided context
 * before touching it. */
9621 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
9622 case TARGET_NR_mq_open:
9624 struct mq_attr posix_mq_attr, *attrp;
9626 p = lock_user_string(arg1 - 1);
9628 copy_from_user_mq_attr (&posix_mq_attr, arg4);
9629 attrp = &posix_mq_attr;
9633 ret = get_errno(mq_open(p, arg2, arg3, attrp));
9634 unlock_user (p, arg1, 0);
9638 case TARGET_NR_mq_unlink:
9639 p = lock_user_string(arg1 - 1);
9640 ret = get_errno(mq_unlink(p));
9641 unlock_user (p, arg1, 0);
/* mq_timedsend/mq_timedreceive: when the timeout (arg5) is absent the
 * untimed mq_send/mq_receive variant is used instead (branch elided). */
9644 case TARGET_NR_mq_timedsend:
9648 p = lock_user (VERIFY_READ, arg2, arg3, 1);
9650 target_to_host_timespec(&ts, arg5);
9651 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
9652 host_to_target_timespec(arg5, &ts);
9655 ret = get_errno(mq_send(arg1, p, arg3, arg4));
9656 unlock_user (p, arg2, arg3);
9660 case TARGET_NR_mq_timedreceive:
9665 p = lock_user (VERIFY_READ, arg2, arg3, 1);
9667 target_to_host_timespec(&ts, arg5);
9668 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
9669 host_to_target_timespec(arg5, &ts);
9672 ret = get_errno(mq_receive(arg1, p, arg3, &prio));
9673 unlock_user (p, arg2, arg3);
9675 put_user_u32(prio, arg4);
9679 /* Not implemented for now... */
9680 /* case TARGET_NR_mq_notify: */
/* mq_getsetattr: get into posix_mq_attr_out, optionally set from arg2;
 * note results are OR-ed together rather than checked individually. */
9683 case TARGET_NR_mq_getsetattr:
9685 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
9688 ret = mq_getattr(arg1, &posix_mq_attr_out);
9689 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
9692 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
9693 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
/* tee/splice/vmsplice: pipe plumbing.  splice reads optional 64-bit
 * offsets from guest memory before the call and writes them back after. */
9700 #ifdef CONFIG_SPLICE
9701 #ifdef TARGET_NR_tee
9704 ret = get_errno(tee(arg1,arg2,arg3,arg4));
9708 #ifdef TARGET_NR_splice
9709 case TARGET_NR_splice:
9711 loff_t loff_in, loff_out;
9712 loff_t *ploff_in = NULL, *ploff_out = NULL;
9714 if (get_user_u64(loff_in, arg2)) {
9717 ploff_in = &loff_in;
9720 if (get_user_u64(loff_out, arg4)) {
9723 ploff_out = &loff_out;
9725 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
9727 if (put_user_u64(loff_in, arg2)) {
9732 if (put_user_u64(loff_out, arg4)) {
9739 #ifdef TARGET_NR_vmsplice
9740 case TARGET_NR_vmsplice:
9742 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9744 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
9745 unlock_iovec(vec, arg2, arg3, 0);
9747 ret = -host_to_target_errno(errno);
9752 #endif /* CONFIG_SPLICE */
/* eventfd/eventfd2: new fds get fd_trans_unregister() so no stale
 * fd translator is attached to a recycled descriptor number. */
9753 #ifdef CONFIG_EVENTFD
9754 #if defined(TARGET_NR_eventfd)
9755 case TARGET_NR_eventfd:
9756 ret = get_errno(eventfd(arg1, 0));
9757 fd_trans_unregister(ret);
9760 #if defined(TARGET_NR_eventfd2)
9761 case TARGET_NR_eventfd2:
9763 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
9764 if (arg2 & TARGET_O_NONBLOCK) {
9765 host_flags |= O_NONBLOCK;
9767 if (arg2 & TARGET_O_CLOEXEC) {
9768 host_flags |= O_CLOEXEC;
9770 ret = get_errno(eventfd(arg1, host_flags));
9771 fd_trans_unregister(ret);
9775 #endif /* CONFIG_EVENTFD */
/* fallocate / sync_file_range: on 32-bit ABIs the 64-bit offsets arrive
 * split across two registers and are reassembled with target_offset64();
 * MIPS additionally skips a register (alignment padding), hence the
 * shifted argument numbering in its branch. */
9776 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
9777 case TARGET_NR_fallocate:
9778 #if TARGET_ABI_BITS == 32
9779 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
9780 target_offset64(arg5, arg6)));
9782 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
9786 #if defined(CONFIG_SYNC_FILE_RANGE)
9787 #if defined(TARGET_NR_sync_file_range)
9788 case TARGET_NR_sync_file_range:
9789 #if TARGET_ABI_BITS == 32
9790 #if defined(TARGET_MIPS)
9791 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
9792 target_offset64(arg5, arg6), arg7));
9794 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
9795 target_offset64(arg4, arg5), arg6));
9796 #endif /* !TARGET_MIPS */
9798 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
9802 #if defined(TARGET_NR_sync_file_range2)
9803 case TARGET_NR_sync_file_range2:
9804 /* This is like sync_file_range but the arguments are reordered */
9805 #if TARGET_ABI_BITS == 32
9806 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
9807 target_offset64(arg5, arg6), arg2));
9809 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
/* signalfd4(fd, mask, sizemask, flags): arg3 (sigsetsize) is handled
 * inside do_signalfd4, so only arg4 (flags) is forwarded; plain
 * signalfd is the flags==0 variant. */
9814 #if defined(TARGET_NR_signalfd4)
9815 case TARGET_NR_signalfd4:
9816 ret = do_signalfd4(arg1, arg2, arg4);
9819 #if defined(TARGET_NR_signalfd)
9820 case TARGET_NR_signalfd:
9821 ret = do_signalfd4(arg1, arg2, 0);
/* epoll family. */
9824 #if defined(CONFIG_EPOLL)
9825 #if defined(TARGET_NR_epoll_create)
9826 case TARGET_NR_epoll_create:
9827 ret = get_errno(epoll_create(arg1));
9830 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
9831 case TARGET_NR_epoll_create1:
9832 ret = get_errno(epoll_create1(arg1));
/* epoll_ctl: convert one target_epoll_event (byteswap events; the
 * data union is copied opaquely as 64 bits). */
9835 #if defined(TARGET_NR_epoll_ctl)
9836 case TARGET_NR_epoll_ctl:
9838 struct epoll_event ep;
9839 struct epoll_event *epp = 0;
9841 struct target_epoll_event *target_ep;
9842 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
9845 ep.events = tswap32(target_ep->events);
9846 /* The epoll_data_t union is just opaque data to the kernel,
9847 * so we transfer all 64 bits across and need not worry what
9848 * actual data type it is.
9850 ep.data.u64 = tswap64(target_ep->data.u64);
9851 unlock_user_struct(target_ep, arg4, 0);
9854 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
/* epoll_wait / epoll_pwait share one body: the guest event buffer is
 * locked for writing, a host-side event array lives on the stack
 * (alloca — size comes from guest-supplied maxevents; the bounds check
 * is in elided lines), and an inner switch picks the pwait (with
 * converted sigmask) or plain wait host call.  Results are swapped
 * back into the guest buffer. */
9859 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
9860 #define IMPLEMENT_EPOLL_PWAIT
9862 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
9863 #if defined(TARGET_NR_epoll_wait)
9864 case TARGET_NR_epoll_wait:
9866 #if defined(IMPLEMENT_EPOLL_PWAIT)
9867 case TARGET_NR_epoll_pwait:
9870 struct target_epoll_event *target_ep;
9871 struct epoll_event *ep;
9873 int maxevents = arg3;
9876 target_ep = lock_user(VERIFY_WRITE, arg2,
9877 maxevents * sizeof(struct target_epoll_event), 1);
9882 ep = alloca(maxevents * sizeof(struct epoll_event));
9885 #if defined(IMPLEMENT_EPOLL_PWAIT)
9886 case TARGET_NR_epoll_pwait:
9888 target_sigset_t *target_set;
9889 sigset_t _set, *set = &_set;
9892 target_set = lock_user(VERIFY_READ, arg5,
9893 sizeof(target_sigset_t), 1);
9895 unlock_user(target_ep, arg2, 0);
9898 target_to_host_sigset(set, target_set);
9899 unlock_user(target_set, arg5, 0);
9904 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
9908 #if defined(TARGET_NR_epoll_wait)
9909 case TARGET_NR_epoll_wait:
9910 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
9914 ret = -TARGET_ENOSYS;
9916 if (!is_error(ret)) {
9918 for (i = 0; i < ret; i++) {
9919 target_ep[i].events = tswap32(ep[i].events);
9920 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
9923 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
/* prlimit64: both rlimit structs are byteswapped across the boundary;
 * old limits are written back only on success and when arg4 != 0. */
9928 #ifdef TARGET_NR_prlimit64
9929 case TARGET_NR_prlimit64:
9931 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
9932 struct target_rlimit64 *target_rnew, *target_rold;
9933 struct host_rlimit64 rnew, rold, *rnewp = 0;
9934 int resource = target_to_host_resource(arg2);
9936 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
9939 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
9940 rnew.rlim_max = tswap64(target_rnew->rlim_max);
9941 unlock_user_struct(target_rnew, arg3, 0);
9945 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
9946 if (!is_error(ret) && arg4) {
9947 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
9950 target_rold->rlim_cur = tswap64(rold.rlim_cur);
9951 target_rold->rlim_max = tswap64(rold.rlim_max);
9952 unlock_user_struct(target_rold, arg4, 1);
/* gethostname(name=arg1, len=arg2): host call writes straight into the
 * locked guest buffer; EFAULT if the buffer can't be mapped. */
9957 #ifdef TARGET_NR_gethostname
9958 case TARGET_NR_gethostname:
9960 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9962 ret = get_errno(gethostname(name, arg2));
9963 unlock_user(name, arg1, arg2);
9965 ret = -TARGET_EFAULT;
/* atomic_cmpxchg_32: emulated compare-and-swap on guest memory; a
 * faulting address raises SIGSEGV/SEGV_MAPERR in the guest.  The
 * in-source comment already flags that this is NOT actually atomic
 * (it should use start_exclusive). */
9970 #ifdef TARGET_NR_atomic_cmpxchg_32
9971 case TARGET_NR_atomic_cmpxchg_32:
9973 /* should use start_exclusive from main.c */
9974 abi_ulong mem_value;
9975 if (get_user_u32(mem_value, arg6)) {
9976 target_siginfo_t info;
9977 info.si_signo = SIGSEGV;
9979 info.si_code = TARGET_SEGV_MAPERR;
9980 info._sifields._sigfault._addr = arg6;
9981 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
9985 if (mem_value == arg2)
9986 put_user_u32(arg1, arg6);
9991 #ifdef TARGET_NR_atomic_barrier
9992 case TARGET_NR_atomic_barrier:
9994 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
/* POSIX timers: host timer handles live in the g_posix_timers table;
 * the guest-visible timer id is the table index tagged with TIMER_MAGIC. */
10000 #ifdef TARGET_NR_timer_create
10001 case TARGET_NR_timer_create:
10003 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
10005 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
10008 int timer_index = next_free_host_timer();
10010 if (timer_index < 0) {
10011 ret = -TARGET_EAGAIN;
10013 timer_t *phtimer = g_posix_timers + timer_index;
10016 phost_sevp = &host_sevp;
10017 ret = target_to_host_sigevent(phost_sevp, arg2);
10023 ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
10027 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
10036 #ifdef TARGET_NR_timer_settime
10037 case TARGET_NR_timer_settime:
10039 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
10040 * struct itimerspec * old_value */
10041 target_timer_t timerid = get_timer_id(arg1);
10045 } else if (arg3 == 0) {
10046 ret = -TARGET_EINVAL;
10048 timer_t htimer = g_posix_timers[timerid];
10049 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
10051 target_to_host_itimerspec(&hspec_new, arg3);
10053 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
/* NOTE(review): per the args comment above, old_value is arg4, but the
 * old itimerspec is written back to arg2 (the flags argument).  This
 * looks like a copy-back-to-wrong-argument bug — confirm against
 * timer_settime(2) and fix to arg4 (guarded by arg4 != 0). */
10054 host_to_target_itimerspec(arg2, &hspec_old);
10060 #ifdef TARGET_NR_timer_gettime
10061 case TARGET_NR_timer_gettime:
10063 /* args: timer_t timerid, struct itimerspec *curr_value */
10064 target_timer_t timerid = get_timer_id(arg1);
10068 } else if (!arg2) {
10069 ret = -TARGET_EFAULT;
10071 timer_t htimer = g_posix_timers[timerid];
10072 struct itimerspec hspec;
10073 ret = get_errno(timer_gettime(htimer, &hspec));
10075 if (host_to_target_itimerspec(arg2, &hspec)) {
10076 ret = -TARGET_EFAULT;
10083 #ifdef TARGET_NR_timer_getoverrun
10084 case TARGET_NR_timer_getoverrun:
10086 /* args: timer_t timerid */
10087 target_timer_t timerid = get_timer_id(arg1);
10092 timer_t htimer = g_posix_timers[timerid];
10093 ret = get_errno(timer_getoverrun(htimer));
/* NOTE(review): fd_trans_unregister() is meant for file descriptors,
 * but ret here is an overrun count, not an fd — likely a stray line
 * copied from an fd-returning case; verify and consider removing. */
10095 fd_trans_unregister(ret);
10100 #ifdef TARGET_NR_timer_delete
10101 case TARGET_NR_timer_delete:
10103 /* args: timer_t timerid */
10104 target_timer_t timerid = get_timer_id(arg1);
10109 timer_t htimer = g_posix_timers[timerid];
10110 ret = get_errno(timer_delete(htimer));
/* Free the slot in the host-timer table for reuse. */
10111 g_posix_timers[timerid] = 0;
/* timerfd: create remaps guest open flags via fcntl_flags_tbl;
 * gettime/settime convert itimerspec both ways. */
10117 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
10118 case TARGET_NR_timerfd_create:
10119 ret = get_errno(timerfd_create(arg1,
10120 target_to_host_bitmask(arg2, fcntl_flags_tbl)));
10124 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
10125 case TARGET_NR_timerfd_gettime:
10127 struct itimerspec its_curr;
10129 ret = get_errno(timerfd_gettime(arg1, &its_curr));
10131 if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
10138 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
10139 case TARGET_NR_timerfd_settime:
10141 struct itimerspec its_new, its_old, *p_new;
10144 if (target_to_host_itimerspec(&its_new, arg3)) {
10152 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
10154 if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
/* ioprio / setns / unshare: direct pass-throughs. */
10161 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
10162 case TARGET_NR_ioprio_get:
10163 ret = get_errno(ioprio_get(arg1, arg2));
10167 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
10168 case TARGET_NR_ioprio_set:
10169 ret = get_errno(ioprio_set(arg1, arg2, arg3));
10173 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
10174 case TARGET_NR_setns:
10175 ret = get_errno(setns(arg1, arg2));
10178 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
10179 case TARGET_NR_unshare:
10180 ret = get_errno(unshare(arg1));
/* Default: log the unsupported syscall number; unimplemented_nowarn is
 * the silent variant used by the explicit gotos above.  Both end in
 * -TARGET_ENOSYS.  The trailing lines are the syscall-return tracing
 * and the shared EFAULT exit path of do_syscall. */
10186 gemu_log("qemu: Unsupported syscall: %d\n", num);
10187 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
10188 unimplemented_nowarn:
10190 ret = -TARGET_ENOSYS;
10195 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
10198 print_syscall_ret(num, ret);
10201 ret = -TARGET_EFAULT;