4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
32 #include <sys/types.h>
38 #include <sys/mount.h>
40 #include <sys/fsuid.h>
41 #include <sys/personality.h>
42 #include <sys/prctl.h>
43 #include <sys/resource.h>
46 #include <linux/capability.h>
50 int __clone2(int (*fn)(void *), void *child_stack_base,
51 size_t stack_size, int flags, void *arg, ...);
53 #include <sys/socket.h>
57 #include <sys/times.h>
60 #include <sys/statfs.h>
62 #include <sys/sysinfo.h>
63 #include <sys/signalfd.h>
64 //#include <sys/user.h>
65 #include <netinet/ip.h>
66 #include <netinet/tcp.h>
67 #include <linux/wireless.h>
68 #include <linux/icmp.h>
69 #include "qemu-common.h"
71 #include <sys/timerfd.h>
77 #include <sys/eventfd.h>
80 #include <sys/epoll.h>
83 #include "qemu/xattr.h"
85 #ifdef CONFIG_SENDFILE
86 #include <sys/sendfile.h>
89 #define termios host_termios
90 #define winsize host_winsize
91 #define termio host_termio
92 #define sgttyb host_sgttyb /* same as target */
93 #define tchars host_tchars /* same as target */
94 #define ltchars host_ltchars /* same as target */
96 #include <linux/termios.h>
97 #include <linux/unistd.h>
98 #include <linux/cdrom.h>
99 #include <linux/hdreg.h>
100 #include <linux/soundcard.h>
101 #include <linux/kd.h>
102 #include <linux/mtio.h>
103 #include <linux/fs.h>
104 #if defined(CONFIG_FIEMAP)
105 #include <linux/fiemap.h>
107 #include <linux/fb.h>
108 #include <linux/vt.h>
109 #include <linux/dm-ioctl.h>
110 #include <linux/reboot.h>
111 #include <linux/route.h>
112 #include <linux/filter.h>
113 #include <linux/blkpg.h>
114 #include "linux_loop.h"
119 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
120 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
124 //#include <linux/msdos_fs.h>
125 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
126 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
/* _syscallN(type, name, typeK, argK...): generate a small static wrapper
 * "name" that invokes the raw host syscall __NR_<name> via syscall(2), for
 * 0..6 arguments.  Used for syscalls that glibc does not (always) expose.
 * NOTE(review): interior lines (braces, one parameter-list continuation)
 * are missing from this extract relative to the embedded numbering. */
137 #define _syscall0(type,name) \
138 static type name (void) \
140 return syscall(__NR_##name); \
143 #define _syscall1(type,name,type1,arg1) \
144 static type name (type1 arg1) \
146 return syscall(__NR_##name, arg1); \
149 #define _syscall2(type,name,type1,arg1,type2,arg2) \
150 static type name (type1 arg1,type2 arg2) \
152 return syscall(__NR_##name, arg1, arg2); \
155 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
156 static type name (type1 arg1,type2 arg2,type3 arg3) \
158 return syscall(__NR_##name, arg1, arg2, arg3); \
161 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
162 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
164 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
167 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
169 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
171 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
175 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
176 type5,arg5,type6,arg6) \
177 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
180 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
/* Map our local sys_* wrapper names onto the host's real syscall numbers so
 * the _syscallN() generators above resolve __NR_sys_<x> to __NR_<x>. */
184 #define __NR_sys_uname __NR_uname
185 #define __NR_sys_getcwd1 __NR_getcwd
186 #define __NR_sys_getdents __NR_getdents
187 #define __NR_sys_getdents64 __NR_getdents64
188 #define __NR_sys_getpriority __NR_getpriority
189 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
190 #define __NR_sys_syslog __NR_syslog
191 #define __NR_sys_tgkill __NR_tgkill
192 #define __NR_sys_tkill __NR_tkill
193 #define __NR_sys_futex __NR_futex
194 #define __NR_sys_inotify_init __NR_inotify_init
195 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
196 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
/* 64-bit hosts have no separate _llseek; alias it to plain lseek. */
198 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
200 #define __NR__llseek __NR_lseek
203 /* Newer kernel ports have llseek() instead of _llseek() */
204 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
205 #define TARGET_NR__llseek TARGET_NR_llseek
/* Instantiate host-syscall wrappers (via _syscallN) for syscalls glibc does
 * not export, each guarded on both the target needing it (TARGET_NR_*) and
 * the host kernel providing it (__NR_*).
 * NOTE(review): several #if/#endif and fallback lines are missing from this
 * extract relative to the embedded numbering. */
209 _syscall0(int, gettid)
211 /* This is a replacement for the host gettid() and must return a host
213 static int gettid(void) {
217 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
218 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
220 #if !defined(__NR_getdents) || \
221 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
222 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
224 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
225 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
226 loff_t *, res, uint, wh);
228 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
229 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
230 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
231 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
233 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
234 _syscall2(int,sys_tkill,int,tid,int,sig)
236 #ifdef __NR_exit_group
237 _syscall1(int,exit_group,int,error_code)
239 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
240 _syscall1(int,set_tid_address,int *,tidptr)
242 #if defined(TARGET_NR_futex) && defined(__NR_futex)
243 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
244 const struct timespec *,timeout,int *,uaddr2,int,val3)
246 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
247 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
248 unsigned long *, user_mask_ptr);
249 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
250 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
251 unsigned long *, user_mask_ptr);
252 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
254 _syscall2(int, capget, struct __user_cap_header_struct *, header,
255 struct __user_cap_data_struct *, data);
256 _syscall2(int, capset, struct __user_cap_header_struct *, header,
257 struct __user_cap_data_struct *, data);
258 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
259 _syscall2(int, ioprio_get, int, which, int, who)
261 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
262 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
/* Translation table between target-ABI open(2)/fcntl(2) flag values and the
 * host's.  Each row is { target_mask, target_bits, host_mask, host_bits };
 * rows for flags that may not exist on every host are #if-guarded.
 * NOTE(review): the guarding #endif lines and the table terminator are
 * missing from this extract relative to the embedded numbering. */
265 static bitmask_transtbl fcntl_flags_tbl[] = {
266 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
267 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
268 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
269 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
270 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
271 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
272 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
273 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
274 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
275 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
276 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
277 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
278 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
279 #if defined(O_DIRECT)
280 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
282 #if defined(O_NOATIME)
283 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
285 #if defined(O_CLOEXEC)
286 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
289 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
291 /* Don't terminate the list prematurely on 64-bit host+guest. */
292 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
293 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
/* Per-fd payload/address translation hooks: some fds (e.g. ones created by
 * special syscalls) need their data or sockaddr converted between target and
 * host representations.  target_fd_trans is a lazily-grown array, indexed by
 * host fd, of pointers to the translator set for that fd (NULL = none). */
298 typedef abi_long (*TargetFdDataFunc)(void *, size_t);
299 typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t);
300 typedef struct TargetFdTrans {
301 TargetFdDataFunc host_to_target_data;
302 TargetFdDataFunc target_to_host_data;
303 TargetFdAddrFunc target_to_host_addr;
/* Current capacity of target_fd_trans; fds >= target_fd_max have no entry. */
306 static TargetFdTrans **target_fd_trans;
308 static unsigned int target_fd_max;
/* Return the host->target data translator registered for fd, or NULL when
 * fd is out of range or has no translator (fall-through return missing from
 * this extract). */
310 static TargetFdDataFunc fd_trans_host_to_target_data(int fd)
312 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
313 return target_fd_trans[fd]->host_to_target_data;
/* Return the target->host sockaddr translator registered for fd, or NULL
 * when none (fall-through return missing from this extract). */
318 static TargetFdAddrFunc fd_trans_target_to_host_addr(int fd)
320 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
321 return target_fd_trans[fd]->target_to_host_addr;
/* Attach translator set 'trans' to fd, growing the table in 64-entry slices
 * when fd exceeds the current capacity; newly added slots are zeroed so they
 * read as "no translator". */
326 static void fd_trans_register(int fd, TargetFdTrans *trans)
330 if (fd >= target_fd_max) {
331 oldmax = target_fd_max;
332 target_fd_max = ((fd >> 6) + 1) << 6; /* by slice of 64 entries */
333 target_fd_trans = g_realloc(target_fd_trans,
/* NOTE(review): realloc sizing uses sizeof(TargetFdTrans) but the memset
 * below uses sizeof(TargetFdTrans *); the array holds pointers, so the
 * realloc size looks over-allocated (harmless) — worth confirming upstream. */
334 target_fd_max * sizeof(TargetFdTrans));
335 memset((void *)(target_fd_trans + oldmax), 0,
336 (target_fd_max - oldmax) * sizeof(TargetFdTrans *));
338 target_fd_trans[fd] = trans;
/* Drop any translator attached to fd; out-of-range fds are ignored. */
341 static void fd_trans_unregister(int fd)
343 if (fd >= 0 && fd < target_fd_max) {
344 target_fd_trans[fd] = NULL;
/* After dup()/dup2(): newfd inherits oldfd's translator (if any); any
 * translator previously on newfd is cleared first. */
348 static void fd_trans_dup(int oldfd, int newfd)
350 fd_trans_unregister(newfd);
351 if (oldfd < target_fd_max && target_fd_trans[oldfd]) {
352 fd_trans_register(newfd, target_fd_trans[oldfd]);
/*
 * getcwd(2)-style helper: fill 'buf' (capacity 'size') with the current
 * working directory.  Returns the length of the result INCLUDING the
 * trailing NUL on success (matching the kernel getcwd syscall's return
 * convention), or -1 with errno set by getcwd() on failure (e.g. ERANGE
 * when the buffer is too small).
 *
 * NOTE(review): the extract had lost the failure-return lines; restored
 * here to match the surrounding logic ("getcwd() sets errno").
 */
static int sys_getcwd1(char *buf, size_t size)
{
    if (getcwd(buf, size) == NULL) {
        /* getcwd() sets errno */
        return (-1);
    }
    return strlen(buf)+1;
}
365 static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode)
368 * open(2) has extra parameter 'mode' when called with
371 if ((flags & O_CREAT) != 0) {
372 return (openat(dirfd, pathname, flags, mode));
374 return (openat(dirfd, pathname, flags));
/* utimensat emulation, three strategies: (1) host libc has utimensat/
 * futimens (CONFIG_UTIMENSAT) — call them, using futimens when pathname is
 * NULL per the syscall's convention; (2) no libc support but the host kernel
 * has __NR_utimensat — raw syscall wrapper; (3) neither — stub (body missing
 * from this extract; presumably returns ENOSYS — confirm upstream). */
377 #ifdef TARGET_NR_utimensat
378 #ifdef CONFIG_UTIMENSAT
379 static int sys_utimensat(int dirfd, const char *pathname,
380 const struct timespec times[2], int flags)
382 if (pathname == NULL)
383 return futimens(dirfd, times);
385 return utimensat(dirfd, pathname, times, flags);
387 #elif defined(__NR_utimensat)
388 #define __NR_sys_utimensat __NR_utimensat
389 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
390 const struct timespec *,tsp,int,flags)
392 static int sys_utimensat(int dirfd, const char *pathname,
393 const struct timespec times[2], int flags)
399 #endif /* TARGET_NR_utimensat */
/* inotify support: thin wrappers over the host libc functions when
 * CONFIG_INOTIFY; otherwise the TARGET_NR_inotify_* defines are #undef'd so
 * the main syscall switch falls through to ENOSYS — guests survive without
 * inotify. */
401 #ifdef CONFIG_INOTIFY
402 #include <sys/inotify.h>
404 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
405 static int sys_inotify_init(void)
407 return (inotify_init());
410 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
411 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
413 return (inotify_add_watch(fd, pathname, mask));
416 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
417 static int sys_inotify_rm_watch(int fd, int32_t wd)
419 return (inotify_rm_watch(fd, wd));
422 #ifdef CONFIG_INOTIFY1
423 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
424 static int sys_inotify_init1(int flags)
426 return (inotify_init1(flags))
431 /* Userspace can usually survive runtime without inotify */
432 #undef TARGET_NR_inotify_init
433 #undef TARGET_NR_inotify_init1
434 #undef TARGET_NR_inotify_add_watch
435 #undef TARGET_NR_inotify_rm_watch
436 #endif /* CONFIG_INOTIFY */
/* Raw-syscall wrappers for ppoll, pselect6 and prlimit64.  When the host
 * headers predate the syscall, __NR_* is defined to -1 so the wrapper
 * compiles but the call fails at runtime with ENOSYS. */
438 #if defined(TARGET_NR_ppoll)
440 # define __NR_ppoll -1
442 #define __NR_sys_ppoll __NR_ppoll
443 _syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
444 struct timespec *, timeout, const sigset_t *, sigmask,
448 #if defined(TARGET_NR_pselect6)
449 #ifndef __NR_pselect6
450 # define __NR_pselect6 -1
452 #define __NR_sys_pselect6 __NR_pselect6
453 _syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
454 fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
457 #if defined(TARGET_NR_prlimit64)
458 #ifndef __NR_prlimit64
459 # define __NR_prlimit64 -1
461 #define __NR_sys_prlimit64 __NR_prlimit64
462 /* The glibc rlimit structure may not be that used by the underlying syscall */
463 struct host_rlimit64 {
467 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
468 const struct host_rlimit64 *, new_limit,
469 struct host_rlimit64 *, old_limit)
/* Fixed-size pool of host POSIX timers backing guest timer_create().
 * next_free_host_timer() claims the first zero slot (marking it with the
 * sentinel value 1) and returns its index; the "timer full" return path is
 * missing from this extract. */
473 #if defined(TARGET_NR_timer_create)
474 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
475 static timer_t g_posix_timers[32] = { 0, } ;
477 static inline int next_free_host_timer(void)
480 /* FIXME: Does finding the next free slot require a lock? */
481 for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
482 if (g_posix_timers[k] == 0) {
483 g_posix_timers[k] = (timer_t) 1;
/* Whether the target ABI passes 64-bit syscall arguments in aligned
 * even/odd register pairs (so a padding register must be skipped when
 * unmarshalling): true for ARM EABI, MIPS, and 32-bit PPC SysV; false
 * otherwise. */
491 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
493 static inline int regpairs_aligned(void *cpu_env) {
494 return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
496 #elif defined(TARGET_MIPS)
497 static inline int regpairs_aligned(void *cpu_env) { return 1; }
498 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
499 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
500 * of registers which translates to the same as ARM/MIPS, because we start with
502 static inline int regpairs_aligned(void *cpu_env) { return 1; }
504 static inline int regpairs_aligned(void *cpu_env) { return 0; }
/* errno translation tables.  host_to_target_errno_table[] lists only the
 * errno values whose numeric encoding differs between host and target ABIs;
 * entries not listed translate identically (see host_to_target_errno()
 * below, which falls back to the untranslated value).  The reverse table is
 * built from this one at startup by syscall_init(). */
507 #define ERRNO_TABLE_SIZE 1200
509 /* target_to_host_errno_table[] is initialized from
510 * host_to_target_errno_table[] in syscall_init(). */
511 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
515 * This list is the union of errno values overridden in asm-<arch>/errno.h
516 * minus the errnos that are not actually generic to all archs.
518 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
519 [EAGAIN] = TARGET_EAGAIN,
520 [EIDRM] = TARGET_EIDRM,
521 [ECHRNG] = TARGET_ECHRNG,
522 [EL2NSYNC] = TARGET_EL2NSYNC,
523 [EL3HLT] = TARGET_EL3HLT,
524 [EL3RST] = TARGET_EL3RST,
525 [ELNRNG] = TARGET_ELNRNG,
526 [EUNATCH] = TARGET_EUNATCH,
527 [ENOCSI] = TARGET_ENOCSI,
528 [EL2HLT] = TARGET_EL2HLT,
529 [EDEADLK] = TARGET_EDEADLK,
530 [ENOLCK] = TARGET_ENOLCK,
531 [EBADE] = TARGET_EBADE,
532 [EBADR] = TARGET_EBADR,
533 [EXFULL] = TARGET_EXFULL,
534 [ENOANO] = TARGET_ENOANO,
535 [EBADRQC] = TARGET_EBADRQC,
536 [EBADSLT] = TARGET_EBADSLT,
537 [EBFONT] = TARGET_EBFONT,
538 [ENOSTR] = TARGET_ENOSTR,
539 [ENODATA] = TARGET_ENODATA,
540 [ETIME] = TARGET_ETIME,
541 [ENOSR] = TARGET_ENOSR,
542 [ENONET] = TARGET_ENONET,
543 [ENOPKG] = TARGET_ENOPKG,
544 [EREMOTE] = TARGET_EREMOTE,
545 [ENOLINK] = TARGET_ENOLINK,
546 [EADV] = TARGET_EADV,
547 [ESRMNT] = TARGET_ESRMNT,
548 [ECOMM] = TARGET_ECOMM,
549 [EPROTO] = TARGET_EPROTO,
550 [EDOTDOT] = TARGET_EDOTDOT,
551 [EMULTIHOP] = TARGET_EMULTIHOP,
552 [EBADMSG] = TARGET_EBADMSG,
553 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
554 [EOVERFLOW] = TARGET_EOVERFLOW,
555 [ENOTUNIQ] = TARGET_ENOTUNIQ,
556 [EBADFD] = TARGET_EBADFD,
557 [EREMCHG] = TARGET_EREMCHG,
558 [ELIBACC] = TARGET_ELIBACC,
559 [ELIBBAD] = TARGET_ELIBBAD,
560 [ELIBSCN] = TARGET_ELIBSCN,
561 [ELIBMAX] = TARGET_ELIBMAX,
562 [ELIBEXEC] = TARGET_ELIBEXEC,
563 [EILSEQ] = TARGET_EILSEQ,
564 [ENOSYS] = TARGET_ENOSYS,
565 [ELOOP] = TARGET_ELOOP,
566 [ERESTART] = TARGET_ERESTART,
567 [ESTRPIPE] = TARGET_ESTRPIPE,
568 [ENOTEMPTY] = TARGET_ENOTEMPTY,
569 [EUSERS] = TARGET_EUSERS,
570 [ENOTSOCK] = TARGET_ENOTSOCK,
571 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
572 [EMSGSIZE] = TARGET_EMSGSIZE,
573 [EPROTOTYPE] = TARGET_EPROTOTYPE,
574 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
575 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
576 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
577 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
578 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
579 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
580 [EADDRINUSE] = TARGET_EADDRINUSE,
581 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
582 [ENETDOWN] = TARGET_ENETDOWN,
583 [ENETUNREACH] = TARGET_ENETUNREACH,
584 [ENETRESET] = TARGET_ENETRESET,
585 [ECONNABORTED] = TARGET_ECONNABORTED,
586 [ECONNRESET] = TARGET_ECONNRESET,
587 [ENOBUFS] = TARGET_ENOBUFS,
588 [EISCONN] = TARGET_EISCONN,
589 [ENOTCONN] = TARGET_ENOTCONN,
590 [EUCLEAN] = TARGET_EUCLEAN,
591 [ENOTNAM] = TARGET_ENOTNAM,
592 [ENAVAIL] = TARGET_ENAVAIL,
593 [EISNAM] = TARGET_EISNAM,
594 [EREMOTEIO] = TARGET_EREMOTEIO,
595 [ESHUTDOWN] = TARGET_ESHUTDOWN,
596 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
597 [ETIMEDOUT] = TARGET_ETIMEDOUT,
598 [ECONNREFUSED] = TARGET_ECONNREFUSED,
599 [EHOSTDOWN] = TARGET_EHOSTDOWN,
600 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
601 [EALREADY] = TARGET_EALREADY,
602 [EINPROGRESS] = TARGET_EINPROGRESS,
603 [ESTALE] = TARGET_ESTALE,
604 [ECANCELED] = TARGET_ECANCELED,
605 [ENOMEDIUM] = TARGET_ENOMEDIUM,
606 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
/* The following errnos are newer; guard on host availability (the #ifdef
 * lines for the earlier ones are missing from this extract). */
608 [ENOKEY] = TARGET_ENOKEY,
611 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
614 [EKEYREVOKED] = TARGET_EKEYREVOKED,
617 [EKEYREJECTED] = TARGET_EKEYREJECTED,
620 [EOWNERDEAD] = TARGET_EOWNERDEAD,
622 #ifdef ENOTRECOVERABLE
623 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
/* Translate a host errno to the target's encoding; values with no table
 * entry pass through unchanged (fall-through return missing from extract). */
627 static inline int host_to_target_errno(int err)
629 if(host_to_target_errno_table[err])
630 return host_to_target_errno_table[err];
/* Inverse of host_to_target_errno(), via the table syscall_init() builds;
 * unmapped values pass through (fall-through return missing from extract). */
634 static inline int target_to_host_errno(int err)
636 if (target_to_host_errno_table[err])
637 return target_to_host_errno_table[err];
/* Convert a host syscall result into the guest convention: on failure,
 * return the negated, target-encoded errno (the success path is missing
 * from this extract). */
641 static inline abi_long get_errno(abi_long ret)
644 return -host_to_target_errno(errno);
/* Linux convention: values in [-4095, -1] (viewed unsigned, >= -4096) are
 * negated errnos; everything else is a successful return value. */
649 static inline int is_error(abi_long ret)
651 return (abi_ulong)ret >= (abi_ulong)(-4096);
/* strerror() for a TARGET-encoded errno: map it back to the host encoding
 * first.  Out-of-range values are rejected (the rejection return is missing
 * from this extract). */
654 char *target_strerror(int err)
656 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
659 return strerror(target_to_host_errno(err));
/* Translate a host socket type (including the SOCK_CLOEXEC/SOCK_NONBLOCK
 * flag bits, when the host defines them) into the target's encoding.  The
 * low nibble is the base type; unknown base types pass through unchanged. */
662 static inline int host_to_target_sock_type(int host_type)
666 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
668 target_type = TARGET_SOCK_DGRAM;
671 target_type = TARGET_SOCK_STREAM;
674 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
678 #if defined(SOCK_CLOEXEC)
679 if (host_type & SOCK_CLOEXEC) {
680 target_type |= TARGET_SOCK_CLOEXEC;
684 #if defined(SOCK_NONBLOCK)
685 if (host_type & SOCK_NONBLOCK) {
686 target_type |= TARGET_SOCK_NONBLOCK;
/* Guest heap ("brk") state: target_brk is the current break,
 * target_original_brk the initial break (brk may never go below it), and
 * brk_page the host-page-aligned top of memory actually reserved so far. */
693 static abi_ulong target_brk;
694 static abi_ulong target_original_brk;
695 static abi_ulong brk_page;
/* Called by the loader to establish the initial break after the image. */
697 void target_set_brk(abi_ulong new_brk)
699 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
700 brk_page = HOST_PAGE_ALIGN(target_brk);
/* Debug tracing for do_brk(); expands to nothing unless re-enabled. */
703 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
704 #define DEBUGF_BRK(message, args...)
706 /* do_brk() must return target values and target errnos. */
707 abi_long do_brk(abi_ulong new_brk)
709 abi_long mapped_addr;
/* new_brk == 0 queries the current break; shrinking below the original
 * break is refused (both early-return paths' returns are missing from this
 * extract — only their trace lines survive). */
712 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
715 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
718 if (new_brk < target_original_brk) {
719 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
724 /* If the new brk is less than the highest page reserved to the
725 * target heap allocation, set it and we're almost done... */
726 if (new_brk <= brk_page) {
727 /* Heap contents are initialized to zero, as for anonymous
729 if (new_brk > target_brk) {
730 memset(g2h(target_brk), 0, new_brk - target_brk);
732 target_brk = new_brk;
733 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
737 /* We need to allocate more memory after the brk... Note that
738 * we don't use MAP_FIXED because that will map over the top of
739 * any existing mapping (like the one with the host libc or qemu
740 * itself); instead we treat "mapped but at wrong address" as
741 * a failure and unmap again.
743 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
744 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
745 PROT_READ|PROT_WRITE,
746 MAP_ANON|MAP_PRIVATE, 0, 0));
748 if (mapped_addr == brk_page) {
749 /* Heap contents are initialized to zero, as for anonymous
750 * mapped pages. Technically the new pages are already
751 * initialized to zero since they *are* anonymous mapped
752 * pages, however we have to take care with the contents that
753 * come from the remaining part of the previous page: it may
754 * contains garbage data due to a previous heap usage (grown
756 memset(g2h(target_brk), 0, brk_page - target_brk);
758 target_brk = new_brk;
759 brk_page = HOST_PAGE_ALIGN(target_brk);
760 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
763 } else if (mapped_addr != -1) {
764 /* Mapped but at wrong address, meaning there wasn't actually
765 * enough space for this brk.
767 target_munmap(mapped_addr, new_alloc_size);
769 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
772 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
775 #if defined(TARGET_ALPHA)
776 /* We (partially) emulate OSF/1 on Alpha, which requires we
777 return a proper errno, not an unchanged brk value. */
778 return -TARGET_ENOMEM;
780 /* For everything else, return the previous break. */
/* Unmarshal a guest fd_set (array of abi_ulong words) at target_fds_addr
 * into the host fd_set 'fds', bit by bit.  Returns 0 on success or
 * -TARGET_EFAULT if the guest memory cannot be locked. */
784 static inline abi_long copy_from_user_fdset(fd_set *fds,
785 abi_ulong target_fds_addr,
789 abi_ulong b, *target_fds;
/* nw = number of abi_ulong words covering n fd bits (round up). */
791 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
792 if (!(target_fds = lock_user(VERIFY_READ,
794 sizeof(abi_ulong) * nw,
796 return -TARGET_EFAULT;
800 for (i = 0; i < nw; i++) {
801 /* grab the abi_ulong */
802 __get_user(b, &target_fds[i]);
803 for (j = 0; j < TARGET_ABI_BITS; j++) {
804 /* check the bit inside the abi_ulong */
811 unlock_user(target_fds, target_fds_addr, 0);
/* Like copy_from_user_fdset(), but treats a NULL guest pointer as "no set":
 * *fds_ptr is pointed at fds when target_fds_addr is non-zero (the NULL
 * branch is missing from this extract). */
816 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
817 abi_ulong target_fds_addr,
820 if (target_fds_addr) {
821 if (copy_from_user_fdset(fds, target_fds_addr, n))
822 return -TARGET_EFAULT;
/* Marshal the host fd_set 'fds' back into guest memory as an array of
 * abi_ulong words, packing FD_ISSET bits.  Returns 0 on success or
 * -TARGET_EFAULT if the guest memory cannot be locked for writing. */
830 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
836 abi_ulong *target_fds;
838 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
839 if (!(target_fds = lock_user(VERIFY_WRITE,
841 sizeof(abi_ulong) * nw,
843 return -TARGET_EFAULT;
846 for (i = 0; i < nw; i++) {
848 for (j = 0; j < TARGET_ABI_BITS; j++) {
849 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
852 __put_user(v, &target_fds[i]);
855 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
/* Convert a clock_t tick count from the host's HZ to the target's HZ;
 * identical rates short-circuit (that return line is missing from this
 * extract).  Alpha hosts get special HOST_HZ handling above. */
860 #if defined(__alpha__)
866 static inline abi_long host_to_target_clock_t(long ticks)
868 #if HOST_HZ == TARGET_HZ
871 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
/* Copy a host struct rusage into the guest's target_rusage at target_addr,
 * byte-swapping each field for the target ABI.  Returns 0 on success or
 * -TARGET_EFAULT if the guest struct cannot be locked for writing. */
875 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
876 const struct rusage *rusage)
878 struct target_rusage *target_rusage;
880 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
881 return -TARGET_EFAULT;
882 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
883 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
884 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
885 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
886 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
887 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
888 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
889 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
890 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
891 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
892 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
893 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
894 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
895 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
896 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
897 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
898 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
899 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
900 unlock_user_struct(target_rusage, target_addr, 1);
/* Convert a target rlimit value to a host rlim_t.  The target's infinity
 * sentinel maps to RLIM_INFINITY, as does any value that would not survive
 * a round-trip through the (possibly narrower) host rlim_t. */
905 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
907 abi_ulong target_rlim_swap;
910 target_rlim_swap = tswapal(target_rlim);
911 if (target_rlim_swap == TARGET_RLIM_INFINITY)
912 return RLIM_INFINITY;
914 result = target_rlim_swap;
915 if (target_rlim_swap != (rlim_t)result)
916 return RLIM_INFINITY;
/* Inverse of target_to_host_rlim(): host RLIM_INFINITY, or any value that
 * does not fit the target's abi_long, becomes TARGET_RLIM_INFINITY; the
 * result is byte-swapped for the target. */
921 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
923 abi_ulong target_rlim_swap;
926 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
927 target_rlim_swap = TARGET_RLIM_INFINITY;
929 target_rlim_swap = rlim;
930 result = tswapal(target_rlim_swap);
/* Map a target RLIMIT_* resource code to the host's (the numeric codes can
 * differ per architecture).  Some single-line return statements are missing
 * from this extract; the default (pass-through) case is likewise cut. */
935 static inline int target_to_host_resource(int code)
938 case TARGET_RLIMIT_AS:
940 case TARGET_RLIMIT_CORE:
942 case TARGET_RLIMIT_CPU:
944 case TARGET_RLIMIT_DATA:
946 case TARGET_RLIMIT_FSIZE:
948 case TARGET_RLIMIT_LOCKS:
950 case TARGET_RLIMIT_MEMLOCK:
951 return RLIMIT_MEMLOCK;
952 case TARGET_RLIMIT_MSGQUEUE:
953 return RLIMIT_MSGQUEUE;
954 case TARGET_RLIMIT_NICE:
956 case TARGET_RLIMIT_NOFILE:
957 return RLIMIT_NOFILE;
958 case TARGET_RLIMIT_NPROC:
960 case TARGET_RLIMIT_RSS:
962 case TARGET_RLIMIT_RTPRIO:
963 return RLIMIT_RTPRIO;
964 case TARGET_RLIMIT_SIGPENDING:
965 return RLIMIT_SIGPENDING;
966 case TARGET_RLIMIT_STACK:
/* Read a struct timeval from guest memory into host form.  Returns 0 on
 * success or -TARGET_EFAULT on a bad guest address. */
973 static inline abi_long copy_from_user_timeval(struct timeval *tv,
974 abi_ulong target_tv_addr)
976 struct target_timeval *target_tv;
978 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
979 return -TARGET_EFAULT;
981 __get_user(tv->tv_sec, &target_tv->tv_sec);
982 __get_user(tv->tv_usec, &target_tv->tv_usec);
984 unlock_user_struct(target_tv, target_tv_addr, 0);
/* Write a host struct timeval into guest memory in target form.  Returns 0
 * on success or -TARGET_EFAULT on a bad guest address. */
989 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
990 const struct timeval *tv)
992 struct target_timeval *target_tv;
994 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
995 return -TARGET_EFAULT;
997 __put_user(tv->tv_sec, &target_tv->tv_sec);
998 __put_user(tv->tv_usec, &target_tv->tv_usec);
1000 unlock_user_struct(target_tv, target_tv_addr, 1);
/* Read a struct timezone from guest memory into host form.  Returns 0 on
 * success or -TARGET_EFAULT on a bad guest address. */
1005 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1006 abi_ulong target_tz_addr)
1008 struct target_timezone *target_tz;
1010 if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1011 return -TARGET_EFAULT;
1014 __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1015 __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1017 unlock_user_struct(target_tz, target_tz_addr, 0);
/* POSIX message queue attribute marshalling (only when the target and host
 * both support mq_open).  Both helpers return 0 on success or
 * -TARGET_EFAULT on a bad guest address. */
1022 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
/* Guest -> host copy of struct mq_attr. */
1025 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1026 abi_ulong target_mq_attr_addr)
1028 struct target_mq_attr *target_mq_attr;
1030 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1031 target_mq_attr_addr, 1))
1032 return -TARGET_EFAULT;
1034 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1035 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1036 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1037 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1039 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
/* Host -> guest copy of struct mq_attr. */
1044 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1045 const struct mq_attr *attr)
1047 struct target_mq_attr *target_mq_attr;
1049 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1050 target_mq_attr_addr, 0))
1051 return -TARGET_EFAULT;
1053 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1054 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1055 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1056 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1058 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1064 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1065 /* do_select() must return target values and target errnos. */
1066 static abi_long do_select(int n,
1067 abi_ulong rfd_addr, abi_ulong wfd_addr,
1068 abi_ulong efd_addr, abi_ulong target_tv_addr)
1070 fd_set rfds, wfds, efds;
1071 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1072 struct timeval tv, *tv_ptr;
/* Unmarshal the three guest fd sets (NULL guest pointer -> NULL host
 * pointer); error checks between the copies are missing from this extract. */
1075 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1079 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1083 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
/* Optional timeout: a zero guest address means block indefinitely. */
1088 if (target_tv_addr) {
1089 if (copy_from_user_timeval(&tv, target_tv_addr))
1090 return -TARGET_EFAULT;
1096 ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));
/* On success write the (possibly modified) sets and timeout back. */
1098 if (!is_error(ret)) {
1099 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1100 return -TARGET_EFAULT;
1101 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1102 return -TARGET_EFAULT;
1103 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1104 return -TARGET_EFAULT;
1106 if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
1107 return -TARGET_EFAULT;
/* pipe2(2) helper: forwards straight to the host when CONFIG_PIPE2 is set
 * (the ENOSYS fallback branch is missing from this extract). */
1114 static abi_long do_pipe2(int host_pipe[], int flags)
1117 return pipe2(host_pipe, flags);
/* Implement guest pipe()/pipe2(): create the host pipe, then deliver the
 * two fds to the guest.  Several targets return the second fd in a CPU
 * register for the classic pipe syscall (but NOT for pipe2 — hence the
 * is_pipe2 guard, whose check is among the lines missing here); everyone
 * else gets both fds written to the guest array at 'pipedes'. */
1123 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1124 int flags, int is_pipe2)
1128 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1131 return get_errno(ret);
1133 /* Several targets have special calling conventions for the original
1134 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1136 #if defined(TARGET_ALPHA)
1137 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1138 return host_pipe[0];
1139 #elif defined(TARGET_MIPS)
1140 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1141 return host_pipe[0];
1142 #elif defined(TARGET_SH4)
1143 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1144 return host_pipe[0];
1145 #elif defined(TARGET_SPARC)
1146 ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1147 return host_pipe[0];
/* Generic path: store both fds into the guest int[2] array. */
1151 if (put_user_s32(host_pipe[0], pipedes)
1152 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1153 return -TARGET_EFAULT;
1154 return get_errno(ret);
/* Convert a guest ip_mreq/ip_mreqn (multicast group join request) to host
 * form.  The addresses are already network byte order so they copy as-is;
 * only imr_ifindex needs swapping, and only when the caller passed the
 * larger ip_mreqn variant (distinguished by 'len'). */
1157 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1158 abi_ulong target_addr,
1161 struct target_ip_mreqn *target_smreqn;
1163 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1165 return -TARGET_EFAULT;
1166 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1167 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1168 if (len == sizeof(struct target_ip_mreqn))
1169 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1170 unlock_user(target_smreqn, target_addr, 0);
/* Convert a guest sockaddr at target_addr (length 'len') into the host
 * 'addr'.  A per-fd translator, when registered, takes over entirely.
 * Handles two quirks: AF_UNIX sun_path lengths that omit the trailing NUL
 * (extended by one byte, like the kernel does), and AF_PACKET ifindex/
 * hatype fields that need byte-swapping. */
1175 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1176 abi_ulong target_addr,
1179 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1180 sa_family_t sa_family;
1181 struct target_sockaddr *target_saddr;
1183 if (fd_trans_target_to_host_addr(fd)) {
1184 return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1187 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1189 return -TARGET_EFAULT;
1191 sa_family = tswap16(target_saddr->sa_family);
1193 /* Oops. The caller might send a incomplete sun_path; sun_path
1194 * must be terminated by \0 (see the manual page), but
1195 * unfortunately it is quite common to specify sockaddr_un
1196 * length as "strlen(x->sun_path)" while it should be
1197 * "strlen(...) + 1". We'll fix that here if needed.
1198 * Linux kernel has a similar feature.
1201 if (sa_family == AF_UNIX) {
1202 if (len < unix_maxlen && len > 0) {
1203 char *cp = (char*)target_saddr;
/* Last byte non-NUL but the next one is: the caller under-counted by one;
 * the len adjustment lines themselves are missing from this extract. */
1205 if ( cp[len-1] && !cp[len] )
1208 if (len > unix_maxlen)
1212 memcpy(addr, target_saddr, len);
1213 addr->sa_family = sa_family;
1214 if (sa_family == AF_PACKET) {
1215 struct target_sockaddr_ll *lladdr;
1217 lladdr = (struct target_sockaddr_ll *)addr;
1218 lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1219 lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1221 unlock_user(target_saddr, target_addr, 0);
/* Copy a host sockaddr back into guest memory, byte-swapping only the
 * sa_family field (addresses/ports are already network byte order).
 * Returns 0 on success or -TARGET_EFAULT on a bad guest address. */
1226 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1227 struct sockaddr *addr,
1230 struct target_sockaddr *target_saddr;
1232 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1234 return -TARGET_EFAULT;
1235 memcpy(target_saddr, addr, len);
1236 target_saddr->sa_family = tswap16(addr->sa_family);
1237 unlock_user(target_saddr, target_addr, len);
/*
 * Convert the ancillary data (control messages) of a guest msghdr
 * @target_msgh into host form in @msgh, ready for sendmsg().
 *
 * Walks the guest cmsg chain and the pre-allocated host chain in
 * lockstep, translating level/type and payload.  SCM_RIGHTS fd arrays
 * and SCM_CREDENTIALS are converted element-wise; any other payload is
 * copied raw with a warning.  On return msgh->msg_controllen is set to
 * the space actually used.
 *
 * NOTE(review): this extract is elided; the early-return for a short
 * controllen, the overflow break, and several closing braces are not
 * visible here.
 */
1242 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1243 struct target_msghdr *target_msgh)
1245 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1246 abi_long msg_controllen;
1247 abi_ulong target_cmsg_addr;
1248 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1249 socklen_t space = 0;
1251 msg_controllen = tswapal(target_msgh->msg_controllen);
1252 if (msg_controllen < sizeof (struct target_cmsghdr))
1254 target_cmsg_addr = tswapal(target_msgh->msg_control);
1255 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1256 target_cmsg_start = target_cmsg;
1258 return -TARGET_EFAULT;
1260 while (cmsg && target_cmsg) {
1261 void *data = CMSG_DATA(cmsg);
1262 void *target_data = TARGET_CMSG_DATA(target_cmsg);
/* Payload length = guest cmsg_len minus the guest header size. */
1264 int len = tswapal(target_cmsg->cmsg_len)
1265 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
1267 space += CMSG_SPACE(len);
1268 if (space > msgh->msg_controllen) {
1269 space -= CMSG_SPACE(len);
1270 /* This is a QEMU bug, since we allocated the payload
1271 * area ourselves (unlike overflow in host-to-target
1272 * conversion, which is just the guest giving us a buffer
1273 * that's too small). It can't happen for the payload types
1274 * we currently support; if it becomes an issue in future
1275 * we would need to improve our allocation strategy to
1276 * something more intelligent than "twice the size of the
1277 * target buffer we're reading from".
1279 gemu_log("Host cmsg overflow\n");
1283 if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1284 cmsg->cmsg_level = SOL_SOCKET;
1286 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1288 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1289 cmsg->cmsg_len = CMSG_LEN(len);
/* Passed file descriptors: swap each fd individually. */
1291 if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1292 int *fd = (int *)data;
1293 int *target_fd = (int *)target_data;
1294 int i, numfds = len / sizeof(int);
1296 for (i = 0; i < numfds; i++) {
1297 __get_user(fd[i], target_fd + i);
1299 } else if (cmsg->cmsg_level == SOL_SOCKET
1300 && cmsg->cmsg_type == SCM_CREDENTIALS) {
1301 struct ucred *cred = (struct ucred *)data;
1302 struct target_ucred *target_cred =
1303 (struct target_ucred *)target_data;
1305 __get_user(cred->pid, &target_cred->pid);
1306 __get_user(cred->uid, &target_cred->uid);
1307 __get_user(cred->gid, &target_cred->gid);
/* Unknown payload: best-effort raw copy, with a diagnostic. */
1309 gemu_log("Unsupported ancillary data: %d/%d\n",
1310 cmsg->cmsg_level, cmsg->cmsg_type);
1311 memcpy(data, target_data, len);
1314 cmsg = CMSG_NXTHDR(msgh, cmsg);
1315 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1318 unlock_user(target_cmsg, target_cmsg_addr, 0);
1320 msgh->msg_controllen = space;
/*
 * Convert the ancillary data of a host msghdr @msgh (as filled in by
 * recvmsg()) back into the guest msghdr @target_msgh.
 *
 * Mirrors target_to_host_cmsg: walks both cmsg chains in lockstep,
 * translating level/type, converting SCM_RIGHTS fd arrays,
 * SCM_TIMESTAMP timevals and SCM_CREDENTIALS field by field, and
 * copying anything else raw.  Truncation here is the guest's fault
 * (buffer too small) and is reported via MSG_CTRUNC, matching the
 * kernel's put_cmsg() behaviour.
 *
 * NOTE(review): this extract is elided; the break statements, some
 * case labels and closing braces of the switches are not visible here.
 */
1324 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1325 struct msghdr *msgh)
1327 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1328 abi_long msg_controllen;
1329 abi_ulong target_cmsg_addr;
1330 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1331 socklen_t space = 0;
1333 msg_controllen = tswapal(target_msgh->msg_controllen);
1334 if (msg_controllen < sizeof (struct target_cmsghdr))
1336 target_cmsg_addr = tswapal(target_msgh->msg_control);
1337 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1338 target_cmsg_start = target_cmsg;
1340 return -TARGET_EFAULT;
1342 while (cmsg && target_cmsg) {
1343 void *data = CMSG_DATA(cmsg);
1344 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1346 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
1347 int tgt_len, tgt_space;
1349 /* We never copy a half-header but may copy half-data;
1350 * this is Linux's behaviour in put_cmsg(). Note that
1351 * truncation here is a guest problem (which we report
1352 * to the guest via the CTRUNC bit), unlike truncation
1353 * in target_to_host_cmsg, which is a QEMU bug.
1355 if (msg_controllen < sizeof(struct cmsghdr)) {
1356 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1360 if (cmsg->cmsg_level == SOL_SOCKET) {
1361 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1363 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1365 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1367 tgt_len = TARGET_CMSG_LEN(len);
1369 /* Payload types which need a different size of payload on
1370 * the target must adjust tgt_len here.
1372 switch (cmsg->cmsg_level) {
1374 switch (cmsg->cmsg_type) {
1376 tgt_len = sizeof(struct target_timeval);
/* Clamp to the space the guest actually gave us. */
1385 if (msg_controllen < tgt_len) {
1386 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1387 tgt_len = msg_controllen;
1390 /* We must now copy-and-convert len bytes of payload
1391 * into tgt_len bytes of destination space. Bear in mind
1392 * that in both source and destination we may be dealing
1393 * with a truncated value!
1395 switch (cmsg->cmsg_level) {
1397 switch (cmsg->cmsg_type) {
1400 int *fd = (int *)data;
1401 int *target_fd = (int *)target_data;
1402 int i, numfds = tgt_len / sizeof(int);
1404 for (i = 0; i < numfds; i++) {
1405 __put_user(fd[i], target_fd + i);
1411 struct timeval *tv = (struct timeval *)data;
1412 struct target_timeval *target_tv =
1413 (struct target_timeval *)target_data;
/* A truncated timeval can't be converted field-wise. */
1415 if (len != sizeof(struct timeval) ||
1416 tgt_len != sizeof(struct target_timeval)) {
1420 /* copy struct timeval to target */
1421 __put_user(tv->tv_sec, &target_tv->tv_sec);
1422 __put_user(tv->tv_usec, &target_tv->tv_usec);
1425 case SCM_CREDENTIALS:
1427 struct ucred *cred = (struct ucred *)data;
1428 struct target_ucred *target_cred =
1429 (struct target_ucred *)target_data;
1431 __put_user(cred->pid, &target_cred->pid);
1432 __put_user(cred->uid, &target_cred->uid);
1433 __put_user(cred->gid, &target_cred->gid);
/* Unknown payload: raw copy, zero-fill any extra target space. */
1443 gemu_log("Unsupported ancillary data: %d/%d\n",
1444 cmsg->cmsg_level, cmsg->cmsg_type);
1445 memcpy(target_data, data, MIN(len, tgt_len));
1446 if (tgt_len > len) {
1447 memset(target_data + len, 0, tgt_len - len);
1451 target_cmsg->cmsg_len = tswapal(tgt_len);
1452 tgt_space = TARGET_CMSG_SPACE(len);
1453 if (msg_controllen < tgt_space) {
1454 tgt_space = msg_controllen;
1456 msg_controllen -= tgt_space;
1458 cmsg = CMSG_NXTHDR(msgh, cmsg);
1459 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1462 unlock_user(target_cmsg, target_cmsg_addr, space);
1464 target_msgh->msg_controllen = tswapal(space);
/* do_setsockopt() Must return target values and target errnos. */
/*
 * Emulate setsockopt(2) for the guest: translate the option level and
 * name from target to host values, convert the option payload where its
 * layout differs between ABIs (timevals, mreq structs, BPF filters,
 * interface names), then invoke the host setsockopt().
 *
 * NOTE(review): this extract is elided; the switch headers, many
 * break/closing-brace lines and some case labels are not visible here.
 */
1469 static abi_long do_setsockopt(int sockfd, int level, int optname,
1470 abi_ulong optval_addr, socklen_t optlen)
1474 struct ip_mreqn *ip_mreq;
1475 struct ip_mreq_source *ip_mreq_source;
1479 /* TCP options all take an 'int' value. */
1480 if (optlen < sizeof(uint32_t))
1481 return -TARGET_EINVAL;
1483 if (get_user_u32(val, optval_addr))
1484 return -TARGET_EFAULT;
1485 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1492 case IP_ROUTER_ALERT:
1496 case IP_MTU_DISCOVER:
1502 case IP_MULTICAST_TTL:
1503 case IP_MULTICAST_LOOP:
/* These IP options accept either a full int or a single byte. */
1505 if (optlen >= sizeof(uint32_t)) {
1506 if (get_user_u32(val, optval_addr))
1507 return -TARGET_EFAULT;
1508 } else if (optlen >= 1) {
1509 if (get_user_u8(val, optval_addr))
1510 return -TARGET_EFAULT;
1512 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1514 case IP_ADD_MEMBERSHIP:
1515 case IP_DROP_MEMBERSHIP:
1516 if (optlen < sizeof (struct target_ip_mreq) ||
1517 optlen > sizeof (struct target_ip_mreqn))
1518 return -TARGET_EINVAL;
1520 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1521 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1522 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1525 case IP_BLOCK_SOURCE:
1526 case IP_UNBLOCK_SOURCE:
1527 case IP_ADD_SOURCE_MEMBERSHIP:
1528 case IP_DROP_SOURCE_MEMBERSHIP:
1529 if (optlen != sizeof (struct target_ip_mreq_source))
1530 return -TARGET_EINVAL;
/* ip_mreq_source layout matches between ABIs: pass through. */
1532 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1533 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1534 unlock_user (ip_mreq_source, optval_addr, 0);
1543 case IPV6_MTU_DISCOVER:
1546 case IPV6_RECVPKTINFO:
1548 if (optlen < sizeof(uint32_t)) {
1549 return -TARGET_EINVAL;
1551 if (get_user_u32(val, optval_addr)) {
1552 return -TARGET_EFAULT;
1554 ret = get_errno(setsockopt(sockfd, level, optname,
1555 &val, sizeof(val)));
1564 /* struct icmp_filter takes an u32 value */
1565 if (optlen < sizeof(uint32_t)) {
1566 return -TARGET_EINVAL;
1569 if (get_user_u32(val, optval_addr)) {
1570 return -TARGET_EFAULT;
1572 ret = get_errno(setsockopt(sockfd, level, optname,
1573 &val, sizeof(val)));
1580 case TARGET_SOL_SOCKET:
1582 case TARGET_SO_RCVTIMEO:
1586 optname = SO_RCVTIMEO;
/* Timeouts carry a struct timeval, which differs between ABIs. */
1589 if (optlen != sizeof(struct target_timeval)) {
1590 return -TARGET_EINVAL;
1593 if (copy_from_user_timeval(&tv, optval_addr)) {
1594 return -TARGET_EFAULT;
1597 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
1601 case TARGET_SO_SNDTIMEO:
1602 optname = SO_SNDTIMEO;
1604 case TARGET_SO_ATTACH_FILTER:
1606 struct target_sock_fprog *tfprog;
1607 struct target_sock_filter *tfilter;
1608 struct sock_fprog fprog;
1609 struct sock_filter *filter;
1612 if (optlen != sizeof(*tfprog)) {
1613 return -TARGET_EINVAL;
1615 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
1616 return -TARGET_EFAULT;
1618 if (!lock_user_struct(VERIFY_READ, tfilter,
1619 tswapal(tfprog->filter), 0)) {
1620 unlock_user_struct(tfprog, optval_addr, 1);
1621 return -TARGET_EFAULT;
/* Byte-swap the whole BPF program instruction by instruction. */
1624 fprog.len = tswap16(tfprog->len);
1625 filter = g_try_new(struct sock_filter, fprog.len);
1626 if (filter == NULL) {
1627 unlock_user_struct(tfilter, tfprog->filter, 1);
1628 unlock_user_struct(tfprog, optval_addr, 1);
1629 return -TARGET_ENOMEM;
1631 for (i = 0; i < fprog.len; i++) {
1632 filter[i].code = tswap16(tfilter[i].code);
1633 filter[i].jt = tfilter[i].jt;
1634 filter[i].jf = tfilter[i].jf;
1635 filter[i].k = tswap32(tfilter[i].k);
1637 fprog.filter = filter;
1639 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
1640 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
1643 unlock_user_struct(tfilter, tfprog->filter, 1);
1644 unlock_user_struct(tfprog, optval_addr, 1);
1647 case TARGET_SO_BINDTODEVICE:
1649 char *dev_ifname, *addr_ifname;
/* Kernel silently truncates over-long names; do the same. */
1651 if (optlen > IFNAMSIZ - 1) {
1652 optlen = IFNAMSIZ - 1;
1654 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1656 return -TARGET_EFAULT;
1658 optname = SO_BINDTODEVICE;
1659 addr_ifname = alloca(IFNAMSIZ);
1660 memcpy(addr_ifname, dev_ifname, optlen);
1661 addr_ifname[optlen] = 0;
1662 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
1663 addr_ifname, optlen));
1664 unlock_user (dev_ifname, optval_addr, 0);
1667 /* Options with 'int' argument. */
1668 case TARGET_SO_DEBUG:
1671 case TARGET_SO_REUSEADDR:
1672 optname = SO_REUSEADDR;
1674 case TARGET_SO_TYPE:
1677 case TARGET_SO_ERROR:
1680 case TARGET_SO_DONTROUTE:
1681 optname = SO_DONTROUTE;
1683 case TARGET_SO_BROADCAST:
1684 optname = SO_BROADCAST;
1686 case TARGET_SO_SNDBUF:
1687 optname = SO_SNDBUF;
1689 case TARGET_SO_SNDBUFFORCE:
1690 optname = SO_SNDBUFFORCE;
1692 case TARGET_SO_RCVBUF:
1693 optname = SO_RCVBUF;
1695 case TARGET_SO_RCVBUFFORCE:
1696 optname = SO_RCVBUFFORCE;
1698 case TARGET_SO_KEEPALIVE:
1699 optname = SO_KEEPALIVE;
1701 case TARGET_SO_OOBINLINE:
1702 optname = SO_OOBINLINE;
1704 case TARGET_SO_NO_CHECK:
1705 optname = SO_NO_CHECK;
1707 case TARGET_SO_PRIORITY:
1708 optname = SO_PRIORITY;
1711 case TARGET_SO_BSDCOMPAT:
1712 optname = SO_BSDCOMPAT;
1715 case TARGET_SO_PASSCRED:
1716 optname = SO_PASSCRED;
1718 case TARGET_SO_PASSSEC:
1719 optname = SO_PASSSEC;
1721 case TARGET_SO_TIMESTAMP:
1722 optname = SO_TIMESTAMP;
1724 case TARGET_SO_RCVLOWAT:
1725 optname = SO_RCVLOWAT;
/* Shared tail for all plain-int SOL_SOCKET options above. */
1731 if (optlen < sizeof(uint32_t))
1732 return -TARGET_EINVAL;
1734 if (get_user_u32(val, optval_addr))
1735 return -TARGET_EFAULT;
1736 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
1740 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
1741 ret = -TARGET_ENOPROTOOPT;
/* do_getsockopt() Must return target values and target errnos. */
/*
 * Emulate getsockopt(2): translate level/optname, run the host call
 * into a local buffer, then convert the result (and the value-result
 * length word) back into guest memory.
 *
 * NOTE(review): this extract is elided; the switch headers, break
 * lines and several case labels/braces are not visible here.
 */
1747 static abi_long do_getsockopt(int sockfd, int level, int optname,
1748 abi_ulong optval_addr, abi_ulong optlen)
1755 case TARGET_SOL_SOCKET:
1758 /* These don't just return a single integer */
1759 case TARGET_SO_LINGER:
1760 case TARGET_SO_RCVTIMEO:
1761 case TARGET_SO_SNDTIMEO:
1762 case TARGET_SO_PEERNAME:
1764 case TARGET_SO_PEERCRED: {
1767 struct target_ucred *tcr;
1769 if (get_user_u32(len, optlen)) {
1770 return -TARGET_EFAULT;
1773 return -TARGET_EINVAL;
1777 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
/* Convert host struct ucred into the guest layout field by field. */
1785 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
1786 return -TARGET_EFAULT;
1788 __put_user(cr.pid, &tcr->pid);
1789 __put_user(cr.uid, &tcr->uid);
1790 __put_user(cr.gid, &tcr->gid);
1791 unlock_user_struct(tcr, optval_addr, 1);
1792 if (put_user_u32(len, optlen)) {
1793 return -TARGET_EFAULT;
1797 /* Options with 'int' argument. */
1798 case TARGET_SO_DEBUG:
1801 case TARGET_SO_REUSEADDR:
1802 optname = SO_REUSEADDR;
1804 case TARGET_SO_TYPE:
1807 case TARGET_SO_ERROR:
1810 case TARGET_SO_DONTROUTE:
1811 optname = SO_DONTROUTE;
1813 case TARGET_SO_BROADCAST:
1814 optname = SO_BROADCAST;
1816 case TARGET_SO_SNDBUF:
1817 optname = SO_SNDBUF;
1819 case TARGET_SO_RCVBUF:
1820 optname = SO_RCVBUF;
1822 case TARGET_SO_KEEPALIVE:
1823 optname = SO_KEEPALIVE;
1825 case TARGET_SO_OOBINLINE:
1826 optname = SO_OOBINLINE;
1828 case TARGET_SO_NO_CHECK:
1829 optname = SO_NO_CHECK;
1831 case TARGET_SO_PRIORITY:
1832 optname = SO_PRIORITY;
1835 case TARGET_SO_BSDCOMPAT:
1836 optname = SO_BSDCOMPAT;
1839 case TARGET_SO_PASSCRED:
1840 optname = SO_PASSCRED;
1842 case TARGET_SO_TIMESTAMP:
1843 optname = SO_TIMESTAMP;
1845 case TARGET_SO_RCVLOWAT:
1846 optname = SO_RCVLOWAT;
1848 case TARGET_SO_ACCEPTCONN:
1849 optname = SO_ACCEPTCONN;
/* Shared tail for the plain-int SOL_SOCKET options above. */
1856 /* TCP options all take an 'int' value. */
1858 if (get_user_u32(len, optlen))
1859 return -TARGET_EFAULT;
1861 return -TARGET_EINVAL;
1863 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv))
1866 if (optname == SO_TYPE) {
1867 val = host_to_target_sock_type(val);
1872 if (put_user_u32(val, optval_addr))
1873 return -TARGET_EFAULT;
1875 if (put_user_u8(val, optval_addr))
1876 return -TARGET_EFAULT;
1878 if (put_user_u32(len, optlen))
1879 return -TARGET_EFAULT;
1886 case IP_ROUTER_ALERT:
1890 case IP_MTU_DISCOVER:
1896 case IP_MULTICAST_TTL:
1897 case IP_MULTICAST_LOOP:
1898 if (get_user_u32(len, optlen))
1899 return -TARGET_EFAULT;
1901 return -TARGET_EINVAL;
1903 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
/* Guest asked for less than an int: return a single byte if the
 * value fits, matching the kernel's behaviour for these options. */
1906 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
1908 if (put_user_u32(len, optlen)
1909 || put_user_u8(val, optval_addr))
1910 return -TARGET_EFAULT;
1912 if (len > sizeof(int))
1914 if (put_user_u32(len, optlen)
1915 || put_user_u32(val, optval_addr))
1916 return -TARGET_EFAULT;
1920 ret = -TARGET_ENOPROTOOPT;
1926 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1928 ret = -TARGET_EOPNOTSUPP;
/*
 * Build a host iovec array from a guest iovec array at @target_addr,
 * locking each guest buffer into host memory.
 *
 * @type is VERIFY_READ or VERIFY_WRITE depending on the transfer
 * direction; @copy controls whether buffer contents are copied in.
 * The total length is clamped to max_len, mirroring the kernel's
 * partial-transfer semantics; a bad buffer after the first becomes a
 * NULL entry (partial write) rather than a fault.  Returns the
 * allocated vector (freed by unlock_iovec) or NULL with errno set.
 *
 * NOTE(review): this extract is elided; the errno assignments, the
 * goto-based unwind labels and several closing braces are not visible.
 */
1934 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
1935 int count, int copy)
1937 struct target_iovec *target_vec;
1939 abi_ulong total_len, max_len;
1942 bool bad_address = false;
/* Match kernel limits on iovec count. */
1948 if (count < 0 || count > IOV_MAX) {
1953 vec = g_try_new0(struct iovec, count);
1959 target_vec = lock_user(VERIFY_READ, target_addr,
1960 count * sizeof(struct target_iovec), 1);
1961 if (target_vec == NULL) {
1966 /* ??? If host page size > target page size, this will result in a
1967 value larger than what we can actually support. */
1968 max_len = 0x7fffffff & TARGET_PAGE_MASK;
1971 for (i = 0; i < count; i++) {
1972 abi_ulong base = tswapal(target_vec[i].iov_base);
1973 abi_long len = tswapal(target_vec[i].iov_len);
1978 } else if (len == 0) {
1979 /* Zero length pointer is ignored. */
1980 vec[i].iov_base = 0;
1982 vec[i].iov_base = lock_user(type, base, len, copy);
1983 /* If the first buffer pointer is bad, this is a fault. But
1984 * subsequent bad buffers will result in a partial write; this
1985 * is realized by filling the vector with null pointers and
1987 if (!vec[i].iov_base) {
/* Clamp so the running total never exceeds max_len. */
1998 if (len > max_len - total_len) {
1999 len = max_len - total_len;
2002 vec[i].iov_len = len;
2006 unlock_user(target_vec, target_addr, 0);
/* Failure path: release every buffer locked so far. */
2011 if (tswapal(target_vec[i].iov_len) > 0) {
2012 unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
2015 unlock_user(target_vec, target_addr, 0);
2022 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2023 int count, int copy)
2025 struct target_iovec *target_vec;
2028 target_vec = lock_user(VERIFY_READ, target_addr,
2029 count * sizeof(struct target_iovec), 1);
2031 for (i = 0; i < count; i++) {
2032 abi_ulong base = tswapal(target_vec[i].iov_base);
2033 abi_long len = tswapal(target_vec[i].iov_len);
2037 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2039 unlock_user(target_vec, target_addr, 0);
2045 static inline int target_to_host_sock_type(int *type)
2048 int target_type = *type;
2050 switch (target_type & TARGET_SOCK_TYPE_MASK) {
2051 case TARGET_SOCK_DGRAM:
2052 host_type = SOCK_DGRAM;
2054 case TARGET_SOCK_STREAM:
2055 host_type = SOCK_STREAM;
2058 host_type = target_type & TARGET_SOCK_TYPE_MASK;
2061 if (target_type & TARGET_SOCK_CLOEXEC) {
2062 #if defined(SOCK_CLOEXEC)
2063 host_type |= SOCK_CLOEXEC;
2065 return -TARGET_EINVAL;
2068 if (target_type & TARGET_SOCK_NONBLOCK) {
2069 #if defined(SOCK_NONBLOCK)
2070 host_type |= SOCK_NONBLOCK;
2071 #elif !defined(O_NONBLOCK)
2072 return -TARGET_EINVAL;
2079 /* Try to emulate socket type flags after socket creation. */
2080 static int sock_flags_fixup(int fd, int target_type)
2082 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2083 if (target_type & TARGET_SOCK_NONBLOCK) {
2084 int flags = fcntl(fd, F_GETFL);
2085 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2087 return -TARGET_EINVAL;
2094 static abi_long packet_target_to_host_sockaddr(void *host_addr,
2095 abi_ulong target_addr,
2098 struct sockaddr *addr = host_addr;
2099 struct target_sockaddr *target_saddr;
2101 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
2102 if (!target_saddr) {
2103 return -TARGET_EFAULT;
2106 memcpy(addr, target_saddr, len);
2107 addr->sa_family = tswap16(target_saddr->sa_family);
2108 /* spkt_protocol is big-endian */
2110 unlock_user(target_saddr, target_addr, 0);
/* Registered on SOCK_PACKET fds by do_socket() so that addresses passed
 * to bind/sendto are converted by packet_target_to_host_sockaddr.
 * NOTE(review): the closing "};" of this initializer is elided here. */
2114 static TargetFdTrans target_packet_trans = {
2115 .target_to_host_addr = packet_target_to_host_sockaddr,
/* do_socket() Must return target values and target errnos. */
/*
 * Emulate socket(2): convert the guest type flags, reject unsupported
 * domains, byte-swap the protocol for raw packet sockets (it is
 * big-endian in the kernel ABI), create the host socket, then apply
 * flag fixups and register the SOCK_PACKET address translator.
 *
 * NOTE(review): this extract is elided; the error-return after
 * target_to_host_sock_type, an #ifdef around the netlink check, the
 * is_error() guard and the final return are not visible here.
 */
2119 static abi_long do_socket(int domain, int type, int protocol)
2121 int target_type = type;
2124 ret = target_to_host_sock_type(&type);
/* Guest netlink sockets are not emulated. */
2129 if (domain == PF_NETLINK)
2130 return -TARGET_EAFNOSUPPORT;
2132 if (domain == AF_PACKET ||
2133 (domain == AF_INET && type == SOCK_PACKET)) {
2134 protocol = tswap16(protocol);
2137 ret = get_errno(socket(domain, type, protocol));
2139 ret = sock_flags_fixup(ret, target_type);
2140 if (type == SOCK_PACKET) {
2141 /* Manage an obsolete case :
2142 * if socket type is SOCK_PACKET, bind by name
2144 fd_trans_register(ret, &target_packet_trans);
2150 /* do_bind() Must return target values and target errnos. */
2151 static abi_long do_bind(int sockfd, abi_ulong target_addr,
2157 if ((int)addrlen < 0) {
2158 return -TARGET_EINVAL;
2161 addr = alloca(addrlen+1);
2163 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2167 return get_errno(bind(sockfd, addr, addrlen));
2170 /* do_connect() Must return target values and target errnos. */
2171 static abi_long do_connect(int sockfd, abi_ulong target_addr,
2177 if ((int)addrlen < 0) {
2178 return -TARGET_EINVAL;
2181 addr = alloca(addrlen+1);
2183 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2187 return get_errno(connect(sockfd, addr, addrlen));
/* do_sendrecvmsg_locked() Must return target values and target errnos. */
/*
 * Core of sendmsg/recvmsg emulation, operating on an already-locked
 * guest msghdr @msgp.  Converts the name, iovec and ancillary data to
 * host form, performs the host sendmsg()/recvmsg() (selected by @send),
 * and for receives converts name/cmsg results back to the guest.
 *
 * The host control buffer is allocated at twice the guest controllen
 * because host cmsg headers/alignment can be larger than the guest's.
 *
 * NOTE(review): this extract is elided; several error-path gotos, the
 * else branches and closing braces are not visible here.
 */
2191 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
2192 int flags, int send)
2198 abi_ulong target_vec;
2200 if (msgp->msg_name) {
2201 msg.msg_namelen = tswap32(msgp->msg_namelen);
2202 msg.msg_name = alloca(msg.msg_namelen+1);
2203 ret = target_to_host_sockaddr(fd, msg.msg_name,
2204 tswapal(msgp->msg_name),
2210 msg.msg_name = NULL;
2211 msg.msg_namelen = 0;
/* Host cmsg layout may need more room than the guest's buffer. */
2213 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
2214 msg.msg_control = alloca(msg.msg_controllen);
2215 msg.msg_flags = tswap32(msgp->msg_flags);
2217 count = tswapal(msgp->msg_iovlen);
2218 target_vec = tswapal(msgp->msg_iov);
2219 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
2220 target_vec, count, send);
2222 ret = -host_to_target_errno(errno);
2225 msg.msg_iovlen = count;
2229 ret = target_to_host_cmsg(&msg, msgp);
2231 ret = get_errno(sendmsg(fd, &msg, flags));
2233 ret = get_errno(recvmsg(fd, &msg, flags));
2234 if (!is_error(ret)) {
/* Receive path: propagate cmsg, namelen and peer address back. */
2236 ret = host_to_target_cmsg(msgp, &msg);
2237 if (!is_error(ret)) {
2238 msgp->msg_namelen = tswap32(msg.msg_namelen);
2239 if (msg.msg_name != NULL) {
2240 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
2241 msg.msg_name, msg.msg_namelen);
2253 unlock_iovec(vec, target_vec, count, !send);
2258 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
2259 int flags, int send)
2262 struct target_msghdr *msgp;
2264 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
2268 return -TARGET_EFAULT;
2270 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
2271 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
2275 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
2276 * so it might not have this *mmsg-specific flag either.
2278 #ifndef MSG_WAITFORONE
2279 #define MSG_WAITFORONE 0x10000
/*
 * Emulate sendmmsg(2)/recvmmsg(2) by looping do_sendrecvmsg_locked()
 * over the guest mmsghdr array (the C library's sendmmsg/recvmmsg may
 * be absent, so we never call them directly).  Per the kernel ABI,
 * the per-message byte count is stored into msg_len, and once any
 * datagram has been transferred a later error is swallowed so the
 * partial count is returned instead.
 *
 * NOTE(review): this extract is elided; the EINVAL return for
 * oversized vlen, loop braces and the final return logic are not
 * visible here.
 */
2282 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
2283 unsigned int vlen, unsigned int flags,
2286 struct target_mmsghdr *mmsgp;
2290 if (vlen > UIO_MAXIOV) {
2294 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
2296 return -TARGET_EFAULT;
2299 for (i = 0; i < vlen; i++) {
2300 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
2301 if (is_error(ret)) {
2304 mmsgp[i].msg_len = tswap32(ret);
2305 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
2306 if (flags & MSG_WAITFORONE) {
2307 flags |= MSG_DONTWAIT;
2311 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
2313 /* Return number of datagrams sent if we sent any at all;
2314 * otherwise return the error.
2322 /* If we don't have a system accept4() then just call accept.
2323 * The callsites to do_accept4() will ensure that they don't
2324 * pass a non-zero flags argument in this config.
2326 #ifndef CONFIG_ACCEPT4
/*
 * Fallback accept4() for hosts without the real syscall (guarded by
 * the surrounding #ifndef CONFIG_ACCEPT4).  Callers guarantee
 * flags == 0 in this configuration, so plain accept() is equivalent.
 */
static inline int accept4(int sockfd, struct sockaddr *addr,
                          socklen_t *addrlen, int flags)
{
    assert(flags == 0);
    return accept(sockfd, addr, addrlen);
}
2335 /* do_accept4() Must return target values and target errnos. */
2336 static abi_long do_accept4(int fd, abi_ulong target_addr,
2337 abi_ulong target_addrlen_addr, int flags)
2344 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
2346 if (target_addr == 0) {
2347 return get_errno(accept4(fd, NULL, NULL, host_flags));
2350 /* linux returns EINVAL if addrlen pointer is invalid */
2351 if (get_user_u32(addrlen, target_addrlen_addr))
2352 return -TARGET_EINVAL;
2354 if ((int)addrlen < 0) {
2355 return -TARGET_EINVAL;
2358 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2359 return -TARGET_EINVAL;
2361 addr = alloca(addrlen);
2363 ret = get_errno(accept4(fd, addr, &addrlen, host_flags));
2364 if (!is_error(ret)) {
2365 host_to_target_sockaddr(target_addr, addr, addrlen);
2366 if (put_user_u32(addrlen, target_addrlen_addr))
2367 ret = -TARGET_EFAULT;
2372 /* do_getpeername() Must return target values and target errnos. */
2373 static abi_long do_getpeername(int fd, abi_ulong target_addr,
2374 abi_ulong target_addrlen_addr)
2380 if (get_user_u32(addrlen, target_addrlen_addr))
2381 return -TARGET_EFAULT;
2383 if ((int)addrlen < 0) {
2384 return -TARGET_EINVAL;
2387 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2388 return -TARGET_EFAULT;
2390 addr = alloca(addrlen);
2392 ret = get_errno(getpeername(fd, addr, &addrlen));
2393 if (!is_error(ret)) {
2394 host_to_target_sockaddr(target_addr, addr, addrlen);
2395 if (put_user_u32(addrlen, target_addrlen_addr))
2396 ret = -TARGET_EFAULT;
2401 /* do_getsockname() Must return target values and target errnos. */
2402 static abi_long do_getsockname(int fd, abi_ulong target_addr,
2403 abi_ulong target_addrlen_addr)
2409 if (get_user_u32(addrlen, target_addrlen_addr))
2410 return -TARGET_EFAULT;
2412 if ((int)addrlen < 0) {
2413 return -TARGET_EINVAL;
2416 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2417 return -TARGET_EFAULT;
2419 addr = alloca(addrlen);
2421 ret = get_errno(getsockname(fd, addr, &addrlen));
2422 if (!is_error(ret)) {
2423 host_to_target_sockaddr(target_addr, addr, addrlen);
2424 if (put_user_u32(addrlen, target_addrlen_addr))
2425 ret = -TARGET_EFAULT;
2430 /* do_socketpair() Must return target values and target errnos. */
2431 static abi_long do_socketpair(int domain, int type, int protocol,
2432 abi_ulong target_tab_addr)
2437 target_to_host_sock_type(&type);
2439 ret = get_errno(socketpair(domain, type, protocol, tab));
2440 if (!is_error(ret)) {
2441 if (put_user_s32(tab[0], target_tab_addr)
2442 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
2443 ret = -TARGET_EFAULT;
2448 /* do_sendto() Must return target values and target errnos. */
2449 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
2450 abi_ulong target_addr, socklen_t addrlen)
2456 if ((int)addrlen < 0) {
2457 return -TARGET_EINVAL;
2460 host_msg = lock_user(VERIFY_READ, msg, len, 1);
2462 return -TARGET_EFAULT;
2464 addr = alloca(addrlen+1);
2465 ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
2467 unlock_user(host_msg, msg, 0);
2470 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
2472 ret = get_errno(send(fd, host_msg, len, flags));
2474 unlock_user(host_msg, msg, 0);
2478 /* do_recvfrom() Must return target values and target errnos. */
2479 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
2480 abi_ulong target_addr,
2481 abi_ulong target_addrlen)
2488 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
2490 return -TARGET_EFAULT;
2492 if (get_user_u32(addrlen, target_addrlen)) {
2493 ret = -TARGET_EFAULT;
2496 if ((int)addrlen < 0) {
2497 ret = -TARGET_EINVAL;
2500 addr = alloca(addrlen);
2501 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
2503 addr = NULL; /* To keep compiler quiet. */
2504 ret = get_errno(qemu_recv(fd, host_msg, len, flags));
2506 if (!is_error(ret)) {
2508 host_to_target_sockaddr(target_addr, addr, addrlen);
2509 if (put_user_u32(addrlen, target_addrlen)) {
2510 ret = -TARGET_EFAULT;
2514 unlock_user(host_msg, msg, len);
2517 unlock_user(host_msg, msg, 0);
2522 #ifdef TARGET_NR_socketcall
/* do_socketcall() Must return target values and target errnos. */
/*
 * Demultiplex the legacy socketcall(2) syscall: @num selects the
 * socket operation and @vptr points at its packed argument array in
 * guest memory.  The ac[] table gives the argument count per opcode so
 * the arguments can be fetched generically before dispatching to the
 * individual do_* helpers.
 *
 * NOTE(review): this extract is elided; the out-of-range num handling,
 * the switch(num) header and some closing braces are not visible here.
 */
2524 static abi_long do_socketcall(int num, abi_ulong vptr)
2526 static const unsigned ac[] = { /* number of arguments per call */
2527 [SOCKOP_socket] = 3, /* domain, type, protocol */
2528 [SOCKOP_bind] = 3, /* sockfd, addr, addrlen */
2529 [SOCKOP_connect] = 3, /* sockfd, addr, addrlen */
2530 [SOCKOP_listen] = 2, /* sockfd, backlog */
2531 [SOCKOP_accept] = 3, /* sockfd, addr, addrlen */
2532 [SOCKOP_accept4] = 4, /* sockfd, addr, addrlen, flags */
2533 [SOCKOP_getsockname] = 3, /* sockfd, addr, addrlen */
2534 [SOCKOP_getpeername] = 3, /* sockfd, addr, addrlen */
2535 [SOCKOP_socketpair] = 4, /* domain, type, protocol, tab */
2536 [SOCKOP_send] = 4, /* sockfd, msg, len, flags */
2537 [SOCKOP_recv] = 4, /* sockfd, msg, len, flags */
2538 [SOCKOP_sendto] = 6, /* sockfd, msg, len, flags, addr, addrlen */
2539 [SOCKOP_recvfrom] = 6, /* sockfd, msg, len, flags, addr, addrlen */
2540 [SOCKOP_shutdown] = 2, /* sockfd, how */
2541 [SOCKOP_sendmsg] = 3, /* sockfd, msg, flags */
2542 [SOCKOP_recvmsg] = 3, /* sockfd, msg, flags */
2543 [SOCKOP_sendmmsg] = 4, /* sockfd, msgvec, vlen, flags */
2544 [SOCKOP_recvmmsg] = 4, /* sockfd, msgvec, vlen, flags */
2545 [SOCKOP_setsockopt] = 5, /* sockfd, level, optname, optval, optlen */
2546 [SOCKOP_getsockopt] = 5, /* sockfd, level, optname, optval, optlen */
2548 abi_long a[6]; /* max 6 args */
2550 /* first, collect the arguments in a[] according to ac[] */
2551 if (num >= 0 && num < ARRAY_SIZE(ac)) {
2553 assert(ARRAY_SIZE(a) >= ac[num]); /* ensure we have space for args */
2554 for (i = 0; i < ac[num]; ++i) {
2555 if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
2556 return -TARGET_EFAULT;
2561 /* now when we have the args, actually handle the call */
2563 case SOCKOP_socket: /* domain, type, protocol */
2564 return do_socket(a[0], a[1], a[2]);
2565 case SOCKOP_bind: /* sockfd, addr, addrlen */
2566 return do_bind(a[0], a[1], a[2]);
2567 case SOCKOP_connect: /* sockfd, addr, addrlen */
2568 return do_connect(a[0], a[1], a[2]);
2569 case SOCKOP_listen: /* sockfd, backlog */
2570 return get_errno(listen(a[0], a[1]));
2571 case SOCKOP_accept: /* sockfd, addr, addrlen */
2572 return do_accept4(a[0], a[1], a[2], 0);
2573 case SOCKOP_accept4: /* sockfd, addr, addrlen, flags */
2574 return do_accept4(a[0], a[1], a[2], a[3]);
2575 case SOCKOP_getsockname: /* sockfd, addr, addrlen */
2576 return do_getsockname(a[0], a[1], a[2]);
2577 case SOCKOP_getpeername: /* sockfd, addr, addrlen */
2578 return do_getpeername(a[0], a[1], a[2]);
2579 case SOCKOP_socketpair: /* domain, type, protocol, tab */
2580 return do_socketpair(a[0], a[1], a[2], a[3]);
2581 case SOCKOP_send: /* sockfd, msg, len, flags */
2582 return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
2583 case SOCKOP_recv: /* sockfd, msg, len, flags */
2584 return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
2585 case SOCKOP_sendto: /* sockfd, msg, len, flags, addr, addrlen */
2586 return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
2587 case SOCKOP_recvfrom: /* sockfd, msg, len, flags, addr, addrlen */
2588 return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
2589 case SOCKOP_shutdown: /* sockfd, how */
2590 return get_errno(shutdown(a[0], a[1]));
2591 case SOCKOP_sendmsg: /* sockfd, msg, flags */
2592 return do_sendrecvmsg(a[0], a[1], a[2], 1);
2593 case SOCKOP_recvmsg: /* sockfd, msg, flags */
2594 return do_sendrecvmsg(a[0], a[1], a[2], 0);
2595 case SOCKOP_sendmmsg: /* sockfd, msgvec, vlen, flags */
2596 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
2597 case SOCKOP_recvmmsg: /* sockfd, msgvec, vlen, flags */
2598 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
2599 case SOCKOP_setsockopt: /* sockfd, level, optname, optval, optlen */
2600 return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
2601 case SOCKOP_getsockopt: /* sockfd, level, optname, optval, optlen */
2602 return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
2604 gemu_log("Unsupported socketcall: %d\n", num);
2605 return -TARGET_ENOSYS;
2610 #define N_SHM_REGIONS 32
/* Table of guest shmat() attachments so later shmdt()/munmap handling
 * can find the mapping.  NOTE(review): the member declarations
 * (presumably start address and size) are elided in this extract. */
2612 static struct shm_region {
2615 } shm_regions[N_SHM_REGIONS];
/* Guest-ABI layout of struct semid_ds for semctl emulation.  Most
 * 32-bit ABIs carry padding words after the time fields which 64-bit
 * PPC does not, hence the TARGET_PPC64 conditionals.
 * NOTE(review): the opening brace and #endif lines are elided here. */
2617 struct target_semid_ds
2619 struct target_ipc_perm sem_perm;
2620 abi_ulong sem_otime;
2621 #if !defined(TARGET_PPC64)
2622 abi_ulong __unused1;
2624 abi_ulong sem_ctime;
2625 #if !defined(TARGET_PPC64)
2626 abi_ulong __unused2;
2628 abi_ulong sem_nsems;
2629 abi_ulong __unused3;
2630 abi_ulong __unused4;
/* Copy the ipc_perm member at the head of the guest semid_ds at
 * target_addr into *host_ip, byteswapping each field.
 * Returns 0 on success, -TARGET_EFAULT if the guest memory cannot be
 * read.  mode and __seq are 32-bit on Alpha/MIPS/PPC guest ABIs and
 * 16-bit elsewhere, hence the tswap32/tswap16 split below. */
2633 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
2634 abi_ulong target_addr)
2636 struct target_ipc_perm *target_ip;
2637 struct target_semid_ds *target_sd;
2639 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2640 return -TARGET_EFAULT;
2641 target_ip = &(target_sd->sem_perm);
2642 host_ip->__key = tswap32(target_ip->__key);
2643 host_ip->uid = tswap32(target_ip->uid);
2644 host_ip->gid = tswap32(target_ip->gid);
2645 host_ip->cuid = tswap32(target_ip->cuid);
2646 host_ip->cgid = tswap32(target_ip->cgid);
2647 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
2648 host_ip->mode = tswap32(target_ip->mode);
2650 host_ip->mode = tswap16(target_ip->mode);
2652 #if defined(TARGET_PPC)
2653 host_ip->__seq = tswap32(target_ip->__seq);
2655 host_ip->__seq = tswap16(target_ip->__seq);
/* Final 0: read-only access, nothing to copy back to guest memory. */
2657 unlock_user_struct(target_sd, target_addr, 0);
/* Mirror of target_to_host_ipc_perm(): write *host_ip back into the
 * ipc_perm member of the guest semid_ds at target_addr, byteswapping
 * each field.  Returns 0 on success, -TARGET_EFAULT on unwritable
 * guest memory. */
2661 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
2662 struct ipc_perm *host_ip)
2664 struct target_ipc_perm *target_ip;
2665 struct target_semid_ds *target_sd;
2667 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2668 return -TARGET_EFAULT;
2669 target_ip = &(target_sd->sem_perm);
2670 target_ip->__key = tswap32(host_ip->__key);
2671 target_ip->uid = tswap32(host_ip->uid);
2672 target_ip->gid = tswap32(host_ip->gid);
2673 target_ip->cuid = tswap32(host_ip->cuid);
2674 target_ip->cgid = tswap32(host_ip->cgid);
/* Same per-target field-width differences as the to-host direction. */
2675 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
2676 target_ip->mode = tswap32(host_ip->mode);
2678 target_ip->mode = tswap16(host_ip->mode);
2680 #if defined(TARGET_PPC)
2681 target_ip->__seq = tswap32(host_ip->__seq);
2683 target_ip->__seq = tswap16(host_ip->__seq);
/* Final 1: commit the modified struct back to guest memory. */
2685 unlock_user_struct(target_sd, target_addr, 1);
/* Convert a guest semid_ds at target_addr into *host_sd: the embedded
 * ipc_perm block (via target_to_host_ipc_perm) plus the nsems/otime/
 * ctime fields.  Returns 0 on success, -TARGET_EFAULT on fault.
 * NOTE(review): on the inner ipc_perm failure path the struct is
 * returned without the matching unlock_user_struct -- confirm this is
 * harmless under this build's lock_user implementation. */
2689 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
2690 abi_ulong target_addr)
2692 struct target_semid_ds *target_sd;
2694 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2695 return -TARGET_EFAULT;
2696 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
2697 return -TARGET_EFAULT;
2698 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
2699 host_sd->sem_otime = tswapal(target_sd->sem_otime);
2700 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
2701 unlock_user_struct(target_sd, target_addr, 0);
/* Mirror of target_to_host_semid_ds(): write *host_sd out to the guest
 * semid_ds at target_addr (ipc_perm block plus nsems/otime/ctime).
 * Returns 0 on success, -TARGET_EFAULT on fault. */
2705 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
2706 struct semid_ds *host_sd)
2708 struct target_semid_ds *target_sd;
2710 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2711 return -TARGET_EFAULT;
2712 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
2713 return -TARGET_EFAULT;
2714 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
2715 target_sd->sem_otime = tswapal(host_sd->sem_otime);
2716 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
2717 unlock_user_struct(target_sd, target_addr, 1);
/* Guest-ABI layout of struct seminfo (field list elided in this
 * listing). */
2721 struct target_seminfo {
/* Copy a host struct seminfo out to the guest buffer used by the
 * IPC_INFO/SEM_INFO semctl commands, one field at a time with byteswap
 * via __put_user.  Returns 0 on success, -TARGET_EFAULT on fault. */
2734 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
2735 struct seminfo *host_seminfo)
2737 struct target_seminfo *target_seminfo;
2738 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
2739 return -TARGET_EFAULT;
2740 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
2741 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
2742 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
2743 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
2744 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
2745 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
2746 __put_user(host_seminfo->semume, &target_seminfo->semume);
2747 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
2748 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
2749 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
2750 unlock_user_struct(target_seminfo, target_addr, 1);
/* Members of the host-side union semun (its opening line is elided in
 * this listing), followed by the guest-ABI equivalent. */
2756 struct semid_ds *buf;
2757 unsigned short *array;
2758 struct seminfo *__buf;
2761 union target_semun {
/* Fetch the guest semaphore-value array for semctl SETALL: query the
 * host semaphore set with IPC_STAT to learn nsems, allocate *host_array
 * (caller owns/frees it on success), then copy nsems shorts in from
 * guest memory with byteswap.  Returns 0 on success; -TARGET_ENOMEM,
 * -TARGET_EFAULT or a host semctl errno on failure. */
2768 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
2769 abi_ulong target_addr)
2772 unsigned short *array;
2774 struct semid_ds semid_ds;
2777 semun.buf = &semid_ds;
2779 ret = semctl(semid, 0, IPC_STAT, semun);
2781 return get_errno(ret);
2783 nsems = semid_ds.sem_nsems;
2785 *host_array = g_try_new(unsigned short, nsems);
2787 return -TARGET_ENOMEM;
2789 array = lock_user(VERIFY_READ, target_addr,
2790 nsems*sizeof(unsigned short), 1);
/* Lock failed: release the freshly-allocated array before bailing out. */
2792 g_free(*host_array);
2793 return -TARGET_EFAULT;
2796 for(i=0; i<nsems; i++) {
2797 __get_user((*host_array)[i], &array[i]);
2799 unlock_user(array, target_addr, 0);
/* Counterpart of target_to_host_semarray() for semctl GETALL: copy the
 * host result array back out to guest memory (byteswapped) and free
 * *host_array.  Note this function, not the caller, releases the
 * array on the success path. */
2804 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
2805 unsigned short **host_array)
2808 unsigned short *array;
2810 struct semid_ds semid_ds;
2813 semun.buf = &semid_ds;
/* Re-query nsems from the host so the copy-out length matches the set. */
2815 ret = semctl(semid, 0, IPC_STAT, semun);
2817 return get_errno(ret);
2819 nsems = semid_ds.sem_nsems;
2821 array = lock_user(VERIFY_WRITE, target_addr,
2822 nsems*sizeof(unsigned short), 0);
2824 return -TARGET_EFAULT;
2826 for(i=0; i<nsems; i++) {
2827 __put_user((*host_array)[i], &array[i]);
2829 g_free(*host_array);
2830 unlock_user(array, target_addr, 1);
/* Emulate semctl(2).  target_arg is the guest's semun value passed by
 * value; each command class converts its argument form (val / array /
 * semid_ds / seminfo) before and after the host call.  Returns a host
 * result or -TARGET_* errno.  Several case labels and error checks are
 * elided in this listing. */
2835 static inline abi_long do_semctl(int semid, int semnum, int cmd,
2836 abi_ulong target_arg)
2838 union target_semun target_su = { .buf = target_arg };
2840 struct semid_ds dsarg;
2841 unsigned short *array = NULL;
2842 struct seminfo seminfo;
2843 abi_long ret = -TARGET_EINVAL;
2850 /* In 64 bit cross-endian situations, we will erroneously pick up
2851 * the wrong half of the union for the "val" element. To rectify
2852 * this, the entire 8-byte structure is byteswapped, followed by
2853 * a swap of the 4 byte val field. In other cases, the data is
2854 * already in proper host byte order. */
2855 if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
2856 target_su.buf = tswapal(target_su.buf);
2857 arg.val = tswap32(target_su.val);
2859 arg.val = target_su.val;
2861 ret = get_errno(semctl(semid, semnum, cmd, arg));
/* GETALL/SETALL: marshal the semaphore-value array in both directions. */
2865 err = target_to_host_semarray(semid, &array, target_su.array);
2869 ret = get_errno(semctl(semid, semnum, cmd, arg));
2870 err = host_to_target_semarray(semid, target_su.array, &array);
/* IPC_STAT/IPC_SET: marshal a full semid_ds. */
2877 err = target_to_host_semid_ds(&dsarg, target_su.buf);
2881 ret = get_errno(semctl(semid, semnum, cmd, arg));
2882 err = host_to_target_semid_ds(target_su.buf, &dsarg);
/* IPC_INFO/SEM_INFO: host fills seminfo, copy it out to the guest. */
2888 arg.__buf = &seminfo;
2889 ret = get_errno(semctl(semid, semnum, cmd, arg));
2890 err = host_to_target_seminfo(target_su.__buf, &seminfo);
/* Commands that take no argument (e.g. GETVAL/GETPID/IPC_RMID). */
2898 ret = get_errno(semctl(semid, semnum, cmd, NULL));
/* Guest-ABI layout of struct sembuf (remaining fields elided in this
 * listing). */
2905 struct target_sembuf {
2906 unsigned short sem_num;
/* Copy an array of nsops guest sembuf entries into host_sembuf,
 * byteswapping each field.  Returns 0 on success, -TARGET_EFAULT if the
 * guest array is unreadable. */
2911 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
2912 abi_ulong target_addr,
2915 struct target_sembuf *target_sembuf;
2918 target_sembuf = lock_user(VERIFY_READ, target_addr,
2919 nsops*sizeof(struct target_sembuf), 1);
2921 return -TARGET_EFAULT;
2923 for(i=0; i<nsops; i++) {
2924 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
2925 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
2926 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
2929 unlock_user(target_sembuf, target_addr, 0);
/* Emulate semop(2): convert the guest sembuf array, then issue the host
 * syscall.  NOTE(review): sops is a VLA sized by the guest-controlled
 * nsops, so a hostile guest can force an arbitrarily large stack
 * allocation -- consider bounding nsops before the declaration. */
2934 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
2936 struct sembuf sops[nsops];
2938 if (target_to_host_sembuf(sops, ptr, nsops))
2939 return -TARGET_EFAULT;
2941 return get_errno(semop(semid, sops, nsops));
/* Guest-ABI layout of struct msqid_ds.  On 32-bit target ABIs each
 * 64-bit time value occupies two words, hence the __unusedN pads. */
2944 struct target_msqid_ds
2946 struct target_ipc_perm msg_perm;
2947 abi_ulong msg_stime;
2948 #if TARGET_ABI_BITS == 32
2949 abi_ulong __unused1;
2951 abi_ulong msg_rtime;
2952 #if TARGET_ABI_BITS == 32
2953 abi_ulong __unused2;
2955 abi_ulong msg_ctime;
2956 #if TARGET_ABI_BITS == 32
2957 abi_ulong __unused3;
2959 abi_ulong __msg_cbytes;
2961 abi_ulong msg_qbytes;
2962 abi_ulong msg_lspid;
2963 abi_ulong msg_lrpid;
2964 abi_ulong __unused4;
2965 abi_ulong __unused5;
/* Convert a guest msqid_ds at target_addr into *host_md (ipc_perm block
 * plus the message-queue statistics fields).  Returns 0 on success,
 * -TARGET_EFAULT on fault. */
2968 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
2969 abi_ulong target_addr)
2971 struct target_msqid_ds *target_md;
2973 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
2974 return -TARGET_EFAULT;
2975 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
2976 return -TARGET_EFAULT;
2977 host_md->msg_stime = tswapal(target_md->msg_stime);
2978 host_md->msg_rtime = tswapal(target_md->msg_rtime);
2979 host_md->msg_ctime = tswapal(target_md->msg_ctime);
2980 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
2981 host_md->msg_qnum = tswapal(target_md->msg_qnum);
2982 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
2983 host_md->msg_lspid = tswapal(target_md->msg_lspid);
2984 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
2985 unlock_user_struct(target_md, target_addr, 0);
/* Mirror of target_to_host_msqid_ds(): write *host_md out to the guest
 * msqid_ds at target_addr.  Returns 0 on success, -TARGET_EFAULT on
 * fault. */
2989 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
2990 struct msqid_ds *host_md)
2992 struct target_msqid_ds *target_md;
2994 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
2995 return -TARGET_EFAULT;
2996 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
2997 return -TARGET_EFAULT;
2998 target_md->msg_stime = tswapal(host_md->msg_stime);
2999 target_md->msg_rtime = tswapal(host_md->msg_rtime);
3000 target_md->msg_ctime = tswapal(host_md->msg_ctime);
3001 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3002 target_md->msg_qnum = tswapal(host_md->msg_qnum);
3003 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3004 target_md->msg_lspid = tswapal(host_md->msg_lspid);
3005 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3006 unlock_user_struct(target_md, target_addr, 1);
/* Guest-ABI layout of struct msginfo (leading int fields elided in this
 * listing). */
3010 struct target_msginfo {
3018 unsigned short int msgseg;
/* Copy a host struct msginfo out to the guest buffer used by the
 * IPC_INFO/MSG_INFO msgctl commands.  Returns 0 on success,
 * -TARGET_EFAULT on fault. */
3021 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
3022 struct msginfo *host_msginfo)
3024 struct target_msginfo *target_msginfo;
3025 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
3026 return -TARGET_EFAULT;
3027 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
3028 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
3029 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
3030 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
3031 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
3032 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
3033 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
3034 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
3035 unlock_user_struct(target_msginfo, target_addr, 1);
/* Emulate msgctl(2): dispatch on cmd, converting msqid_ds (IPC_STAT/
 * IPC_SET) or msginfo (IPC_INFO/MSG_INFO) between guest and host forms
 * around the host call.  Case labels are elided in this listing. */
3039 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
3041 struct msqid_ds dsarg;
3042 struct msginfo msginfo;
3043 abi_long ret = -TARGET_EINVAL;
3051 if (target_to_host_msqid_ds(&dsarg,ptr))
3052 return -TARGET_EFAULT;
3053 ret = get_errno(msgctl(msgid, cmd, &dsarg));
3054 if (host_to_target_msqid_ds(ptr,&dsarg))
3055 return -TARGET_EFAULT;
/* Argument-less commands, e.g. IPC_RMID. */
3058 ret = get_errno(msgctl(msgid, cmd, NULL));
/* IPC_INFO/MSG_INFO: the kernel writes a msginfo through the
 * msqid_ds-typed argument, hence the cast. */
3062 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
3063 if (host_to_target_msginfo(ptr, &msginfo))
3064 return -TARGET_EFAULT;
/* Guest-ABI layout of struct msgbuf (mtype/mtext fields elided in this
 * listing). */
3071 struct target_msgbuf {
/* Emulate msgsnd(2): copy the guest message (mtype + msgsz bytes of
 * mtext) into a temporary host msgbuf and send it.  Returns the host
 * result or -TARGET_* errno.  The negative-msgsz guard and the g_free of
 * host_mb appear on lines elided from this listing -- verify. */
3076 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
3077 ssize_t msgsz, int msgflg)
3079 struct target_msgbuf *target_mb;
3080 struct msgbuf *host_mb;
3084 return -TARGET_EINVAL;
3087 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
3088 return -TARGET_EFAULT;
/* sizeof(long) covers the host mtype field preceding mtext. */
3089 host_mb = g_try_malloc(msgsz + sizeof(long));
3091 unlock_user_struct(target_mb, msgp, 0);
3092 return -TARGET_ENOMEM;
3094 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
3095 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
3096 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
3098 unlock_user_struct(target_mb, msgp, 0);
/* Emulate msgrcv(2): receive into a temporary host msgbuf, then copy
 * mtext and the byteswapped mtype back to the guest buffer.  On success
 * ret is the number of mtext bytes received.  NOTE(review): the g_free
 * of host_mb and the error-path labels are on lines elided from this
 * listing -- verify against the full source. */
3103 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
3104 unsigned int msgsz, abi_long msgtyp,
3107 struct target_msgbuf *target_mb;
3109 struct msgbuf *host_mb;
3112 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
3113 return -TARGET_EFAULT;
3115 host_mb = g_malloc(msgsz+sizeof(long));
3116 ret = get_errno(msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
/* mtext in the guest buffer starts right after the abi-sized mtype. */
3119 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
3120 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
3121 if (!target_mtext) {
3122 ret = -TARGET_EFAULT;
3125 memcpy(target_mb->mtext, host_mb->mtext, ret);
3126 unlock_user(target_mtext, target_mtext_addr, ret);
3129 target_mb->mtype = tswapal(host_mb->mtype);
3133 unlock_user_struct(target_mb, msgp, 1);
/* Convert a guest shmid_ds at target_addr into *host_sd (ipc_perm block
 * plus the shm statistics fields, byteswapped via __get_user).
 * Returns 0 on success, -TARGET_EFAULT on fault. */
3138 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
3139 abi_ulong target_addr)
3141 struct target_shmid_ds *target_sd;
3143 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3144 return -TARGET_EFAULT;
3145 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
3146 return -TARGET_EFAULT;
3147 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3148 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
3149 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3150 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3151 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3152 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3153 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3154 unlock_user_struct(target_sd, target_addr, 0);
/* Mirror of target_to_host_shmid_ds(): write *host_sd out to the guest
 * shmid_ds at target_addr.  Returns 0 on success, -TARGET_EFAULT on
 * fault. */
3158 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
3159 struct shmid_ds *host_sd)
3161 struct target_shmid_ds *target_sd;
3163 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3164 return -TARGET_EFAULT;
3165 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
3166 return -TARGET_EFAULT;
3167 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3168 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
3169 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3170 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3171 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3172 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3173 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3174 unlock_user_struct(target_sd, target_addr, 1);
/* Guest-ABI layout of struct shminfo (field list elided in this
 * listing). */
3178 struct target_shminfo {
/* Copy a host struct shminfo out to the guest IPC_INFO shmctl buffer.
 * Returns 0 on success, -TARGET_EFAULT on fault. */
3186 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
3187 struct shminfo *host_shminfo)
3189 struct target_shminfo *target_shminfo;
3190 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
3191 return -TARGET_EFAULT;
3192 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
3193 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
3194 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
3195 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
3196 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
3197 unlock_user_struct(target_shminfo, target_addr, 1);
/* Guest-ABI layout of struct shm_info (leading fields elided in this
 * listing). */
3201 struct target_shm_info {
3206 abi_ulong swap_attempts;
3207 abi_ulong swap_successes;
/* Copy a host struct shm_info out to the guest SHM_INFO shmctl buffer.
 * Returns 0 on success, -TARGET_EFAULT on fault. */
3210 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
3211 struct shm_info *host_shm_info)
3213 struct target_shm_info *target_shm_info;
3214 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
3215 return -TARGET_EFAULT;
3216 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
3217 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
3218 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
3219 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
3220 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
3221 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
3222 unlock_user_struct(target_shm_info, target_addr, 1);
/* Emulate shmctl(2): dispatch on cmd, converting shmid_ds (IPC_STAT/
 * IPC_SET), shminfo (IPC_INFO) or shm_info (SHM_INFO) between guest and
 * host forms around the host call.  Case labels are elided in this
 * listing. */
3226 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
3228 struct shmid_ds dsarg;
3229 struct shminfo shminfo;
3230 struct shm_info shm_info;
3231 abi_long ret = -TARGET_EINVAL;
3239 if (target_to_host_shmid_ds(&dsarg, buf))
3240 return -TARGET_EFAULT;
3241 ret = get_errno(shmctl(shmid, cmd, &dsarg));
3242 if (host_to_target_shmid_ds(buf, &dsarg))
3243 return -TARGET_EFAULT;
/* IPC_INFO/SHM_INFO write through the shmid_ds-typed argument. */
3246 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
3247 if (host_to_target_shminfo(buf, &shminfo))
3248 return -TARGET_EFAULT;
3251 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
3252 if (host_to_target_shm_info(buf, &shm_info))
3253 return -TARGET_EFAULT;
/* Argument-less commands, e.g. IPC_RMID/SHM_LOCK. */
3258 ret = get_errno(shmctl(shmid, cmd, NULL));
/* Emulate shmat(2).  Stats the segment to learn its size, attaches it on
 * the host (directly at g2h(shmaddr) when the guest chose an address,
 * otherwise at a free guest VMA found by mmap_find_vma, with SHM_REMAP),
 * marks the guest pages valid, and records the mapping in shm_regions[]
 * so do_shmdt() can undo it.  Returns the guest attach address or a
 * -TARGET_* errno. */
3265 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
3269 struct shmid_ds shm_info;
3272 /* find out the length of the shared memory segment */
3273 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
3274 if (is_error(ret)) {
3275 /* can't get length, bail out */
/* Guest supplied an explicit address: attach at its host equivalent. */
3282 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
3284 abi_ulong mmap_start;
3286 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
3288 if (mmap_start == -1) {
3290 host_raddr = (void *)-1;
/* SHM_REMAP: the found VMA is already reserved, so replace it. */
3292 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
3295 if (host_raddr == (void *)-1) {
3297 return get_errno((long)host_raddr);
3299 raddr=h2g((unsigned long)host_raddr);
3301 page_set_flags(raddr, raddr + shm_info.shm_segsz,
3302 PAGE_VALID | PAGE_READ |
3303 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
/* Remember the attachment in the first free table slot. */
3305 for (i = 0; i < N_SHM_REGIONS; i++) {
3306 if (shm_regions[i].start == 0) {
3307 shm_regions[i].start = raddr;
3308 shm_regions[i].size = shm_info.shm_segsz;
/* Emulate shmdt(2): clear the matching shm_regions[] entry and its guest
 * page flags, then detach on the host.  If shmaddr is not a recorded
 * attachment, only the host shmdt result is returned. */
3318 static inline abi_long do_shmdt(abi_ulong shmaddr)
3322 for (i = 0; i < N_SHM_REGIONS; ++i) {
3323 if (shm_regions[i].start == shmaddr) {
3324 shm_regions[i].start = 0;
3325 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
3330 return get_errno(shmdt(g2h(shmaddr)));
3333 #ifdef TARGET_NR_ipc
3334 /* ??? This only works with linear mappings. */
3335 /* do_ipc() must return target values and target errnos. */
/* Dispatcher for the multiplexed SysV ipc(2) syscall: the low 16 bits of
 * 'call' select the operation (IPCOP_sem*/msg*/shm*), the high 16 bits
 * carry the ABI version.  Each case forwards to the matching do_*
 * helper above.  Several case labels are elided in this listing. */
3336 static abi_long do_ipc(unsigned int call, abi_long first,
3337 abi_long second, abi_long third,
3338 abi_long ptr, abi_long fifth)
3343 version = call >> 16;
3348 ret = do_semop(first, ptr, second);
3352 ret = get_errno(semget(first, second, third));
3355 case IPCOP_semctl: {
3356 /* The semun argument to semctl is passed by value, so dereference the
3359 get_user_ual(atptr, ptr);
3360 ret = do_semctl(first, second, third, atptr);
3365 ret = get_errno(msgget(first, second));
3369 ret = do_msgsnd(first, ptr, second, third);
3373 ret = do_msgctl(first, second, ptr);
/* Old-ABI msgrcv packs msgp and msgtyp into a kludge struct at ptr. */
3380 struct target_ipc_kludge {
3385 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
3386 ret = -TARGET_EFAULT;
3390 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
3392 unlock_user_struct(tmp, ptr, 0);
/* New ABI passes msgtyp directly in 'fifth'. */
3396 ret = do_msgrcv(first, ptr, second, fifth, third);
/* shmat returns its guest address through the pointer in 'third'. */
3405 raddr = do_shmat(first, ptr, second);
3406 if (is_error(raddr))
3407 return get_errno(raddr);
3408 if (put_user_ual(raddr, third))
3409 return -TARGET_EFAULT;
3413 ret = -TARGET_EINVAL;
3418 ret = do_shmdt(ptr);
3422 /* IPC_* flag values are the same on all linux platforms */
3423 ret = get_errno(shmget(first, second, third));
3426 /* IPC_* and SHM_* command values are the same on all linux platforms */
3428 ret = do_shmctl(first, second, ptr);
3431 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
3432 ret = -TARGET_ENOSYS;
3439 /* kernel structure types definitions */
/* X-macro trick: syscall_types.h is included twice -- first to build the
 * enum of STRUCT_* type ids, then to emit a per-struct argtype
 * descriptor array (struct_<name>_def) for the thunk layer. */
3441 #define STRUCT(name, ...) STRUCT_ ## name,
3442 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3444 #include "syscall_types.h"
3448 #undef STRUCT_SPECIAL
3450 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3451 #define STRUCT_SPECIAL(name)
3452 #include "syscall_types.h"
3454 #undef STRUCT_SPECIAL
/* One entry per supported ioctl: target/host command numbers, access
 * direction, an optional custom handler, and the argument's thunk type
 * description.  Some struct fields are elided in this listing. */
3456 typedef struct IOCTLEntry IOCTLEntry;
3458 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
3459 int fd, int cmd, abi_long arg);
3463 unsigned int host_cmd;
/* Custom conversion hook; NULL means generic thunk-based conversion. */
3466 do_ioctl_fn *do_ioctl;
3467 const argtype arg_type[5];
/* Access-direction flags for the generic ioctl path. */
3470 #define IOC_R 0x0001
3471 #define IOC_W 0x0002
3472 #define IOC_RW (IOC_R | IOC_W)
/* Size of the on-stack scratch buffer used for ioctl argument staging. */
3474 #define MAX_STRUCT_SIZE 4096
3476 #ifdef CONFIG_FIEMAP
3477 /* So fiemap access checks don't overflow on 32 bit systems.
3478 * This is very slightly smaller than the limit imposed by
3479 * the underlying kernel.
3481 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3482 / sizeof(struct fiemap_extent))
/* Custom handler for FS_IOC_FIEMAP: the argument is a struct fiemap
 * followed by fm_extent_count fiemap_extents filled in by the kernel,
 * so the generic fixed-size thunk path cannot handle it.  Converts the
 * header in, sizes (and if needed heap-allocates) the output buffer,
 * then converts header + extents back out. */
3484 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
3485 int fd, int cmd, abi_long arg)
3487 /* The parameter for this ioctl is a struct fiemap followed
3488 * by an array of struct fiemap_extent whose size is set
3489 * in fiemap->fm_extent_count. The array is filled in by the
3492 int target_size_in, target_size_out;
3494 const argtype *arg_type = ie->arg_type;
3495 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
3498 int i, extent_size = thunk_type_size(extent_arg_type, 0);
3502 assert(arg_type[0] == TYPE_PTR);
3503 assert(ie->access == IOC_RW);
3505 target_size_in = thunk_type_size(arg_type, 0);
3506 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
3508 return -TARGET_EFAULT;
3510 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3511 unlock_user(argptr, arg, 0);
3512 fm = (struct fiemap *)buf_temp;
/* Reject guest-supplied extent counts that would overflow the size
 * computation below. */
3513 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
3514 return -TARGET_EINVAL;
3517 outbufsz = sizeof (*fm) +
3518 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
3520 if (outbufsz > MAX_STRUCT_SIZE) {
3521 /* We can't fit all the extents into the fixed size buffer.
3522 * Allocate one that is large enough and use it instead.
3524 fm = g_try_malloc(outbufsz);
3526 return -TARGET_ENOMEM;
3528 memcpy(fm, buf_temp, sizeof(struct fiemap));
3531 ret = get_errno(ioctl(fd, ie->host_cmd, fm));
3532 if (!is_error(ret)) {
3533 target_size_out = target_size_in;
3534 /* An extent_count of 0 means we were only counting the extents
3535 * so there are no structs to copy
3537 if (fm->fm_extent_count != 0) {
3538 target_size_out += fm->fm_mapped_extents * extent_size;
3540 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
3542 ret = -TARGET_EFAULT;
3544 /* Convert the struct fiemap */
3545 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
3546 if (fm->fm_extent_count != 0) {
3547 p = argptr + target_size_in;
3548 /* ...and then all the struct fiemap_extents */
3549 for (i = 0; i < fm->fm_mapped_extents; i++) {
3550 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
3555 unlock_user(argptr, arg, target_size_out);
/* Custom handler for SIOCGIFCONF: struct ifconf embeds a pointer to a
 * caller-provided ifreq array, so both the struct and the array must be
 * converted.  The guest's ifc_len (in target-sized ifreqs) is rescaled
 * to host ifreqs for the host call and back again for the result.
 * NOTE(review): cleanup of the large-buffer allocation and the function
 * tail are on lines elided from this listing -- verify host_ifconf is
 * freed there when it was heap-allocated. */
3565 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
3566 int fd, int cmd, abi_long arg)
3568 const argtype *arg_type = ie->arg_type;
3572 struct ifconf *host_ifconf;
3574 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
3575 int target_ifreq_size;
3580 abi_long target_ifc_buf;
3584 assert(arg_type[0] == TYPE_PTR);
3585 assert(ie->access == IOC_RW);
3588 target_size = thunk_type_size(arg_type, 0);
3590 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3592 return -TARGET_EFAULT;
3593 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3594 unlock_user(argptr, arg, 0);
/* Stash the guest's buffer pointer/length before we overwrite them
 * with host-side values. */
3596 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
3597 target_ifc_len = host_ifconf->ifc_len;
3598 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
3600 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
3601 nb_ifreq = target_ifc_len / target_ifreq_size;
3602 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
3604 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
3605 if (outbufsz > MAX_STRUCT_SIZE) {
3606 /* We can't fit all the extents into the fixed size buffer.
3607 * Allocate one that is large enough and use it instead.
3609 host_ifconf = malloc(outbufsz);
3611 return -TARGET_ENOMEM;
3613 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
/* Host ifreq array lives immediately after the ifconf header. */
3616 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
3618 host_ifconf->ifc_len = host_ifc_len;
3619 host_ifconf->ifc_buf = host_ifc_buf;
3621 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
3622 if (!is_error(ret)) {
3623 /* convert host ifc_len to target ifc_len */
3625 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
3626 target_ifc_len = nb_ifreq * target_ifreq_size;
3627 host_ifconf->ifc_len = target_ifc_len;
3629 /* restore target ifc_buf */
3631 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
3633 /* copy struct ifconf to target user */
3635 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3637 return -TARGET_EFAULT;
3638 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
3639 unlock_user(argptr, arg, target_size);
3641 /* copy ifreq[] to target user */
/* NOTE(review): no NULL check on this lock_user result is visible. */
3643 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
3644 for (i = 0; i < nb_ifreq ; i++) {
3645 thunk_convert(argptr + i * target_ifreq_size,
3646 host_ifc_buf + i * sizeof(struct ifreq),
3647 ifreq_arg_type, THUNK_TARGET);
3649 unlock_user(argptr, target_ifc_buf, target_ifc_len);
/* Custom handler for device-mapper ioctls.  A struct dm_ioctl header is
 * followed by a variable-length, command-specific payload at
 * data_start; the payload is converted per command in both directions.
 * Many case labels, error checks and the cleanup tail are elided in
 * this listing. */
3659 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
3660 int cmd, abi_long arg)
3663 struct dm_ioctl *host_dm;
3664 abi_long guest_data;
3665 uint32_t guest_data_size;
3667 const argtype *arg_type = ie->arg_type;
3669 void *big_buf = NULL;
3673 target_size = thunk_type_size(arg_type, 0);
3674 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3676 ret = -TARGET_EFAULT;
3679 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3680 unlock_user(argptr, arg, 0);
3682 /* buf_temp is too small, so fetch things into a bigger buffer */
/* data_size * 2 leaves room for size growth during conversion. */
3683 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
3684 memcpy(big_buf, buf_temp, target_size);
3688 guest_data = arg + host_dm->data_start;
3689 if ((guest_data - arg) < 0) {
3693 guest_data_size = host_dm->data_size - host_dm->data_start;
3694 host_data = (char*)host_dm + host_dm->data_start;
/* Convert the guest payload into host_data, per command. */
3696 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
3697 switch (ie->host_cmd) {
3699 case DM_LIST_DEVICES:
3702 case DM_DEV_SUSPEND:
3705 case DM_TABLE_STATUS:
3706 case DM_TABLE_CLEAR:
3708 case DM_LIST_VERSIONS:
3712 case DM_DEV_SET_GEOMETRY:
3713 /* data contains only strings */
3714 memcpy(host_data, argptr, guest_data_size);
3717 memcpy(host_data, argptr, guest_data_size);
/* First 8 bytes of the payload are a device number: byteswap it. */
3718 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
/* DM_TABLE_LOAD: convert each dm_target_spec plus its trailing
 * parameter string. */
3722 void *gspec = argptr;
3723 void *cur_data = host_data;
3724 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3725 int spec_size = thunk_type_size(arg_type, 0);
3728 for (i = 0; i < host_dm->target_count; i++) {
3729 struct dm_target_spec *spec = cur_data;
3733 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
3734 slen = strlen((char*)gspec + spec_size) + 1;
3736 spec->next = sizeof(*spec) + slen;
3737 strcpy((char*)&spec[1], gspec + spec_size);
3739 cur_data += spec->next;
3744 ret = -TARGET_EINVAL;
3745 unlock_user(argptr, guest_data, 0);
3748 unlock_user(argptr, guest_data, 0);
3750 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3751 if (!is_error(ret)) {
/* Convert the (possibly rewritten) payload back to guest form. */
3752 guest_data = arg + host_dm->data_start;
3753 guest_data_size = host_dm->data_size - host_dm->data_start;
3754 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
3755 switch (ie->host_cmd) {
3760 case DM_DEV_SUSPEND:
3763 case DM_TABLE_CLEAR:
3765 case DM_DEV_SET_GEOMETRY:
3766 /* no return data */
3768 case DM_LIST_DEVICES:
/* Walk the linked dm_name_list, converting each node + name and
 * recomputing 'next' offsets for the target's record sizes. */
3770 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
3771 uint32_t remaining_data = guest_data_size;
3772 void *cur_data = argptr;
3773 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
3774 int nl_size = 12; /* can't use thunk_size due to alignment */
3777 uint32_t next = nl->next;
3779 nl->next = nl_size + (strlen(nl->name) + 1);
3781 if (remaining_data < nl->next) {
3782 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3785 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
3786 strcpy(cur_data + nl_size, nl->name);
3787 cur_data += nl->next;
3788 remaining_data -= nl->next;
3792 nl = (void*)nl + next;
3797 case DM_TABLE_STATUS:
/* Walk the dm_target_spec records, converting each plus its
 * status/params string. */
3799 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
3800 void *cur_data = argptr;
3801 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3802 int spec_size = thunk_type_size(arg_type, 0);
3805 for (i = 0; i < host_dm->target_count; i++) {
3806 uint32_t next = spec->next;
3807 int slen = strlen((char*)&spec[1]) + 1;
3808 spec->next = (cur_data - argptr) + spec_size + slen;
3809 if (guest_data_size < spec->next) {
3810 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3813 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
3814 strcpy(cur_data + spec_size, (char*)&spec[1]);
3815 cur_data = argptr + spec->next;
3816 spec = (void*)host_dm + host_dm->data_start + next;
/* DM_TABLE_DEPS: count followed by an array of 64-bit device numbers. */
3822 void *hdata = (void*)host_dm + host_dm->data_start;
3823 int count = *(uint32_t*)hdata;
3824 uint64_t *hdev = hdata + 8;
3825 uint64_t *gdev = argptr + 8;
3828 *(uint32_t*)argptr = tswap32(count);
3829 for (i = 0; i < count; i++) {
3830 *gdev = tswap64(*hdev);
3836 case DM_LIST_VERSIONS:
/* Walk the dm_target_versions list, same pattern as DM_LIST_DEVICES. */
3838 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
3839 uint32_t remaining_data = guest_data_size;
3840 void *cur_data = argptr;
3841 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
3842 int vers_size = thunk_type_size(arg_type, 0);
3845 uint32_t next = vers->next;
3847 vers->next = vers_size + (strlen(vers->name) + 1);
3849 if (remaining_data < vers->next) {
3850 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3853 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
3854 strcpy(cur_data + vers_size, vers->name);
3855 cur_data += vers->next;
3856 remaining_data -= vers->next;
3860 vers = (void*)vers + next;
3865 unlock_user(argptr, guest_data, 0);
3866 ret = -TARGET_EINVAL;
3869 unlock_user(argptr, guest_data, guest_data_size);
/* Finally convert the dm_ioctl header itself back to the guest. */
3871 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3873 ret = -TARGET_EFAULT;
3876 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3877 unlock_user(argptr, arg, target_size);
/* Custom handler for BLKPG: struct blkpg_ioctl_arg embeds a pointer to
 * a struct blkpg_partition payload, so the payload must be fetched and
 * converted separately, then the pointer redirected to the host copy
 * before issuing the ioctl. */
3884 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
3885 int cmd, abi_long arg)
3889 const argtype *arg_type = ie->arg_type;
3890 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
3893 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
3894 struct blkpg_partition host_part;
3896 /* Read and convert blkpg */
3898 target_size = thunk_type_size(arg_type, 0);
3899 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3901 ret = -TARGET_EFAULT;
3904 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3905 unlock_user(argptr, arg, 0);
3907 switch (host_blkpg->op) {
3908 case BLKPG_ADD_PARTITION:
3909 case BLKPG_DEL_PARTITION:
3910 /* payload is struct blkpg_partition */
3913 /* Unknown opcode */
3914 ret = -TARGET_EINVAL;
3918 /* Read and convert blkpg->data */
/* After THUNK_HOST conversion, data still holds the guest address. */
3919 arg = (abi_long)(uintptr_t)host_blkpg->data;
3920 target_size = thunk_type_size(part_arg_type, 0);
3921 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3923 ret = -TARGET_EFAULT;
3926 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
3927 unlock_user(argptr, arg, 0);
3929 /* Swizzle the data pointer to our local copy and call! */
3930 host_blkpg->data = &host_part;
3931 ret = get_errno(ioctl(fd, ie->host_cmd, host_blkpg));
/* Custom handler for routing-table ioctls (SIOCADDRT/SIOCDELRT): the
 * rtentry struct is converted field by field so the rt_dev member --
 * a pointer to a device-name string -- can be locked in from guest
 * memory instead of being thunk-converted as a plain value.
 * NOTE(review): if the field loop never matched rt_dev,
 * host_rt_dev_ptr would be read uninitialized at the end; the
 * STRUCT_rtentry assert appears to guarantee a match -- confirm. */
3937 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
3938 int fd, int cmd, abi_long arg)
3940 const argtype *arg_type = ie->arg_type;
3941 const StructEntry *se;
3942 const argtype *field_types;
3943 const int *dst_offsets, *src_offsets;
3946 abi_ulong *target_rt_dev_ptr;
3947 unsigned long *host_rt_dev_ptr;
3951 assert(ie->access == IOC_W);
3952 assert(*arg_type == TYPE_PTR);
3954 assert(*arg_type == TYPE_STRUCT);
3955 target_size = thunk_type_size(arg_type, 0);
3956 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3958 return -TARGET_EFAULT;
3961 assert(*arg_type == (int)STRUCT_rtentry);
3962 se = struct_entries + *arg_type++;
3963 assert(se->convert[0] == NULL);
3964 /* convert struct here to be able to catch rt_dev string */
3965 field_types = se->field_types;
3966 dst_offsets = se->field_offsets[THUNK_HOST];
3967 src_offsets = se->field_offsets[THUNK_TARGET];
3968 for (i = 0; i < se->nb_fields; i++) {
3969 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
3970 assert(*field_types == TYPE_PTRVOID);
3971 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
3972 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
3973 if (*target_rt_dev_ptr != 0) {
/* Lock the guest device-name string; its host address becomes
 * the rt_dev pointer seen by the host ioctl. */
3974 *host_rt_dev_ptr = (unsigned long)lock_user_string(
3975 tswapal(*target_rt_dev_ptr));
3976 if (!*host_rt_dev_ptr) {
3977 unlock_user(argptr, arg, 0);
3978 return -TARGET_EFAULT;
3981 *host_rt_dev_ptr = 0;
/* All other fields go through the normal thunk conversion. */
3986 field_types = thunk_convert(buf_temp + dst_offsets[i],
3987 argptr + src_offsets[i],
3988 field_types, THUNK_HOST);
3990 unlock_user(argptr, arg, 0);
3992 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3993 if (*host_rt_dev_ptr != 0) {
3994 unlock_user((void *)*host_rt_dev_ptr,
3995 *target_rt_dev_ptr, 0);
/*
 * do_ioctl_kdsigaccept: special-case handler for KDSIGACCEPT, whose
 * argument is a signal number rather than a pointer.  The target signal
 * number is translated to the host numbering before the host ioctl.
 */
4000 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
4001                                      int fd, int cmd, abi_long arg)
4003     int sig = target_to_host_signal(arg);
4004     return get_errno(ioctl(fd, ie->host_cmd, sig));
/*
 * Table of supported ioctls, terminated by a zero target_cmd entry.
 * IOCTL() registers a command handled by the generic thunk-based
 * conversion (do_ioctl field left 0); IOCTL_SPECIAL() additionally
 * supplies a custom handler function (dofn) such as do_ioctl_rt.
 * The table contents themselves are pulled in elsewhere (not visible
 * in this chunk).
 */
4007 static IOCTLEntry ioctl_entries[] = {
4008 #define IOCTL(cmd, access, ...) \
4009     { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
4010 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
4011     { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
4016 /* ??? Implement proper locking for ioctls. */
4017 /* do_ioctl() Must return target values and target errnos. */
/*
 * Generic ioctl dispatcher: look the target command up in
 * ioctl_entries[], delegate to a custom handler if one is registered,
 * otherwise convert the argument according to its argtype and access
 * mode (IOC_R/IOC_W) using the thunk machinery and issue the host
 * ioctl.  NOTE(review): several lines (lookup loop, access-mode case
 * labels, closing braces) are not visible in this chunk.
 */
4018 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
4020     const IOCTLEntry *ie;
4021     const argtype *arg_type;
4023     uint8_t buf_temp[MAX_STRUCT_SIZE];
     /* End of table reached without a match: unsupported command. */
4029         if (ie->target_cmd == 0) {
4030             gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
4031             return -TARGET_ENOSYS;
4033         if (ie->target_cmd == cmd)
4037     arg_type = ie->arg_type;
4039     gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
     /* Registered special-case handler takes over completely. */
4042         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
4045     switch(arg_type[0]) {
     /* No argument at all. */
4048         ret = get_errno(ioctl(fd, ie->host_cmd));
     /* Plain integer argument: passed through unchanged. */
4052         ret = get_errno(ioctl(fd, ie->host_cmd, arg));
     /* Pointer-to-struct argument: convert per access mode. */
4056         target_size = thunk_type_size(arg_type, 0);
4057         switch(ie->access) {
         /* Read-only (from the guest's view): host fills buf_temp,
          * result is converted back into guest memory on success. */
4059             ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
4060             if (!is_error(ret)) {
4061                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4063                     return -TARGET_EFAULT;
4064                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4065                 unlock_user(argptr, arg, target_size);
         /* Write-only: guest struct converted to host form first. */
4069             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4071                 return -TARGET_EFAULT;
4072             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4073             unlock_user(argptr, arg, 0);
4074             ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
         /* Read-write: convert in, call, convert back out on success. */
4078             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4080                 return -TARGET_EFAULT;
4081             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4082             unlock_user(argptr, arg, 0);
4083             ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
4084             if (!is_error(ret)) {
4085                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4087                     return -TARGET_EFAULT;
4088                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4089                 unlock_user(argptr, arg, target_size);
4095         gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
4096                  (long)cmd, arg_type[0]);
4097         ret = -TARGET_ENOSYS;
/*
 * Target <-> host translation table for termios c_iflag bits.
 * Each row is { target_mask, target_bits, host_mask, host_bits }.
 * Most input flags map one-to-one; the terminating all-zero row is
 * outside this chunk.
 */
4103 static const bitmask_transtbl iflag_tbl[] = {
4104         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
4105         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
4106         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
4107         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
4108         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
4109         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
4110         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
4111         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
4112         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
4113         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
4114         { TARGET_IXON, TARGET_IXON, IXON, IXON },
4115         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
4116         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
4117         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
/*
 * Target <-> host translation table for termios c_oflag bits.
 * Single-bit flags map one-to-one; multi-bit delay fields (NLDLY,
 * CRDLY, TABDLY, BSDLY, VTDLY, FFDLY) get one row per possible value
 * within the field's mask.  Terminating all-zero row not visible here.
 */
4121 static const bitmask_transtbl oflag_tbl[] = {
4122         { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
4123         { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
4124         { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
4125         { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
4126         { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
4127         { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
4128         { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
4129         { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
4130         { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
4131         { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
4132         { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
4133         { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
4134         { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
4135         { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
4136         { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
4137         { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
4138         { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
4139         { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
4140         { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
4141         { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
4142         { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
4143         { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
4144         { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
4145         { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
/*
 * Target <-> host translation table for termios c_cflag bits.
 * The CBAUD field gets one row per baud-rate value, CSIZE one per
 * character size; the remaining control flags map one-to-one.
 * Terminating all-zero row not visible here.
 */
4149 static const bitmask_transtbl cflag_tbl[] = {
4150         { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
4151         { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
4152         { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
4153         { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
4154         { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
4155         { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
4156         { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
4157         { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
4158         { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
4159         { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
4160         { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
4161         { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
4162         { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
4163         { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
4164         { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
4165         { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
4166         { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
4167         { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
4168         { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
4169         { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
4170         { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
4171         { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
4172         { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
4173         { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
4174         { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
4175         { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
4176         { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
4177         { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
4178         { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
4179         { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
4180         { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
/*
 * Target <-> host translation table for termios c_lflag (local mode)
 * bits; all one-to-one mappings.  Terminating all-zero row not visible
 * in this chunk.
 */
4184 static const bitmask_transtbl lflag_tbl[] = {
4185         { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
4186         { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
4187         { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
4188         { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
4189         { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
4190         { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
4191         { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
4192         { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
4193         { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
4194         { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
4195         { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
4196         { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
4197         { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
4198         { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
4199         { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
/*
 * Convert a target_termios struct (guest byte order and flag encoding)
 * into a host_termios struct: each flag word is byte-swapped and run
 * through its bitmask translation table, and the control characters
 * are copied slot by slot via the TARGET_V* -> V* index mapping.
 * NOTE(review): the assignments of the converted flag words to the
 * host struct fields are on lines not visible in this chunk.
 */
4203 static void target_to_host_termios (void *dst, const void *src)
4205     struct host_termios *host = dst;
4206     const struct target_termios *target = src;
4209         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
4211         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
4213         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
4215         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
4216     host->c_line = target->c_line;
     /* Clear first: target and host c_cc arrays may differ in layout. */
4218     memset(host->c_cc, 0, sizeof(host->c_cc));
4219     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
4220     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
4221     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
4222     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
4223     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
4224     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
4225     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
4226     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
4227     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
4228     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
4229     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
4230     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
4231     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
4232     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
4233     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
4234     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
4235     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
/*
 * Inverse of target_to_host_termios: convert a host_termios struct
 * into the guest's target_termios representation (flag tables applied
 * in the host->target direction, results byte-swapped for the guest,
 * control characters copied via the V* -> TARGET_V* index mapping).
 * NOTE(review): the assignments of the converted flag words to the
 * target struct fields are on lines not visible in this chunk.
 */
4238 static void host_to_target_termios (void *dst, const void *src)
4240     struct target_termios *target = dst;
4241     const struct host_termios *host = src;
4244         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
4246         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
4248         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
4250         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
4251     target->c_line = host->c_line;
     /* Clear first: target and host c_cc arrays may differ in layout. */
4253     memset(target->c_cc, 0, sizeof(target->c_cc));
4254     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
4255     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
4256     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
4257     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
4258     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
4259     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
4260     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
4261     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
4262     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
4263     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
4264     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
4265     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
4266     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
4267     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
4268     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
4269     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
4270     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
/*
 * StructEntry registered for the termios type: supplies the custom
 * converters above (index 0 = to-target, index 1 = to-host) plus the
 * size and alignment of each representation, so the thunk machinery
 * uses these instead of generic field-wise conversion.
 */
4273 static const StructEntry struct_termios_def = {
4274     .convert = { host_to_target_termios, target_to_host_termios },
4275     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
4276     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
/*
 * Target <-> host translation table for mmap() flag bits.
 * The trailing rows (the MAP_NORESERVE host-side value and the
 * terminating all-zero entry) are on lines not visible in this chunk.
 */
4279 static bitmask_transtbl mmap_flags_tbl[] = {
4280 	{ TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
4281 	{ TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
4282 	{ TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
4283 	{ TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
4284 	{ TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
4285 	{ TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
4286 	{ TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
4287 	{ TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
4288 	{ TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE, MAP_NORESERVE,
4293 #if defined(TARGET_I386)
4295 /* NOTE: there is really one LDT for all the threads */
/* Backing store for the emulated LDT; allocated lazily by write_ldt(). */
4296 static uint8_t *ldt_table;
/*
 * read_ldt: implement modify_ldt(0) — copy up to bytecount bytes of
 * the emulated LDT into guest memory at ptr.  Returns -TARGET_EFAULT
 * if the guest buffer cannot be locked.  NOTE(review): the early-exit
 * for a NULL ldt_table and the final return are on lines not visible
 * in this chunk.
 */
4298 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
4305     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
4306     if (size > bytecount)
4308     p = lock_user(VERIFY_WRITE, ptr, size, 0);
4310         return -TARGET_EFAULT;
4311     /* ??? Should this by byteswapped? */
4312     memcpy(p, ldt_table, size);
4313     unlock_user(p, ptr, size);
4317 /* XXX: add locking support */
/*
 * write_ldt: implement modify_ldt(1)/modify_ldt(0x11) — install one
 * descriptor into the emulated LDT from a guest
 * target_modify_ldt_ldt_s.  The flag bits are unpacked, validated,
 * and re-encoded into the two 32-bit halves of an x86 descriptor
 * (same encoding as the Linux kernel's fill_ldt()).  The LDT backing
 * memory is allocated on first use.  oldmode selects the legacy
 * entry-clearing semantics.  NOTE(review): several lines (empty-entry
 * handling, oldmode checks, final return) are not visible in this
 * chunk.
 */
4318 static abi_long write_ldt(CPUX86State *env,
4319                           abi_ulong ptr, unsigned long bytecount, int oldmode)
4321     struct target_modify_ldt_ldt_s ldt_info;
4322     struct target_modify_ldt_ldt_s *target_ldt_info;
4323     int seg_32bit, contents, read_exec_only, limit_in_pages;
4324     int seg_not_present, useable, lm;
4325     uint32_t *lp, entry_1, entry_2;
4327     if (bytecount != sizeof(ldt_info))
4328         return -TARGET_EINVAL;
4329     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
4330         return -TARGET_EFAULT;
4331     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
4332     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
4333     ldt_info.limit = tswap32(target_ldt_info->limit);
4334     ldt_info.flags = tswap32(target_ldt_info->flags);
4335     unlock_user_struct(target_ldt_info, ptr, 0);
4337     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
4338         return -TARGET_EINVAL;
     /* Unpack the packed flags word (same layout as the kernel ABI). */
4339     seg_32bit = ldt_info.flags & 1;
4340     contents = (ldt_info.flags >> 1) & 3;
4341     read_exec_only = (ldt_info.flags >> 3) & 1;
4342     limit_in_pages = (ldt_info.flags >> 4) & 1;
4343     seg_not_present = (ldt_info.flags >> 5) & 1;
4344     useable = (ldt_info.flags >> 6) & 1;
4348     lm = (ldt_info.flags >> 7) & 1;
     /* contents == 3 would describe an invalid segment type. */
4350     if (contents == 3) {
4352             return -TARGET_EINVAL;
4353         if (seg_not_present == 0)
4354             return -TARGET_EINVAL;
4356     /* allocate the LDT */
4358         env->ldt.base = target_mmap(0,
4359                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
4360                                     PROT_READ|PROT_WRITE,
4361                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
4362         if (env->ldt.base == -1)
4363             return -TARGET_ENOMEM;
4364         memset(g2h(env->ldt.base), 0,
4365                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
4366         env->ldt.limit = 0xffff;
4367         ldt_table = g2h(env->ldt.base);
4370     /* NOTE: same code as Linux kernel */
4371     /* Allow LDTs to be cleared by the user. */
4372     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
4375              read_exec_only == 1 &&
4377              limit_in_pages == 0 &&
4378              seg_not_present == 1 &&
     /* Re-encode the descriptor into its two 32-bit hardware halves. */
4386     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4387         (ldt_info.limit & 0x0ffff);
4388     entry_2 = (ldt_info.base_addr & 0xff000000) |
4389         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
4390         (ldt_info.limit & 0xf0000) |
4391         ((read_exec_only ^ 1) << 9) |
4393         ((seg_not_present ^ 1) << 15) |
4395         (limit_in_pages << 23) |
4399         entry_2 |= (useable << 20);
4401     /* Install the new entry ... */
     /* Each LDT entry is 8 bytes, hence the << 3 scaling. */
4403     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
4404     lp[0] = tswap32(entry_1);
4405     lp[1] = tswap32(entry_2);
4409 /* specific and weird i386 syscalls */
/*
 * do_modify_ldt: dispatch the i386 modify_ldt syscall by function
 * number — 0 reads the LDT, 1 writes in "old mode", 0x11 writes in
 * the current mode; anything else is unsupported.  (Case labels and
 * braces are on lines not visible in this chunk.)
 */
4410 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
4411                               unsigned long bytecount)
4417         ret = read_ldt(ptr, bytecount);
4420         ret = write_ldt(env, ptr, bytecount, 1);
4423         ret = write_ldt(env, ptr, bytecount, 0);
4426         ret = -TARGET_ENOSYS;
4432 #if defined(TARGET_I386) && defined(TARGET_ABI32)
/*
 * do_set_thread_area: emulate the i386 set_thread_area syscall —
 * install a TLS descriptor into the emulated GDT.  If the guest
 * passes entry_number == -1, a free TLS slot is picked and written
 * back to the guest struct.  The flag unpacking and descriptor
 * encoding mirror write_ldt() above.  NOTE(review): several lines
 * (no-free-slot error path, empty-descriptor handling, final return)
 * are not visible in this chunk.
 */
4433 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
4435     uint64_t *gdt_table = g2h(env->gdt.base);
4436     struct target_modify_ldt_ldt_s ldt_info;
4437     struct target_modify_ldt_ldt_s *target_ldt_info;
4438     int seg_32bit, contents, read_exec_only, limit_in_pages;
4439     int seg_not_present, useable, lm;
4440     uint32_t *lp, entry_1, entry_2;
4443     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
4444     if (!target_ldt_info)
4445         return -TARGET_EFAULT;
4446     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
4447     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
4448     ldt_info.limit = tswap32(target_ldt_info->limit);
4449     ldt_info.flags = tswap32(target_ldt_info->flags);
     /* -1 means "allocate a slot": scan the TLS range for a free one
      * and report the chosen index back to the guest. */
4450     if (ldt_info.entry_number == -1) {
4451         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
4452             if (gdt_table[i] == 0) {
4453                 ldt_info.entry_number = i;
4454                 target_ldt_info->entry_number = tswap32(i);
4459     unlock_user_struct(target_ldt_info, ptr, 1);
4461     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
4462         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
4463         return -TARGET_EINVAL;
     /* Unpack the packed flags word (same layout as the kernel ABI). */
4464     seg_32bit = ldt_info.flags & 1;
4465     contents = (ldt_info.flags >> 1) & 3;
4466     read_exec_only = (ldt_info.flags >> 3) & 1;
4467     limit_in_pages = (ldt_info.flags >> 4) & 1;
4468     seg_not_present = (ldt_info.flags >> 5) & 1;
4469     useable = (ldt_info.flags >> 6) & 1;
4473     lm = (ldt_info.flags >> 7) & 1;
4476     if (contents == 3) {
4477         if (seg_not_present == 0)
4478             return -TARGET_EINVAL;
4481     /* NOTE: same code as Linux kernel */
4482     /* Allow LDTs to be cleared by the user. */
4483     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
4484         if ((contents == 0             &&
4485              read_exec_only == 1       &&
4487              limit_in_pages == 0       &&
4488              seg_not_present == 1      &&
     /* Re-encode the descriptor into its two 32-bit hardware halves. */
4496     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4497         (ldt_info.limit & 0x0ffff);
4498     entry_2 = (ldt_info.base_addr & 0xff000000) |
4499         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
4500         (ldt_info.limit & 0xf0000) |
4501         ((read_exec_only ^ 1) << 9) |
4503         ((seg_not_present ^ 1) << 15) |
4505         (limit_in_pages << 23) |
4510     /* Install the new entry ... */
4512     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
4513     lp[0] = tswap32(entry_1);
4514     lp[1] = tswap32(entry_2);
/*
 * do_get_thread_area: emulate the i386 get_thread_area syscall —
 * read the GDT TLS descriptor whose index the guest supplies in
 * entry_number, decode the two 32-bit halves back into base/limit/
 * flags, and write the result into the guest struct.  This is the
 * exact inverse of the encoding in do_set_thread_area().
 */
4518 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
4520     struct target_modify_ldt_ldt_s *target_ldt_info;
4521     uint64_t *gdt_table = g2h(env->gdt.base);
4522     uint32_t base_addr, limit, flags;
4523     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
4524     int seg_not_present, useable, lm;
4525     uint32_t *lp, entry_1, entry_2;
4527     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
4528     if (!target_ldt_info)
4529         return -TARGET_EFAULT;
4530     idx = tswap32(target_ldt_info->entry_number);
4531     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
4532         idx > TARGET_GDT_ENTRY_TLS_MAX) {
4533         unlock_user_struct(target_ldt_info, ptr, 1);
4534         return -TARGET_EINVAL;
4536     lp = (uint32_t *)(gdt_table + idx);
4537     entry_1 = tswap32(lp[0]);
4538     entry_2 = tswap32(lp[1]);
     /* Decode the descriptor bits (inverse of the write-side packing;
      * the ^ 1 undoes the inverted encoding of these two bits). */
4540     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
4541     contents = (entry_2 >> 10) & 3;
4542     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
4543     seg_32bit = (entry_2 >> 22) & 1;
4544     limit_in_pages = (entry_2 >> 23) & 1;
4545     useable = (entry_2 >> 20) & 1;
4549     lm = (entry_2 >> 21) & 1;
     /* Repack into the guest-ABI flags word and scattered base/limit. */
4551     flags = (seg_32bit << 0) | (contents << 1) |
4552         (read_exec_only << 3) | (limit_in_pages << 4) |
4553         (seg_not_present << 5) | (useable << 6) | (lm << 7);
4554     limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
4555     base_addr = (entry_1 >> 16) |
4556                 (entry_2 & 0xff000000) |
4557                 ((entry_2 & 0xff) << 16);
4558     target_ldt_info->base_addr = tswapal(base_addr);
4559     target_ldt_info->limit = tswap32(limit);
4560     target_ldt_info->flags = tswap32(flags);
4561     unlock_user_struct(target_ldt_info, ptr, 1);
4566 #ifndef TARGET_ABI32
4567 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
4574 case TARGET_ARCH_SET_GS:
4575 case TARGET_ARCH_SET_FS:
4576 if (code == TARGET_ARCH_SET_GS)
4580 cpu_x86_load_seg(env, idx, 0);
4581 env->segs[idx].base = addr;
4583 case TARGET_ARCH_GET_GS:
4584 case TARGET_ARCH_GET_FS:
4585 if (code == TARGET_ARCH_GET_GS)
4589 val = env->segs[idx].base;
4590 if (put_user(val, addr, abi_ulong))
4591 ret = -TARGET_EFAULT;
4594 ret = -TARGET_EINVAL;
4601 #endif /* defined(TARGET_I386) */
/* Stack size for threads created via clone(CLONE_VM). */
4603 #define NEW_STACK_SIZE 0x40000
/* Serializes thread creation so parent/child setup appears atomic. */
4606 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
/*
 * Handshake data passed from do_fork() to the new thread's
 * clone_func(); the mutex/cond pair lets the parent wait until the
 * child has published its tid.  (The struct's opening/closing lines
 * and remaining members are not visible in this chunk.)
 */
4609     pthread_mutex_t mutex;
4610     pthread_cond_t cond;
4613     abi_ulong child_tidptr;
4614     abi_ulong parent_tidptr;
/*
 * clone_func: entry point of a pthread created to emulate
 * clone(CLONE_VM).  Publishes the new tid (to the CPUState and to the
 * guest child/parent tid pointers if requested), restores the signal
 * mask, signals readiness to the parent via the info cond/mutex, then
 * waits on clone_lock until the parent has finished TLS setup before
 * entering the guest CPU loop (loop entry not visible in this chunk).
 */
4618 static void *clone_func(void *arg)
4620     new_thread_info *info = arg;
4625     rcu_register_thread();
4627     cpu = ENV_GET_CPU(env);
4629     ts = (TaskState *)cpu->opaque;
4630     info->tid = gettid();
4631     cpu->host_tid = info->tid;
4633     if (info->child_tidptr)
4634         put_user_u32(info->tid, info->child_tidptr);
4635     if (info->parent_tidptr)
4636         put_user_u32(info->tid, info->parent_tidptr);
4637     /* Enable signals.  */
4638     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
4639     /* Signal to the parent that we're ready.  */
4640     pthread_mutex_lock(&info->mutex);
4641     pthread_cond_broadcast(&info->cond);
4642     pthread_mutex_unlock(&info->mutex);
4643     /* Wait until the parent has finshed initializing the tls state.  */
4644     pthread_mutex_lock(&clone_lock);
4645     pthread_mutex_unlock(&clone_lock);
4651 /* do_fork() Must return host values and target errnos (unlike most
4652    do_*() functions). */
/*
 * do_fork: implement the clone/fork/vfork family.  With CLONE_VM set,
 * a new host pthread is created running a copied CPU state
 * (thread-style clone, with a cond-var handshake so the parent waits
 * for the child's tid); without CLONE_VM, a plain host fork() is used
 * (vfork is demoted to fork).  NOTE(review): several lines (the
 * fork() call itself, NPTL flag extraction, error paths, returns) are
 * not visible in this chunk.
 */
4653 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
4654                    abi_ulong parent_tidptr, target_ulong newtls,
4655                    abi_ulong child_tidptr)
4657     CPUState *cpu = ENV_GET_CPU(env);
4661     CPUArchState *new_env;
4662     unsigned int nptl_flags;
4665     /* Emulate vfork() with fork() */
4666     if (flags & CLONE_VFORK)
4667         flags &= ~(CLONE_VFORK | CLONE_VM);
4669     if (flags & CLONE_VM) {
4670         TaskState *parent_ts = (TaskState *)cpu->opaque;
4671         new_thread_info info;
4672         pthread_attr_t attr;
4674         ts = g_new0(TaskState, 1);
4675         init_task_state(ts);
4676         /* we create a new CPU instance. */
4677         new_env = cpu_copy(env);
4678         /* Init regs that differ from the parent.  */
4679         cpu_clone_regs(new_env, newsp);
4680         new_cpu = ENV_GET_CPU(new_env);
4681         new_cpu->opaque = ts;
4682         ts->bprm = parent_ts->bprm;
4683         ts->info = parent_ts->info;
         /* NPTL-specific flags are handled here, not passed to the host. */
4685         flags &= ~CLONE_NPTL_FLAGS2;
4687         if (nptl_flags & CLONE_CHILD_CLEARTID) {
4688             ts->child_tidptr = child_tidptr;
4691         if (nptl_flags & CLONE_SETTLS)
4692             cpu_set_tls (new_env, newtls);
4694         /* Grab a mutex so that thread setup appears atomic.  */
4695         pthread_mutex_lock(&clone_lock);
4697         memset(&info, 0, sizeof(info));
4698         pthread_mutex_init(&info.mutex, NULL);
4699         pthread_mutex_lock(&info.mutex);
4700         pthread_cond_init(&info.cond, NULL);
4702         if (nptl_flags & CLONE_CHILD_SETTID)
4703             info.child_tidptr = child_tidptr;
4704         if (nptl_flags & CLONE_PARENT_SETTID)
4705             info.parent_tidptr = parent_tidptr;
4707         ret = pthread_attr_init(&attr);
4708         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
4709         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
4710         /* It is not safe to deliver signals until the child has finished
4711            initializing, so temporarily block all signals.  */
4712         sigfillset(&sigmask);
4713         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
4715         ret = pthread_create(&info.thread, &attr, clone_func, &info);
4716         /* TODO: Free new CPU state if thread creation failed.  */
4718         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
4719         pthread_attr_destroy(&attr);
             /* Wait for the child to initialize.  */
4722             pthread_cond_wait(&info.cond, &info.mutex);
4724             if (flags & CLONE_PARENT_SETTID)
4725                 put_user_u32(ret, parent_tidptr);
4729         pthread_mutex_unlock(&info.mutex);
4730         pthread_cond_destroy(&info.cond);
4731         pthread_mutex_destroy(&info.mutex);
4732         pthread_mutex_unlock(&clone_lock);
4734         /* if no CLONE_VM, we consider it is a fork */
         /* Only the signal mask bits and NPTL flags are supported here. */
4735         if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0) {
4736             return -TARGET_EINVAL;
4741             /* Child Process.  */
4743             cpu_clone_regs(env, newsp);
4745             /* There is a race condition here.  The parent process could
4746                theoretically read the TID in the child process before the child
4747                tid is set.  This would require using either ptrace
4748                (not implemented) or having *_tidptr to point at a shared memory
4749                mapping.  We can't repeat the spinlock hack used above because
4750                the child process gets its own copy of the lock.  */
4751             if (flags & CLONE_CHILD_SETTID)
4752                 put_user_u32(gettid(), child_tidptr);
4753             if (flags & CLONE_PARENT_SETTID)
4754                 put_user_u32(gettid(), parent_tidptr);
4755             ts = (TaskState *)cpu->opaque;
4756             if (flags & CLONE_SETTLS)
4757                 cpu_set_tls (env, newtls);
4758             if (flags & CLONE_CHILD_CLEARTID)
4759                 ts->child_tidptr = child_tidptr;
4767 /* warning : doesn't handle linux specific flags... */
/*
 * Map a target fcntl command number to the host's.  Commands with
 * identical semantics fall through to a shared return; on 32-bit ABIs
 * the *64 lock commands map to the host's plain lock commands.
 * Returns -TARGET_EINVAL for commands with no host equivalent.
 * NOTE(review): the return statements for most cases are on lines not
 * visible in this chunk.
 */
4768 static int target_to_host_fcntl_cmd(int cmd)
4771 	case TARGET_F_DUPFD:
4772 	case TARGET_F_GETFD:
4773 	case TARGET_F_SETFD:
4774 	case TARGET_F_GETFL:
4775 	case TARGET_F_SETFL:
4777 	case TARGET_F_GETLK:
4779 	case TARGET_F_SETLK:
4781 	case TARGET_F_SETLKW:
4783 	case TARGET_F_GETOWN:
4785 	case TARGET_F_SETOWN:
4787 	case TARGET_F_GETSIG:
4789 	case TARGET_F_SETSIG:
4791 #if TARGET_ABI_BITS == 32
4792         case TARGET_F_GETLK64:
4794 	case TARGET_F_SETLK64:
4796 	case TARGET_F_SETLKW64:
4799         case TARGET_F_SETLEASE:
4801         case TARGET_F_GETLEASE:
4803 #ifdef F_DUPFD_CLOEXEC
4804         case TARGET_F_DUPFD_CLOEXEC:
4805             return F_DUPFD_CLOEXEC;
4807         case TARGET_F_NOTIFY:
4810         case TARGET_F_GETOWN_EX:
4814         case TARGET_F_SETOWN_EX:
4818 	        return -TARGET_EINVAL;
4820 	return -TARGET_EINVAL;
/*
 * Translation table for flock l_type values (F_RDLCK etc.).
 * TRANSTBL_CONVERT builds a row with all-ones masks (-1) so the whole
 * value is matched rather than individual bits.  Terminating row not
 * visible in this chunk.
 */
4823 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
4824 static const bitmask_transtbl flock_tbl[] = {
4825     TRANSTBL_CONVERT(F_RDLCK),
4826     TRANSTBL_CONVERT(F_WRLCK),
4827     TRANSTBL_CONVERT(F_UNLCK),
4828     TRANSTBL_CONVERT(F_EXLCK),
4829     TRANSTBL_CONVERT(F_SHLCK),
/*
 * do_fcntl: implement the fcntl syscall.  Lock commands marshal a
 * struct flock (or flock64) between guest and host layouts; the
 * F_*OWN_EX commands marshal a struct f_owner_ex; GETFL/SETFL
 * translate the flag bits; simple integer commands pass the argument
 * straight through.  Returns a target errno on failure.
 * NOTE(review): some lines (break statements, closing braces, final
 * return) are not visible in this chunk.
 */
4833 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
4836     struct target_flock *target_fl;
4837     struct flock64 fl64;
4838     struct target_flock64 *target_fl64;
4840     struct f_owner_ex fox;
4841     struct target_f_owner_ex *target_fox;
4844     int host_cmd = target_to_host_fcntl_cmd(cmd);
4846     if (host_cmd == -TARGET_EINVAL)
4850     case TARGET_F_GETLK:
         /* Copy the guest's query lock in, ask the host, copy the
          * resulting lock description back out. */
4851         if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4852             return -TARGET_EFAULT;
4854             target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
4855         fl.l_whence = tswap16(target_fl->l_whence);
4856         fl.l_start = tswapal(target_fl->l_start);
4857         fl.l_len = tswapal(target_fl->l_len);
4858         fl.l_pid = tswap32(target_fl->l_pid);
4859         unlock_user_struct(target_fl, arg, 0);
4860         ret = get_errno(fcntl(fd, host_cmd, &fl));
4862             if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
4863                 return -TARGET_EFAULT;
4865                 host_to_target_bitmask(tswap16(fl.l_type), flock_tbl);
4866             target_fl->l_whence = tswap16(fl.l_whence);
4867             target_fl->l_start = tswapal(fl.l_start);
4868             target_fl->l_len = tswapal(fl.l_len);
4869             target_fl->l_pid = tswap32(fl.l_pid);
4870             unlock_user_struct(target_fl, arg, 1);
4874     case TARGET_F_SETLK:
4875     case TARGET_F_SETLKW:
         /* Input-only: convert the guest lock and hand it to the host. */
4876         if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4877             return -TARGET_EFAULT;
4879             target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
4880         fl.l_whence = tswap16(target_fl->l_whence);
4881         fl.l_start = tswapal(target_fl->l_start);
4882         fl.l_len = tswapal(target_fl->l_len);
4883         fl.l_pid = tswap32(target_fl->l_pid);
4884         unlock_user_struct(target_fl, arg, 0);
4885         ret = get_errno(fcntl(fd, host_cmd, &fl));
4888     case TARGET_F_GETLK64:
         /* 64-bit offset variant of F_GETLK; note the >> 1 shift applied
          * to l_type in the 64-bit layout. */
4889         if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4890             return -TARGET_EFAULT;
4892             target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
4893         fl64.l_whence = tswap16(target_fl64->l_whence);
4894         fl64.l_start = tswap64(target_fl64->l_start);
4895         fl64.l_len = tswap64(target_fl64->l_len);
4896         fl64.l_pid = tswap32(target_fl64->l_pid);
4897         unlock_user_struct(target_fl64, arg, 0);
4898         ret = get_errno(fcntl(fd, host_cmd, &fl64));
4900             if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
4901                 return -TARGET_EFAULT;
4902             target_fl64->l_type =
4903                 host_to_target_bitmask(tswap16(fl64.l_type), flock_tbl) >> 1;
4904             target_fl64->l_whence = tswap16(fl64.l_whence);
4905             target_fl64->l_start = tswap64(fl64.l_start);
4906             target_fl64->l_len = tswap64(fl64.l_len);
4907             target_fl64->l_pid = tswap32(fl64.l_pid);
4908             unlock_user_struct(target_fl64, arg, 1);
4911     case TARGET_F_SETLK64:
4912     case TARGET_F_SETLKW64:
4913         if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4914             return -TARGET_EFAULT;
4916             target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
4917         fl64.l_whence = tswap16(target_fl64->l_whence);
4918         fl64.l_start = tswap64(target_fl64->l_start);
4919         fl64.l_len = tswap64(target_fl64->l_len);
4920         fl64.l_pid = tswap32(target_fl64->l_pid);
4921         unlock_user_struct(target_fl64, arg, 0);
4922         ret = get_errno(fcntl(fd, host_cmd, &fl64));
4925     case TARGET_F_GETFL:
         /* File status flags need translating back to target encoding. */
4926         ret = get_errno(fcntl(fd, host_cmd, arg));
4928             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
4932     case TARGET_F_SETFL:
4933         ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
4937     case TARGET_F_GETOWN_EX:
4938         ret = get_errno(fcntl(fd, host_cmd, &fox));
4940             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
4941                 return -TARGET_EFAULT;
4942             target_fox->type = tswap32(fox.type);
4943             target_fox->pid = tswap32(fox.pid);
4944             unlock_user_struct(target_fox, arg, 1);
4950     case TARGET_F_SETOWN_EX:
4951         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
4952             return -TARGET_EFAULT;
4953         fox.type = tswap32(target_fox->type);
4954         fox.pid = tswap32(target_fox->pid);
4955         unlock_user_struct(target_fox, arg, 0);
4956         ret = get_errno(fcntl(fd, host_cmd, &fox));
4960     case TARGET_F_SETOWN:
4961     case TARGET_F_GETOWN:
4962     case TARGET_F_SETSIG:
4963     case TARGET_F_GETSIG:
4964     case TARGET_F_SETLEASE:
4965     case TARGET_F_GETLEASE:
         /* Plain integer argument: no conversion needed. */
4966         ret = get_errno(fcntl(fd, host_cmd, arg));
         /* Unknown command: pass through untranslated and hope. */
4970         ret = get_errno(fcntl(fd, cmd, arg));
/*
 * Helpers for targets using 16-bit uid/gid syscalls (USE_UID16):
 * high2low* clamp a host id into 16 bits, low2high* widen a 16-bit
 * guest id (preserving the -1 "no change" sentinel), and tswapid /
 * put_user_id handle the 16-bit byte order and store.  The !USE_UID16
 * variants below are identity functions operating on 32-bit ids.
 * (Function bodies are largely on lines not visible in this chunk.)
 */
4978 static inline int high2lowuid(int uid)
4986 static inline int high2lowgid(int gid)
4994 static inline int low2highuid(int uid)
4996     if ((int16_t)uid == -1)
5002 static inline int low2highgid(int gid)
5004     if ((int16_t)gid == -1)
5009 static inline int tswapid(int id)
5014 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
5016 #else /* !USE_UID16 */
5017 static inline int high2lowuid(int uid)
5021 static inline int high2lowgid(int gid)
5025 static inline int low2highuid(int uid)
5029 static inline int low2highgid(int gid)
5033 static inline int tswapid(int id)
5038 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
5040 #endif /* USE_UID16 */
/*
 * syscall_init: one-time setup for the syscall translation layer.
 * Registers all struct layouts with the thunk system, builds the
 * reverse errno table, and walks ioctl_entries[] patching each
 * command's size field from its argtype (entries registered with an
 * all-ones size placeholder).  On same-arch builds it also sanity
 * checks that target and host command numbers agree.
 */
5042 void syscall_init(void)
5045     const argtype *arg_type;
5049     thunk_init(STRUCT_MAX);
5051 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
5052 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
5053 #include "syscall_types.h"
5055 #undef STRUCT_SPECIAL
5057     /* Build target_to_host_errno_table[] table from
5058      * host_to_target_errno_table[]. */
5059     for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
5060         target_to_host_errno_table[host_to_target_errno_table[i]] = i;
5063     /* we patch the ioctl size if necessary. We rely on the fact that
5064        no ioctl has all the bits at '1' in the size field */
5066     while (ie->target_cmd != 0) {
5067         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
5068             TARGET_IOC_SIZEMASK) {
5069             arg_type = ie->arg_type;
             /* Size patching only makes sense for pointer arguments. */
5070             if (arg_type[0] != TYPE_PTR) {
5071                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
5076             size = thunk_type_size(arg_type, 0);
5077             ie->target_cmd = (ie->target_cmd &
5078                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
5079                              (size << TARGET_IOC_SIZESHIFT);
5082         /* automatic consistency check if same arch */
5083 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
5084     (defined(__x86_64__) && defined(TARGET_X86_64))
5085         if (unlikely(ie->target_cmd != ie->host_cmd)) {
5086             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
5087                     ie->name, ie->target_cmd, ie->host_cmd);
5094 #if TARGET_ABI_BITS == 32
/*
 * Combine the two 32-bit register halves of a 64-bit file offset,
 * respecting the guest's endianness (which half holds the high bits
 * differs).  On 64-bit ABIs the value arrives whole in word0 and the
 * second argument is unused (that return is not visible here).
 */
5095 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
5097 #ifdef TARGET_WORDS_BIGENDIAN
5098     return ((uint64_t)word0 << 32) | word1;
5100     return ((uint64_t)word1 << 32) | word0;
5103 #else /* TARGET_ABI_BITS == 32 */
5104 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
5108 #endif /* TARGET_ABI_BITS != 32 */
5110 #ifdef TARGET_NR_truncate64
/*
 * truncate64: the 64-bit length is split across two registers
 * (arg2/arg3); on ABIs that align 64-bit register pairs the pair is
 * shifted up by one register first (that adjustment is on lines not
 * visible in this chunk).
 */
5111 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
5116     if (regpairs_aligned(cpu_env)) {
5120     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
5124 #ifdef TARGET_NR_ftruncate64
/*
 * ftruncate64: same register-pair handling as target_truncate64()
 * above, but operating on a file descriptor instead of a path.
 */
5125 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
5130     if (regpairs_aligned(cpu_env)) {
5134     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
/*
 * Copy a struct timespec from guest memory at target_addr into
 * *host_ts, byte-swapping both fields.  Returns -TARGET_EFAULT if the
 * guest struct cannot be locked (success return not visible here).
 */
5138 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
5139                                                abi_ulong target_addr)
5141     struct target_timespec *target_ts;
5143     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
5144         return -TARGET_EFAULT;
5145     host_ts->tv_sec = tswapal(target_ts->tv_sec);
5146     host_ts->tv_nsec = tswapal(target_ts->tv_nsec);
5147     unlock_user_struct(target_ts, target_addr, 0);
/* Mirror of target_to_host_timespec: write *host_ts out to the guest
 * struct timespec at target_addr, byte-swapping each field.
 * Returns -TARGET_EFAULT if the guest address cannot be written. */
5151 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
5152 struct timespec *host_ts)
5154 struct target_timespec *target_ts;
5156 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
5157 return -TARGET_EFAULT;
5158 target_ts->tv_sec = tswapal(host_ts->tv_sec);
5159 target_ts->tv_nsec = tswapal(host_ts->tv_nsec);
/* final argument 1: copy the modified struct back to guest memory */
5160 unlock_user_struct(target_ts, target_addr, 1);
/* Copy a guest struct itimerspec (interval + initial value, each a
 * timespec) from target_addr into *host_itspec, byte-swapping every field.
 * Returns -TARGET_EFAULT if the guest address cannot be read. */
5164 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
5165 abi_ulong target_addr)
5167 struct target_itimerspec *target_itspec;
5169 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
5170 return -TARGET_EFAULT;
5173 host_itspec->it_interval.tv_sec =
5174 tswapal(target_itspec->it_interval.tv_sec);
5175 host_itspec->it_interval.tv_nsec =
5176 tswapal(target_itspec->it_interval.tv_nsec);
5177 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
5178 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
/* NOTE(review): final argument is 1 (copy back) although this path only
 * reads the guest struct; the sibling timespec reader passes 0 -- confirm */
5180 unlock_user_struct(target_itspec, target_addr, 1);
/* Mirror of target_to_host_itimerspec: write *host_its out to the guest
 * struct itimerspec at target_addr, byte-swapping every field.
 * Returns -TARGET_EFAULT if the guest address cannot be written. */
5184 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
5185 struct itimerspec *host_its)
5187 struct target_itimerspec *target_itspec;
5189 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
5190 return -TARGET_EFAULT;
5193 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
5194 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
5196 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
5197 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
/* NOTE(review): final argument is 0 (no copy-back) although this path
 * writes the guest struct; the sibling timespec writer passes 1 -- confirm */
5199 unlock_user_struct(target_itspec, target_addr, 0);
/* Copy a guest struct sigevent at target_addr into *host_sevp, translating
 * the signal number to the host numbering and byte-swapping every field.
 * Returns -TARGET_EFAULT if the guest address cannot be read. */
5203 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
5204 abi_ulong target_addr)
5206 struct target_sigevent *target_sevp;
5208 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
5209 return -TARGET_EFAULT;
5212 /* This union is awkward on 64 bit systems because it has a 32 bit
5213 * integer and a pointer in it; we follow the conversion approach
5214 * used for handling sigval types in signal.c so the guest should get
5215 * the correct value back even if we did a 64 bit byteswap and it's
5216 * using the 32 bit integer.
5218 host_sevp->sigev_value.sival_ptr =
5219 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
/* signal numbers differ between guest and host ABIs; translate, don't copy */
5220 host_sevp->sigev_signo =
5221 target_to_host_signal(tswap32(target_sevp->sigev_signo));
5222 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
/* thread id used by SIGEV_THREAD_ID notification */
5223 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
5225 unlock_user_struct(target_sevp, target_addr, 1);
5229 #if defined(TARGET_NR_mlockall)
/* Translate the guest's mlockall(2) flag bits (MCL_CURRENT / MCL_FUTURE)
 * into the host's values; the numeric encodings may differ between ABIs. */
5230 static inline int target_to_host_mlockall_arg(int arg)
5234 if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
5235 result |= MCL_CURRENT;
5237 if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
5238 result |= MCL_FUTURE;
5244 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
/* Write a host struct stat out to the guest as its 64-bit stat layout.
 * Two layouts are handled: the ARM EABI-specific target_eabi_stat64 (taken
 * when the emulated ARM CPU runs in EABI mode), and the generic
 * target_stat64 / target_stat chosen by TARGET_HAS_STRUCT_STAT64.
 * Returns -TARGET_EFAULT if target_addr cannot be written. */
5245 static inline abi_long host_to_target_stat64(void *cpu_env,
5246 abi_ulong target_addr,
5247 struct stat *host_st)
5249 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
/* ARM EABI guests use a differently padded stat64 layout */
5250 if (((CPUARMState *)cpu_env)->eabi) {
5251 struct target_eabi_stat64 *target_st;
5253 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
5254 return -TARGET_EFAULT;
/* zero first so padding and any fields we don't fill are deterministic */
5255 memset(target_st, 0, sizeof(struct target_eabi_stat64));
5256 __put_user(host_st->st_dev, &target_st->st_dev);
5257 __put_user(host_st->st_ino, &target_st->st_ino);
5258 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
/* some ABIs carry the inode in a second, differently-sized field too */
5259 __put_user(host_st->st_ino, &target_st->__st_ino);
5261 __put_user(host_st->st_mode, &target_st->st_mode);
5262 __put_user(host_st->st_nlink, &target_st->st_nlink);
5263 __put_user(host_st->st_uid, &target_st->st_uid);
5264 __put_user(host_st->st_gid, &target_st->st_gid);
5265 __put_user(host_st->st_rdev, &target_st->st_rdev);
5266 __put_user(host_st->st_size, &target_st->st_size);
5267 __put_user(host_st->st_blksize, &target_st->st_blksize);
5268 __put_user(host_st->st_blocks, &target_st->st_blocks);
5269 __put_user(host_st->st_atime, &target_st->target_st_atime);
5270 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
5271 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
5272 unlock_user_struct(target_st, target_addr, 1);
/* generic (non-ARM-EABI) path: pick the guest stat layout at compile time */
5276 #if defined(TARGET_HAS_STRUCT_STAT64)
5277 struct target_stat64 *target_st;
5279 struct target_stat *target_st;
5282 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
5283 return -TARGET_EFAULT;
5284 memset(target_st, 0, sizeof(*target_st));
5285 __put_user(host_st->st_dev, &target_st->st_dev);
5286 __put_user(host_st->st_ino, &target_st->st_ino);
5287 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
5288 __put_user(host_st->st_ino, &target_st->__st_ino);
5290 __put_user(host_st->st_mode, &target_st->st_mode);
5291 __put_user(host_st->st_nlink, &target_st->st_nlink);
5292 __put_user(host_st->st_uid, &target_st->st_uid);
5293 __put_user(host_st->st_gid, &target_st->st_gid);
5294 __put_user(host_st->st_rdev, &target_st->st_rdev);
5295 /* XXX: better use of kernel struct */
5296 __put_user(host_st->st_size, &target_st->st_size);
5297 __put_user(host_st->st_blksize, &target_st->st_blksize);
5298 __put_user(host_st->st_blocks, &target_st->st_blocks);
5299 __put_user(host_st->st_atime, &target_st->target_st_atime);
5300 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
5301 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
5302 unlock_user_struct(target_st, target_addr, 1);
5309 /* ??? Using host futex calls even when target atomic operations
5310 are not really atomic probably breaks things. However implementing
5311 futexes locally would make futexes shared between multiple processes
5312 tricky. However they're probably useless because guest atomic
5313 operations won't work either. */
/* Emulate futex(2).  The guest address is translated with g2h() and the
 * host futex syscall is invoked directly; per-operation handling differs
 * in how timeout/val are interpreted. */
5314 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
5315 target_ulong uaddr2, int val3)
5317 struct timespec ts, *pts;
5320 /* ??? We assume FUTEX_* constants are the same on both host
5322 #ifdef FUTEX_CMD_MASK
/* strip FUTEX_PRIVATE_FLAG / FUTEX_CLOCK_REALTIME etc. for dispatching */
5323 base_op = op & FUTEX_CMD_MASK;
5329 case FUTEX_WAIT_BITSET:
/* NOTE(review): the -TARGET_EFAULT return of target_to_host_timespec is
 * ignored here; a bad guest timeout pointer goes undetected -- confirm */
5332 target_to_host_timespec(pts, timeout);
/* FUTEX_WAIT compares *uaddr against val in guest byte order */
5336 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
5339 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
5341 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
5343 case FUTEX_CMP_REQUEUE:
5345 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
5346 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
5347 But the prototype takes a `struct timespec *'; insert casts
5348 to satisfy the compiler. We do not need to tswap TIMEOUT
5349 since it's not compared to guest memory. */
5350 pts = (struct timespec *)(uintptr_t) timeout;
5351 return get_errno(sys_futex(g2h(uaddr), op, val, pts,
5353 (base_op == FUTEX_CMP_REQUEUE
/* unrecognized futex operation */
5357 return -TARGET_ENOSYS;
5360 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate name_to_handle_at(2): read the guest-supplied handle_bytes size,
 * call the host syscall into a temporary host file_handle, then copy the
 * (opaque) handle back to the guest with the header fields byte-swapped,
 * and store the mount id through the guest mount_id pointer. */
5361 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
5362 abi_long handle, abi_long mount_id,
5365 struct file_handle *target_fh;
5366 struct file_handle *fh;
5370 unsigned int size, total_size;
/* first field of the guest file_handle is handle_bytes (32-bit) */
5372 if (get_user_s32(size, handle)) {
5373 return -TARGET_EFAULT;
5376 name = lock_user_string(pathname);
5378 return -TARGET_EFAULT;
/* the variable-length handle data follows the fixed header */
5381 total_size = sizeof(struct file_handle) + size;
5382 target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
5384 unlock_user(name, pathname, 0);
5385 return -TARGET_EFAULT;
/* host-side scratch handle, zeroed, with the caller's capacity */
5388 fh = g_malloc0(total_size);
5389 fh->handle_bytes = size;
5391 ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
5392 unlock_user(name, pathname, 0);
5394 /* man name_to_handle_at(2):
5395 * Other than the use of the handle_bytes field, the caller should treat
5396 * the file_handle structure as an opaque data type
5399 memcpy(target_fh, fh, total_size);
5400 target_fh->handle_bytes = tswap32(fh->handle_bytes);
5401 target_fh->handle_type = tswap32(fh->handle_type);
5403 unlock_user(target_fh, handle, total_size);
5405 if (put_user_s32(mid, mount_id)) {
5406 return -TARGET_EFAULT;
5414 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate open_by_handle_at(2): copy the guest file_handle into a host-side
 * duplicate, fixing up the byte order of the header fields, translate the
 * open flags, and invoke the host syscall. */
5415 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
5418 struct file_handle *target_fh;
5419 struct file_handle *fh;
5420 unsigned int size, total_size;
/* handle_bytes is the first field of the guest struct */
5423 if (get_user_s32(size, handle)) {
5424 return -TARGET_EFAULT;
5427 total_size = sizeof(struct file_handle) + size;
5428 target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
5430 return -TARGET_EFAULT;
/* duplicate the opaque handle data, then overwrite the header fields
 * with host-byte-order values (size was already swapped by get_user_s32) */
5433 fh = g_memdup(target_fh, total_size);
5434 fh->handle_bytes = size;
5435 fh->handle_type = tswap32(target_fh->handle_type);
5437 ret = get_errno(open_by_handle_at(mount_fd, fh,
5438 target_to_host_bitmask(flags, fcntl_flags_tbl)));
5442 unlock_user(target_fh, handle, total_size);
5448 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
5450 /* signalfd siginfo conversion */
/* Convert a host struct signalfd_siginfo into the guest's layout.
 *
 * tinfo and info may alias the same buffer (they do when called from
 * host_to_target_data_signalfd, which converts records in place), so every
 * host value must be read through INFO before anything is stored through
 * TINFO.  The original code read tinfo->ssi_errno / tinfo->ssi_signo /
 * tinfo->ssi_code, which only worked by accident of that aliasing.
 */
static void
host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
                                const struct signalfd_siginfo *info)
{
    /* guest and host signal numbers differ; translate before swapping */
    int sig = host_to_target_signal(info->ssi_signo);

    /* linux/signalfd.h defines a ssi_addr_lsb
     * not defined in sys/signalfd.h but used by some kernels
     */
#ifdef BUS_MCEERR_AO
    /* For hardware memory errors (SIGBUS with a BUS_MCEERR_* code) the
     * kernel appends the least-significant-bit count of the faulting
     * address right after ssi_addr; propagate it.  Test the host-side
     * values (info), not the destination buffer. */
    if (info->ssi_signo == SIGBUS &&
        (info->ssi_code == BUS_MCEERR_AR ||
         info->ssi_code == BUS_MCEERR_AO)) {
        uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
        uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
        *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
    }
#endif

    tinfo->ssi_signo = tswap32(sig);
    /* bug fix: read the errno from the host-side info, not from the
     * (possibly distinct) destination buffer */
    tinfo->ssi_errno = tswap32(info->ssi_errno);
    tinfo->ssi_code = tswap32(info->ssi_code);
    tinfo->ssi_pid = tswap32(info->ssi_pid);
    tinfo->ssi_uid = tswap32(info->ssi_uid);
    tinfo->ssi_fd = tswap32(info->ssi_fd);
    tinfo->ssi_tid = tswap32(info->ssi_tid);
    tinfo->ssi_band = tswap32(info->ssi_band);
    tinfo->ssi_overrun = tswap32(info->ssi_overrun);
    tinfo->ssi_trapno = tswap32(info->ssi_trapno);
    tinfo->ssi_status = tswap32(info->ssi_status);
    tinfo->ssi_int = tswap32(info->ssi_int);
    tinfo->ssi_ptr = tswap64(info->ssi_ptr);
    tinfo->ssi_utime = tswap64(info->ssi_utime);
    tinfo->ssi_stime = tswap64(info->ssi_stime);
    tinfo->ssi_addr = tswap64(info->ssi_addr);
}
/* fd_trans read hook for signalfd descriptors: convert every
 * signalfd_siginfo record in the len-byte buffer to guest layout. */
5490 static abi_long host_to_target_data_signalfd(void *buf, size_t len)
5494 for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) {
/* in-place conversion: source and destination are the same record */
5495 host_to_target_signalfd_siginfo(buf + i, buf + i);
/* fd translator registered for signalfd file descriptors: data read from
 * them must be converted to the guest's signalfd_siginfo layout. */
5501 static TargetFdTrans target_signalfd_trans = {
5502 .host_to_target_data = host_to_target_data_signalfd,
/* Emulate signalfd4(2): validate flags, convert the guest sigset to host
 * numbering, create the host signalfd, and register the fd translator so
 * reads from it are converted back to guest layout. */
5505 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
5508 target_sigset_t *target_mask;
/* only these two flags are defined for signalfd4 */
5512 if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
5513 return -TARGET_EINVAL;
5515 if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
5516 return -TARGET_EFAULT;
5519 target_to_host_sigset(&host_mask, target_mask);
5521 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
5523 ret = get_errno(signalfd(fd, &host_mask, host_flags));
/* on success, reads from this fd need siginfo conversion */
5525 fd_trans_register(ret, &target_signalfd_trans);
5528 unlock_user_struct(target_mask, mask, 0);
5534 /* Map host to target signal numbers for the wait family of syscalls.
5535 Assume all other status bits are the same. */
5536 int host_to_target_waitstatus(int status)
5538 if (WIFSIGNALED(status)) {
/* low 7 bits carry the terminating signal; translate just those */
5539 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
5541 if (WIFSTOPPED(status)) {
/* stopped: the stopping signal lives in bits 8-15 */
5542 return (host_to_target_signal(WSTOPSIG(status)) << 8)
/* Back /proc/self/cmdline for the guest: stream the host file into fd,
 * but drop the first NUL-terminated string (the qemu binary path) so the
 * guest sees its own command line rather than the emulator's. */
5548 static int open_self_cmdline(void *cpu_env, int fd)
5551 bool word_skipped = false;
5553 fd_orig = open("/proc/self/cmdline", O_RDONLY);
5563 nb_read = read(fd_orig, buf, sizeof(buf));
/* read error path: close the original descriptor */
5565 fd_orig = close(fd_orig);
5567 } else if (nb_read == 0) {
5571 if (!word_skipped) {
5572 /* Skip the first string, which is the path to qemu-*-static
5573 instead of the actual command. */
5574 cp_buf = memchr(buf, 0, sizeof(buf));
5576 /* Null byte found, skip one string */
5578 nb_read -= cp_buf - buf;
5579 word_skipped = true;
5584 if (write(fd, cp_buf, nb_read) != nb_read) {
5591 return close(fd_orig);
/* Back /proc/self/maps for the guest: parse the host's maps file line by
 * line and re-emit only the regions that correspond to valid guest address
 * space, with host addresses translated back to guest addresses via h2g(). */
5594 static int open_self_maps(void *cpu_env, int fd)
5596 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
5597 TaskState *ts = cpu->opaque;
5603 fp = fopen("/proc/self/maps", "r");
5608 while ((read = getline(&line, &len, fp)) != -1) {
5609 int fields, dev_maj, dev_min, inode;
5610 uint64_t min, max, offset;
5611 char flag_r, flag_w, flag_x, flag_p;
5612 char path[512] = "";
/* NOTE(review): "%512s" can store up to 512 chars plus the terminating
 * NUL into path[512], a potential one-byte overflow -- confirm/fix */
5613 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
5614 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
5615 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
/* 10 fields = anonymous mapping (no path), 11 = named; else malformed */
5617 if ((fields < 10) || (fields > 11)) {
5620 if (h2g_valid(min)) {
5621 int flags = page_get_flags(h2g(min));
/* clamp the end of the region to the last valid guest address */
5622 max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX);
5623 if (page_check_range(h2g(min), max - min, flags) == -1) {
/* label the guest stack region like the kernel does */
5626 if (h2g(min) == ts->info->stack_limit) {
5627 pstrcpy(path, sizeof(path), " [stack]");
5629 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
5630 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
5631 h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
5632 flag_x, flag_p, offset, dev_maj, dev_min, inode,
5633 path[0] ? " " : "", path);
/* Back /proc/self/stat for the guest: emit the 44 space-separated fields,
 * filling in only the ones qemu can answer (pid, comm, start of stack) and
 * writing 0 for everything else. */
5643 static int open_self_stat(void *cpu_env, int fd)
5645 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
5646 TaskState *ts = cpu->opaque;
5647 abi_ulong start_stack = ts->info->start_stack;
/* /proc/<pid>/stat has 44 fields; emit them one by one */
5650 for (i = 0; i < 44; i++) {
5658 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
/* field 1: comm, the executable name in parentheses */
5659 } else if (i == 1) {
5661 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
/* field 27: presumably startstack (value set on a hidden line) -- confirm */
5662 } else if (i == 27) {
5665 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
5667 /* for the rest, there is MasterCard */
5668 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
5672 if (write(fd, buf, len) != len) {
/* Back /proc/self/auxv for the guest: copy the auxiliary vector that was
 * placed on the guest stack at exec time out to the given fd. */
5680 static int open_self_auxv(void *cpu_env, int fd)
5682 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
5683 TaskState *ts = cpu->opaque;
5684 abi_ulong auxv = ts->info->saved_auxv;
5685 abi_ulong len = ts->info->auxv_len;
5689 * Auxiliary vector is stored in target process stack.
5690 * read in whole auxv vector and copy it to file
5692 ptr = lock_user(VERIFY_READ, auxv, len, 0);
5696 r = write(fd, ptr, len);
/* rewind so the guest reads the file from the beginning */
5703 lseek(fd, 0, SEEK_SET);
5704 unlock_user(ptr, auxv, len);
/* Return nonzero when filename names this process's own proc entry for
 * "entry": matches "/proc/self/<entry>" and "/proc/<ourpid>/<entry>". */
5710 static int is_proc_myself(const char *filename, const char *entry)
5712 if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
5713 filename += strlen("/proc/");
5714 if (!strncmp(filename, "self/", strlen("self/"))) {
5715 filename += strlen("self/");
/* numeric pid component: compare against our own pid */
5716 } else if (*filename >= '1' && *filename <= '9') {
5718 snprintf(myself, sizeof(myself), "%d/", getpid());
5719 if (!strncmp(filename, myself, strlen(myself))) {
5720 filename += strlen(myself);
/* the remainder after the prefix must equal the requested entry exactly */
5727 if (!strcmp(filename, entry)) {
5734 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-path comparator used by the fake-open table when host and guest
 * endianness differ (the entries there carry full /proc paths). */
5735 static int is_proc(const char *filename, const char *entry)
5737 return strcmp(filename, entry) == 0;
/* Back /proc/net/route for a guest with opposite endianness: re-emit the
 * host file with the address fields (dest, gateway, mask) byte-swapped so
 * the guest parses them correctly.  Only compiled when host and guest
 * endianness differ. */
5740 static int open_net_route(void *cpu_env, int fd)
5747 fp = fopen("/proc/net/route", "r");
/* first line is the column header; pass it through unchanged */
5754 read = getline(&line, &len, fp);
5755 dprintf(fd, "%s", line);
5759 while ((read = getline(&line, &len, fp)) != -1) {
5761 uint32_t dest, gw, mask;
5762 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
/* NOTE(review): %d conversions paired with unsigned int arguments;
 * matches the kernel's output format but is technically mismatched */
5763 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
5764 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
5765 &mask, &mtu, &window, &irtt);
/* swap the IPv4 address fields; the scalar columns are plain text */
5766 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
5767 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
5768 metric, tswap32(mask), mtu, window, irtt);
/* Emulate openat(2).  Special-cases proc files the guest must not see the
 * host's version of: /proc/self/exe resolves to the guest executable, and
 * maps/stat/auxv/cmdline (plus /proc/net/route on cross-endian builds) are
 * synthesized into an unlinked temporary file by the matching fill(). */
5778 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
5781 const char *filename;
5782 int (*fill)(void *cpu_env, int fd);
5783 int (*cmp)(const char *s1, const char *s2);
5785 const struct fake_open *fake_open;
5786 static const struct fake_open fakes[] = {
5787 { "maps", open_self_maps, is_proc_myself },
5788 { "stat", open_self_stat, is_proc_myself },
5789 { "auxv", open_self_auxv, is_proc_myself },
5790 { "cmdline", open_self_cmdline, is_proc_myself },
5791 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
5792 { "/proc/net/route", open_net_route, is_proc },
5794 { NULL, NULL, NULL }
/* /proc/self/exe must name the guest binary, not the qemu interpreter */
5797 if (is_proc_myself(pathname, "exe")) {
5798 int execfd = qemu_getauxval(AT_EXECFD);
5799 return execfd ? execfd : get_errno(sys_openat(dirfd, exec_path, flags, mode));
5802 for (fake_open = fakes; fake_open->filename; fake_open++) {
5803 if (fake_open->cmp(pathname, fake_open->filename)) {
5808 if (fake_open->filename) {
5810 char filename[PATH_MAX];
5813 /* create temporary file to map stat to */
5814 tmpdir = getenv("TMPDIR");
5817 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
5818 fd = mkstemp(filename);
/* synthesize the file contents, then hand the fd to the guest */
5824 if ((r = fake_open->fill(cpu_env, fd))) {
5828 lseek(fd, 0, SEEK_SET);
/* default path: forward to the host openat */
5833 return get_errno(sys_openat(dirfd, path(pathname), flags, mode));
/* Guest-visible POSIX timer ids are the 16-bit table index tagged with
 * TIMER_MAGIC in the upper half, so stray integers are rejected. */
5836 #define TIMER_MAGIC 0x0caf0000
5837 #define TIMER_MAGIC_MASK 0xffff0000
5839 /* Convert QEMU provided timer ID back to internal 16bit index format */
5840 static target_timer_t get_timer_id(abi_long arg)
5842 target_timer_t timerid = arg;
/* reject ids that were not produced by qemu (wrong magic tag) */
5844 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
5845 return -TARGET_EINVAL;
/* bounds-check the (now untagged) index against the timer table */
5850 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
5851 return -TARGET_EINVAL;
5857 /* do_syscall() should always have a single exit point at the end so
5858 that actions, such as logging of syscall results, can be performed.
5859 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
5860 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
5861 abi_long arg2, abi_long arg3, abi_long arg4,
5862 abi_long arg5, abi_long arg6, abi_long arg7,
5865 CPUState *cpu = ENV_GET_CPU(cpu_env);
5872 gemu_log("syscall %d", num);
5875 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
5878 case TARGET_NR_exit:
5879 /* In old applications this may be used to implement _exit(2).
5880 However in threaded applictions it is used for thread termination,
5881 and _exit_group is used for application termination.
5882 Do thread termination if we have more then one thread. */
5883 /* FIXME: This probably breaks if a signal arrives. We should probably
5884 be disabling signals. */
5885 if (CPU_NEXT(first_cpu)) {
5889 /* Remove the CPU from the list. */
5890 QTAILQ_REMOVE(&cpus, cpu, node);
5893 if (ts->child_tidptr) {
5894 put_user_u32(0, ts->child_tidptr);
5895 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
5899 object_unref(OBJECT(cpu));
5901 rcu_unregister_thread();
5907 gdb_exit(cpu_env, arg1);
5909 ret = 0; /* avoid warning */
5911 case TARGET_NR_read:
5915 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
5917 ret = get_errno(read(arg1, p, arg3));
5919 fd_trans_host_to_target_data(arg1)) {
5920 ret = fd_trans_host_to_target_data(arg1)(p, ret);
5922 unlock_user(p, arg2, ret);
5925 case TARGET_NR_write:
5926 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
5928 ret = get_errno(write(arg1, p, arg3));
5929 unlock_user(p, arg2, 0);
5931 #ifdef TARGET_NR_open
5932 case TARGET_NR_open:
5933 if (!(p = lock_user_string(arg1)))
5935 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
5936 target_to_host_bitmask(arg2, fcntl_flags_tbl),
5938 fd_trans_unregister(ret);
5939 unlock_user(p, arg1, 0);
5942 case TARGET_NR_openat:
5943 if (!(p = lock_user_string(arg2)))
5945 ret = get_errno(do_openat(cpu_env, arg1, p,
5946 target_to_host_bitmask(arg3, fcntl_flags_tbl),
5948 fd_trans_unregister(ret);
5949 unlock_user(p, arg2, 0);
5951 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
5952 case TARGET_NR_name_to_handle_at:
5953 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
5956 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
5957 case TARGET_NR_open_by_handle_at:
5958 ret = do_open_by_handle_at(arg1, arg2, arg3);
5959 fd_trans_unregister(ret);
5962 case TARGET_NR_close:
5963 fd_trans_unregister(arg1);
5964 ret = get_errno(close(arg1));
5969 #ifdef TARGET_NR_fork
5970 case TARGET_NR_fork:
5971 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
5974 #ifdef TARGET_NR_waitpid
5975 case TARGET_NR_waitpid:
5978 ret = get_errno(waitpid(arg1, &status, arg3));
5979 if (!is_error(ret) && arg2 && ret
5980 && put_user_s32(host_to_target_waitstatus(status), arg2))
5985 #ifdef TARGET_NR_waitid
5986 case TARGET_NR_waitid:
5990 ret = get_errno(waitid(arg1, arg2, &info, arg4));
5991 if (!is_error(ret) && arg3 && info.si_pid != 0) {
5992 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
5994 host_to_target_siginfo(p, &info);
5995 unlock_user(p, arg3, sizeof(target_siginfo_t));
6000 #ifdef TARGET_NR_creat /* not on alpha */
6001 case TARGET_NR_creat:
6002 if (!(p = lock_user_string(arg1)))
6004 ret = get_errno(creat(p, arg2));
6005 fd_trans_unregister(ret);
6006 unlock_user(p, arg1, 0);
6009 #ifdef TARGET_NR_link
6010 case TARGET_NR_link:
6013 p = lock_user_string(arg1);
6014 p2 = lock_user_string(arg2);
6016 ret = -TARGET_EFAULT;
6018 ret = get_errno(link(p, p2));
6019 unlock_user(p2, arg2, 0);
6020 unlock_user(p, arg1, 0);
6024 #if defined(TARGET_NR_linkat)
6025 case TARGET_NR_linkat:
6030 p = lock_user_string(arg2);
6031 p2 = lock_user_string(arg4);
6033 ret = -TARGET_EFAULT;
6035 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
6036 unlock_user(p, arg2, 0);
6037 unlock_user(p2, arg4, 0);
6041 #ifdef TARGET_NR_unlink
6042 case TARGET_NR_unlink:
6043 if (!(p = lock_user_string(arg1)))
6045 ret = get_errno(unlink(p));
6046 unlock_user(p, arg1, 0);
6049 #if defined(TARGET_NR_unlinkat)
6050 case TARGET_NR_unlinkat:
6051 if (!(p = lock_user_string(arg2)))
6053 ret = get_errno(unlinkat(arg1, p, arg3));
6054 unlock_user(p, arg2, 0);
6057 case TARGET_NR_execve:
6059 char **argp, **envp;
6062 abi_ulong guest_argp;
6063 abi_ulong guest_envp;
6070 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
6071 if (get_user_ual(addr, gp))
6079 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
6080 if (get_user_ual(addr, gp))
6087 argp = alloca((argc + 1) * sizeof(void *));
6088 envp = alloca((envc + 1) * sizeof(void *));
6090 for (gp = guest_argp, q = argp; gp;
6091 gp += sizeof(abi_ulong), q++) {
6092 if (get_user_ual(addr, gp))
6096 if (!(*q = lock_user_string(addr)))
6098 total_size += strlen(*q) + 1;
6102 for (gp = guest_envp, q = envp; gp;
6103 gp += sizeof(abi_ulong), q++) {
6104 if (get_user_ual(addr, gp))
6108 if (!(*q = lock_user_string(addr)))
6110 total_size += strlen(*q) + 1;
6114 if (!(p = lock_user_string(arg1)))
6116 ret = get_errno(execve(p, argp, envp));
6117 unlock_user(p, arg1, 0);
6122 ret = -TARGET_EFAULT;
6125 for (gp = guest_argp, q = argp; *q;
6126 gp += sizeof(abi_ulong), q++) {
6127 if (get_user_ual(addr, gp)
6130 unlock_user(*q, addr, 0);
6132 for (gp = guest_envp, q = envp; *q;
6133 gp += sizeof(abi_ulong), q++) {
6134 if (get_user_ual(addr, gp)
6137 unlock_user(*q, addr, 0);
6141 case TARGET_NR_chdir:
6142 if (!(p = lock_user_string(arg1)))
6144 ret = get_errno(chdir(p));
6145 unlock_user(p, arg1, 0);
6147 #ifdef TARGET_NR_time
6148 case TARGET_NR_time:
6151 ret = get_errno(time(&host_time));
6154 && put_user_sal(host_time, arg1))
6159 #ifdef TARGET_NR_mknod
6160 case TARGET_NR_mknod:
6161 if (!(p = lock_user_string(arg1)))
6163 ret = get_errno(mknod(p, arg2, arg3));
6164 unlock_user(p, arg1, 0);
6167 #if defined(TARGET_NR_mknodat)
6168 case TARGET_NR_mknodat:
6169 if (!(p = lock_user_string(arg2)))
6171 ret = get_errno(mknodat(arg1, p, arg3, arg4));
6172 unlock_user(p, arg2, 0);
6175 #ifdef TARGET_NR_chmod
6176 case TARGET_NR_chmod:
6177 if (!(p = lock_user_string(arg1)))
6179 ret = get_errno(chmod(p, arg2));
6180 unlock_user(p, arg1, 0);
6183 #ifdef TARGET_NR_break
6184 case TARGET_NR_break:
6187 #ifdef TARGET_NR_oldstat
6188 case TARGET_NR_oldstat:
6191 case TARGET_NR_lseek:
6192 ret = get_errno(lseek(arg1, arg2, arg3));
6194 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
6195 /* Alpha specific */
6196 case TARGET_NR_getxpid:
6197 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
6198 ret = get_errno(getpid());
6201 #ifdef TARGET_NR_getpid
6202 case TARGET_NR_getpid:
6203 ret = get_errno(getpid());
6206 case TARGET_NR_mount:
6208 /* need to look at the data field */
6212 p = lock_user_string(arg1);
6220 p2 = lock_user_string(arg2);
6223 unlock_user(p, arg1, 0);
6229 p3 = lock_user_string(arg3);
6232 unlock_user(p, arg1, 0);
6234 unlock_user(p2, arg2, 0);
6241 /* FIXME - arg5 should be locked, but it isn't clear how to
6242 * do that since it's not guaranteed to be a NULL-terminated
6246 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
6248 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
6250 ret = get_errno(ret);
6253 unlock_user(p, arg1, 0);
6255 unlock_user(p2, arg2, 0);
6257 unlock_user(p3, arg3, 0);
6261 #ifdef TARGET_NR_umount
6262 case TARGET_NR_umount:
6263 if (!(p = lock_user_string(arg1)))
6265 ret = get_errno(umount(p));
6266 unlock_user(p, arg1, 0);
6269 #ifdef TARGET_NR_stime /* not on alpha */
6270 case TARGET_NR_stime:
6273 if (get_user_sal(host_time, arg1))
6275 ret = get_errno(stime(&host_time));
6279 case TARGET_NR_ptrace:
6281 #ifdef TARGET_NR_alarm /* not on alpha */
6282 case TARGET_NR_alarm:
6286 #ifdef TARGET_NR_oldfstat
6287 case TARGET_NR_oldfstat:
6290 #ifdef TARGET_NR_pause /* not on alpha */
6291 case TARGET_NR_pause:
6292 ret = get_errno(pause());
6295 #ifdef TARGET_NR_utime
6296 case TARGET_NR_utime:
6298 struct utimbuf tbuf, *host_tbuf;
6299 struct target_utimbuf *target_tbuf;
6301 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
6303 tbuf.actime = tswapal(target_tbuf->actime);
6304 tbuf.modtime = tswapal(target_tbuf->modtime);
6305 unlock_user_struct(target_tbuf, arg2, 0);
6310 if (!(p = lock_user_string(arg1)))
6312 ret = get_errno(utime(p, host_tbuf));
6313 unlock_user(p, arg1, 0);
6317 #ifdef TARGET_NR_utimes
6318 case TARGET_NR_utimes:
6320 struct timeval *tvp, tv[2];
6322 if (copy_from_user_timeval(&tv[0], arg2)
6323 || copy_from_user_timeval(&tv[1],
6324 arg2 + sizeof(struct target_timeval)))
6330 if (!(p = lock_user_string(arg1)))
6332 ret = get_errno(utimes(p, tvp));
6333 unlock_user(p, arg1, 0);
6337 #if defined(TARGET_NR_futimesat)
6338 case TARGET_NR_futimesat:
6340 struct timeval *tvp, tv[2];
6342 if (copy_from_user_timeval(&tv[0], arg3)
6343 || copy_from_user_timeval(&tv[1],
6344 arg3 + sizeof(struct target_timeval)))
6350 if (!(p = lock_user_string(arg2)))
6352 ret = get_errno(futimesat(arg1, path(p), tvp));
6353 unlock_user(p, arg2, 0);
6357 #ifdef TARGET_NR_stty
6358 case TARGET_NR_stty:
6361 #ifdef TARGET_NR_gtty
6362 case TARGET_NR_gtty:
6365 #ifdef TARGET_NR_access
6366 case TARGET_NR_access:
6367 if (!(p = lock_user_string(arg1)))
6369 ret = get_errno(access(path(p), arg2));
6370 unlock_user(p, arg1, 0);
6373 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
6374 case TARGET_NR_faccessat:
6375 if (!(p = lock_user_string(arg2)))
6377 ret = get_errno(faccessat(arg1, p, arg3, 0));
6378 unlock_user(p, arg2, 0);
6381 #ifdef TARGET_NR_nice /* not on alpha */
6382 case TARGET_NR_nice:
6383 ret = get_errno(nice(arg1));
6386 #ifdef TARGET_NR_ftime
6387 case TARGET_NR_ftime:
6390 case TARGET_NR_sync:
6394 case TARGET_NR_kill:
6395 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
6397 #ifdef TARGET_NR_rename
6398 case TARGET_NR_rename:
6401 p = lock_user_string(arg1);
6402 p2 = lock_user_string(arg2);
6404 ret = -TARGET_EFAULT;
6406 ret = get_errno(rename(p, p2));
6407 unlock_user(p2, arg2, 0);
6408 unlock_user(p, arg1, 0);
6412 #if defined(TARGET_NR_renameat)
6413 case TARGET_NR_renameat:
6416 p = lock_user_string(arg2);
6417 p2 = lock_user_string(arg4);
6419 ret = -TARGET_EFAULT;
6421 ret = get_errno(renameat(arg1, p, arg3, p2));
6422 unlock_user(p2, arg4, 0);
6423 unlock_user(p, arg2, 0);
6427 #ifdef TARGET_NR_mkdir
6428 case TARGET_NR_mkdir:
6429 if (!(p = lock_user_string(arg1)))
6431 ret = get_errno(mkdir(p, arg2));
6432 unlock_user(p, arg1, 0);
6435 #if defined(TARGET_NR_mkdirat)
6436 case TARGET_NR_mkdirat:
6437 if (!(p = lock_user_string(arg2)))
6439 ret = get_errno(mkdirat(arg1, p, arg3));
6440 unlock_user(p, arg2, 0);
6443 #ifdef TARGET_NR_rmdir
6444 case TARGET_NR_rmdir:
6445 if (!(p = lock_user_string(arg1)))
6447 ret = get_errno(rmdir(p));
6448 unlock_user(p, arg1, 0);
6452 ret = get_errno(dup(arg1));
6454 fd_trans_dup(arg1, ret);
6457 #ifdef TARGET_NR_pipe
6458 case TARGET_NR_pipe:
6459 ret = do_pipe(cpu_env, arg1, 0, 0);
6462 #ifdef TARGET_NR_pipe2
6463 case TARGET_NR_pipe2:
6464 ret = do_pipe(cpu_env, arg1,
6465 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
6468 case TARGET_NR_times:
6470 struct target_tms *tmsp;
6472 ret = get_errno(times(&tms));
6474 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
6477 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
6478 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
6479 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
6480 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
6483 ret = host_to_target_clock_t(ret);
6486 #ifdef TARGET_NR_prof
6487 case TARGET_NR_prof:
6490 #ifdef TARGET_NR_signal
6491 case TARGET_NR_signal:
6494 case TARGET_NR_acct:
6496 ret = get_errno(acct(NULL));
6498 if (!(p = lock_user_string(arg1)))
6500 ret = get_errno(acct(path(p)));
6501 unlock_user(p, arg1, 0);
6504 #ifdef TARGET_NR_umount2
6505 case TARGET_NR_umount2:
6506 if (!(p = lock_user_string(arg1)))
6508 ret = get_errno(umount2(p, arg2));
6509 unlock_user(p, arg1, 0);
6512 #ifdef TARGET_NR_lock
6513 case TARGET_NR_lock:
6516 case TARGET_NR_ioctl:
6517 ret = do_ioctl(arg1, arg2, arg3);
6519 case TARGET_NR_fcntl:
6520 ret = do_fcntl(arg1, arg2, arg3);
6522 #ifdef TARGET_NR_mpx
6526 case TARGET_NR_setpgid:
6527 ret = get_errno(setpgid(arg1, arg2));
6529 #ifdef TARGET_NR_ulimit
6530 case TARGET_NR_ulimit:
6533 #ifdef TARGET_NR_oldolduname
6534 case TARGET_NR_oldolduname:
6537 case TARGET_NR_umask:
6538 ret = get_errno(umask(arg1));
6540 case TARGET_NR_chroot:
6541 if (!(p = lock_user_string(arg1)))
6543 ret = get_errno(chroot(p));
6544 unlock_user(p, arg1, 0);
6546 #ifdef TARGET_NR_ustat
6547 case TARGET_NR_ustat:
6550 #ifdef TARGET_NR_dup2
6551 case TARGET_NR_dup2:
6552 ret = get_errno(dup2(arg1, arg2));
6554 fd_trans_dup(arg1, arg2);
6558 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
6559 case TARGET_NR_dup3:
6560 ret = get_errno(dup3(arg1, arg2, arg3));
6562 fd_trans_dup(arg1, arg2);
6566 #ifdef TARGET_NR_getppid /* not on alpha */
6567 case TARGET_NR_getppid:
6568 ret = get_errno(getppid());
6571 #ifdef TARGET_NR_getpgrp
6572 case TARGET_NR_getpgrp:
6573 ret = get_errno(getpgrp());
6576 case TARGET_NR_setsid:
6577 ret = get_errno(setsid());
6579 #ifdef TARGET_NR_sigaction
6580 case TARGET_NR_sigaction:
6582 #if defined(TARGET_ALPHA)
6583 struct target_sigaction act, oact, *pact = 0;
6584 struct target_old_sigaction *old_act;
6586 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
6588 act._sa_handler = old_act->_sa_handler;
6589 target_siginitset(&act.sa_mask, old_act->sa_mask);
6590 act.sa_flags = old_act->sa_flags;
6591 act.sa_restorer = 0;
6592 unlock_user_struct(old_act, arg2, 0);
6595 ret = get_errno(do_sigaction(arg1, pact, &oact));
6596 if (!is_error(ret) && arg3) {
6597 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
6599 old_act->_sa_handler = oact._sa_handler;
6600 old_act->sa_mask = oact.sa_mask.sig[0];
6601 old_act->sa_flags = oact.sa_flags;
6602 unlock_user_struct(old_act, arg3, 1);
6604 #elif defined(TARGET_MIPS)
6605 struct target_sigaction act, oact, *pact, *old_act;
6608 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
6610 act._sa_handler = old_act->_sa_handler;
6611 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
6612 act.sa_flags = old_act->sa_flags;
6613 unlock_user_struct(old_act, arg2, 0);
6619 ret = get_errno(do_sigaction(arg1, pact, &oact));
6621 if (!is_error(ret) && arg3) {
6622 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
6624 old_act->_sa_handler = oact._sa_handler;
6625 old_act->sa_flags = oact.sa_flags;
6626 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
6627 old_act->sa_mask.sig[1] = 0;
6628 old_act->sa_mask.sig[2] = 0;
6629 old_act->sa_mask.sig[3] = 0;
6630 unlock_user_struct(old_act, arg3, 1);
6633 struct target_old_sigaction *old_act;
6634 struct target_sigaction act, oact, *pact;
6636 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
6638 act._sa_handler = old_act->_sa_handler;
6639 target_siginitset(&act.sa_mask, old_act->sa_mask);
6640 act.sa_flags = old_act->sa_flags;
6641 act.sa_restorer = old_act->sa_restorer;
6642 unlock_user_struct(old_act, arg2, 0);
6647 ret = get_errno(do_sigaction(arg1, pact, &oact));
6648 if (!is_error(ret) && arg3) {
6649 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
6651 old_act->_sa_handler = oact._sa_handler;
6652 old_act->sa_mask = oact.sa_mask.sig[0];
6653 old_act->sa_flags = oact.sa_flags;
6654 old_act->sa_restorer = oact.sa_restorer;
6655 unlock_user_struct(old_act, arg3, 1);
6661 case TARGET_NR_rt_sigaction:
6663 #if defined(TARGET_ALPHA)
6664 struct target_sigaction act, oact, *pact = 0;
6665 struct target_rt_sigaction *rt_act;
6666 /* ??? arg4 == sizeof(sigset_t). */
6668 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
6670 act._sa_handler = rt_act->_sa_handler;
6671 act.sa_mask = rt_act->sa_mask;
6672 act.sa_flags = rt_act->sa_flags;
6673 act.sa_restorer = arg5;
6674 unlock_user_struct(rt_act, arg2, 0);
6677 ret = get_errno(do_sigaction(arg1, pact, &oact));
6678 if (!is_error(ret) && arg3) {
6679 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
6681 rt_act->_sa_handler = oact._sa_handler;
6682 rt_act->sa_mask = oact.sa_mask;
6683 rt_act->sa_flags = oact.sa_flags;
6684 unlock_user_struct(rt_act, arg3, 1);
6687 struct target_sigaction *act;
6688 struct target_sigaction *oact;
6691 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
6696 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
6697 ret = -TARGET_EFAULT;
6698 goto rt_sigaction_fail;
6702 ret = get_errno(do_sigaction(arg1, act, oact));
6705 unlock_user_struct(act, arg2, 0);
6707 unlock_user_struct(oact, arg3, 1);
6711 #ifdef TARGET_NR_sgetmask /* not on alpha */
6712 case TARGET_NR_sgetmask:
6715 abi_ulong target_set;
6716 do_sigprocmask(0, NULL, &cur_set);
6717 host_to_target_old_sigset(&target_set, &cur_set);
6722 #ifdef TARGET_NR_ssetmask /* not on alpha */
6723 case TARGET_NR_ssetmask:
6725 sigset_t set, oset, cur_set;
6726 abi_ulong target_set = arg1;
6727 do_sigprocmask(0, NULL, &cur_set);
6728 target_to_host_old_sigset(&set, &target_set);
6729 sigorset(&set, &set, &cur_set);
6730 do_sigprocmask(SIG_SETMASK, &set, &oset);
6731 host_to_target_old_sigset(&target_set, &oset);
6736 #ifdef TARGET_NR_sigprocmask
6737 case TARGET_NR_sigprocmask:
6739 #if defined(TARGET_ALPHA)
6740 sigset_t set, oldset;
6745 case TARGET_SIG_BLOCK:
6748 case TARGET_SIG_UNBLOCK:
6751 case TARGET_SIG_SETMASK:
6755 ret = -TARGET_EINVAL;
6759 target_to_host_old_sigset(&set, &mask);
6761 ret = get_errno(do_sigprocmask(how, &set, &oldset));
6762 if (!is_error(ret)) {
6763 host_to_target_old_sigset(&mask, &oldset);
6765 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
6768 sigset_t set, oldset, *set_ptr;
6773 case TARGET_SIG_BLOCK:
6776 case TARGET_SIG_UNBLOCK:
6779 case TARGET_SIG_SETMASK:
6783 ret = -TARGET_EINVAL;
6786 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
6788 target_to_host_old_sigset(&set, p);
6789 unlock_user(p, arg2, 0);
6795 ret = get_errno(do_sigprocmask(how, set_ptr, &oldset));
6796 if (!is_error(ret) && arg3) {
6797 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
6799 host_to_target_old_sigset(p, &oldset);
6800 unlock_user(p, arg3, sizeof(target_sigset_t));
6806 case TARGET_NR_rt_sigprocmask:
6809 sigset_t set, oldset, *set_ptr;
6813 case TARGET_SIG_BLOCK:
6816 case TARGET_SIG_UNBLOCK:
6819 case TARGET_SIG_SETMASK:
6823 ret = -TARGET_EINVAL;
6826 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
6828 target_to_host_sigset(&set, p);
6829 unlock_user(p, arg2, 0);
6835 ret = get_errno(do_sigprocmask(how, set_ptr, &oldset));
6836 if (!is_error(ret) && arg3) {
6837 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
6839 host_to_target_sigset(p, &oldset);
6840 unlock_user(p, arg3, sizeof(target_sigset_t));
6844 #ifdef TARGET_NR_sigpending
6845 case TARGET_NR_sigpending:
6848 ret = get_errno(sigpending(&set));
6849 if (!is_error(ret)) {
6850 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6852 host_to_target_old_sigset(p, &set);
6853 unlock_user(p, arg1, sizeof(target_sigset_t));
6858 case TARGET_NR_rt_sigpending:
6861 ret = get_errno(sigpending(&set));
6862 if (!is_error(ret)) {
6863 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6865 host_to_target_sigset(p, &set);
6866 unlock_user(p, arg1, sizeof(target_sigset_t));
6870 #ifdef TARGET_NR_sigsuspend
6871 case TARGET_NR_sigsuspend:
6874 #if defined(TARGET_ALPHA)
6875 abi_ulong mask = arg1;
6876 target_to_host_old_sigset(&set, &mask);
6878 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6880 target_to_host_old_sigset(&set, p);
6881 unlock_user(p, arg1, 0);
6883 ret = get_errno(sigsuspend(&set));
6887 case TARGET_NR_rt_sigsuspend:
6890 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6892 target_to_host_sigset(&set, p);
6893 unlock_user(p, arg1, 0);
6894 ret = get_errno(sigsuspend(&set));
6897 case TARGET_NR_rt_sigtimedwait:
6900 struct timespec uts, *puts;
6903 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6905 target_to_host_sigset(&set, p);
6906 unlock_user(p, arg1, 0);
6909 target_to_host_timespec(puts, arg3);
6913 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
6914 if (!is_error(ret)) {
6916 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
6921 host_to_target_siginfo(p, &uinfo);
6922 unlock_user(p, arg2, sizeof(target_siginfo_t));
6924 ret = host_to_target_signal(ret);
6928 case TARGET_NR_rt_sigqueueinfo:
6931 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1)))
6933 target_to_host_siginfo(&uinfo, p);
6934 unlock_user(p, arg1, 0);
6935 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
6938 #ifdef TARGET_NR_sigreturn
6939 case TARGET_NR_sigreturn:
6940 /* NOTE: ret is eax, so not transcoding must be done */
6941 ret = do_sigreturn(cpu_env);
6944 case TARGET_NR_rt_sigreturn:
6945 /* NOTE: ret is eax, so not transcoding must be done */
6946 ret = do_rt_sigreturn(cpu_env);
6948 case TARGET_NR_sethostname:
6949 if (!(p = lock_user_string(arg1)))
6951 ret = get_errno(sethostname(p, arg2));
6952 unlock_user(p, arg1, 0);
6954 case TARGET_NR_setrlimit:
6956 int resource = target_to_host_resource(arg1);
6957 struct target_rlimit *target_rlim;
6959 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
6961 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
6962 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
6963 unlock_user_struct(target_rlim, arg2, 0);
6964 ret = get_errno(setrlimit(resource, &rlim));
6967 case TARGET_NR_getrlimit:
6969 int resource = target_to_host_resource(arg1);
6970 struct target_rlimit *target_rlim;
6973 ret = get_errno(getrlimit(resource, &rlim));
6974 if (!is_error(ret)) {
6975 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
6977 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
6978 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
6979 unlock_user_struct(target_rlim, arg2, 1);
6983 case TARGET_NR_getrusage:
6985 struct rusage rusage;
6986 ret = get_errno(getrusage(arg1, &rusage));
6987 if (!is_error(ret)) {
6988 ret = host_to_target_rusage(arg2, &rusage);
6992 case TARGET_NR_gettimeofday:
6995 ret = get_errno(gettimeofday(&tv, NULL));
6996 if (!is_error(ret)) {
6997 if (copy_to_user_timeval(arg1, &tv))
7002 case TARGET_NR_settimeofday:
7004 struct timeval tv, *ptv = NULL;
7005 struct timezone tz, *ptz = NULL;
7008 if (copy_from_user_timeval(&tv, arg1)) {
7015 if (copy_from_user_timezone(&tz, arg2)) {
7021 ret = get_errno(settimeofday(ptv, ptz));
7024 #if defined(TARGET_NR_select)
7025 case TARGET_NR_select:
7026 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
7027 ret = do_select(arg1, arg2, arg3, arg4, arg5);
7030 struct target_sel_arg_struct *sel;
7031 abi_ulong inp, outp, exp, tvp;
7034 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
7036 nsel = tswapal(sel->n);
7037 inp = tswapal(sel->inp);
7038 outp = tswapal(sel->outp);
7039 exp = tswapal(sel->exp);
7040 tvp = tswapal(sel->tvp);
7041 unlock_user_struct(sel, arg1, 0);
7042 ret = do_select(nsel, inp, outp, exp, tvp);
7047 #ifdef TARGET_NR_pselect6
7048 case TARGET_NR_pselect6:
7050 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
7051 fd_set rfds, wfds, efds;
7052 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
7053 struct timespec ts, *ts_ptr;
7056 * The 6th arg is actually two args smashed together,
7057 * so we cannot use the C library.
7065 abi_ulong arg_sigset, arg_sigsize, *arg7;
7066 target_sigset_t *target_sigset;
7074 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
7078 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
7082 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
7088 * This takes a timespec, and not a timeval, so we cannot
7089 * use the do_select() helper ...
7092 if (target_to_host_timespec(&ts, ts_addr)) {
7100 /* Extract the two packed args for the sigset */
7103 sig.size = _NSIG / 8;
7105 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
7109 arg_sigset = tswapal(arg7[0]);
7110 arg_sigsize = tswapal(arg7[1]);
7111 unlock_user(arg7, arg6, 0);
7115 if (arg_sigsize != sizeof(*target_sigset)) {
7116 /* Like the kernel, we enforce correct size sigsets */
7117 ret = -TARGET_EINVAL;
7120 target_sigset = lock_user(VERIFY_READ, arg_sigset,
7121 sizeof(*target_sigset), 1);
7122 if (!target_sigset) {
7125 target_to_host_sigset(&set, target_sigset);
7126 unlock_user(target_sigset, arg_sigset, 0);
7134 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
7137 if (!is_error(ret)) {
7138 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
7140 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
7142 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
7145 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
7151 #ifdef TARGET_NR_symlink
7152 case TARGET_NR_symlink:
7155 p = lock_user_string(arg1);
7156 p2 = lock_user_string(arg2);
7158 ret = -TARGET_EFAULT;
7160 ret = get_errno(symlink(p, p2));
7161 unlock_user(p2, arg2, 0);
7162 unlock_user(p, arg1, 0);
7166 #if defined(TARGET_NR_symlinkat)
7167 case TARGET_NR_symlinkat:
7170 p = lock_user_string(arg1);
7171 p2 = lock_user_string(arg3);
7173 ret = -TARGET_EFAULT;
7175 ret = get_errno(symlinkat(p, arg2, p2));
7176 unlock_user(p2, arg3, 0);
7177 unlock_user(p, arg1, 0);
7181 #ifdef TARGET_NR_oldlstat
7182 case TARGET_NR_oldlstat:
7185 #ifdef TARGET_NR_readlink
7186 case TARGET_NR_readlink:
7189 p = lock_user_string(arg1);
7190 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
7192 ret = -TARGET_EFAULT;
7194 /* Short circuit this for the magic exe check. */
7195 ret = -TARGET_EINVAL;
7196 } else if (is_proc_myself((const char *)p, "exe")) {
7197 char real[PATH_MAX], *temp;
7198 temp = realpath(exec_path, real);
7199 /* Return value is # of bytes that we wrote to the buffer. */
7201 ret = get_errno(-1);
7203 /* Don't worry about sign mismatch as earlier mapping
7204 * logic would have thrown a bad address error. */
7205 ret = MIN(strlen(real), arg3);
7206 /* We cannot NUL terminate the string. */
7207 memcpy(p2, real, ret);
7210 ret = get_errno(readlink(path(p), p2, arg3));
7212 unlock_user(p2, arg2, ret);
7213 unlock_user(p, arg1, 0);
7217 #if defined(TARGET_NR_readlinkat)
7218 case TARGET_NR_readlinkat:
7221 p = lock_user_string(arg2);
7222 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
7224 ret = -TARGET_EFAULT;
7225 } else if (is_proc_myself((const char *)p, "exe")) {
7226 char real[PATH_MAX], *temp;
7227 temp = realpath(exec_path, real);
7228 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
7229 snprintf((char *)p2, arg4, "%s", real);
7231 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
7233 unlock_user(p2, arg3, ret);
7234 unlock_user(p, arg2, 0);
7238 #ifdef TARGET_NR_uselib
7239 case TARGET_NR_uselib:
7242 #ifdef TARGET_NR_swapon
7243 case TARGET_NR_swapon:
7244 if (!(p = lock_user_string(arg1)))
7246 ret = get_errno(swapon(p, arg2));
7247 unlock_user(p, arg1, 0);
7250 case TARGET_NR_reboot:
7251 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
7252 /* arg4 must be ignored in all other cases */
7253 p = lock_user_string(arg4);
7257 ret = get_errno(reboot(arg1, arg2, arg3, p));
7258 unlock_user(p, arg4, 0);
7260 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
7263 #ifdef TARGET_NR_readdir
7264 case TARGET_NR_readdir:
7267 #ifdef TARGET_NR_mmap
7268 case TARGET_NR_mmap:
7269 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7270 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
7271 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
7272 || defined(TARGET_S390X)
7275 abi_ulong v1, v2, v3, v4, v5, v6;
7276 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
7284 unlock_user(v, arg1, 0);
7285 ret = get_errno(target_mmap(v1, v2, v3,
7286 target_to_host_bitmask(v4, mmap_flags_tbl),
7290 ret = get_errno(target_mmap(arg1, arg2, arg3,
7291 target_to_host_bitmask(arg4, mmap_flags_tbl),
7297 #ifdef TARGET_NR_mmap2
7298 case TARGET_NR_mmap2:
7300 #define MMAP_SHIFT 12
7302 ret = get_errno(target_mmap(arg1, arg2, arg3,
7303 target_to_host_bitmask(arg4, mmap_flags_tbl),
7305 arg6 << MMAP_SHIFT));
7308 case TARGET_NR_munmap:
7309 ret = get_errno(target_munmap(arg1, arg2));
7311 case TARGET_NR_mprotect:
7313 TaskState *ts = cpu->opaque;
7314 /* Special hack to detect libc making the stack executable. */
7315 if ((arg3 & PROT_GROWSDOWN)
7316 && arg1 >= ts->info->stack_limit
7317 && arg1 <= ts->info->start_stack) {
7318 arg3 &= ~PROT_GROWSDOWN;
7319 arg2 = arg2 + arg1 - ts->info->stack_limit;
7320 arg1 = ts->info->stack_limit;
7323 ret = get_errno(target_mprotect(arg1, arg2, arg3));
7325 #ifdef TARGET_NR_mremap
7326 case TARGET_NR_mremap:
7327 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
7330 /* ??? msync/mlock/munlock are broken for softmmu. */
7331 #ifdef TARGET_NR_msync
7332 case TARGET_NR_msync:
7333 ret = get_errno(msync(g2h(arg1), arg2, arg3));
7336 #ifdef TARGET_NR_mlock
7337 case TARGET_NR_mlock:
7338 ret = get_errno(mlock(g2h(arg1), arg2));
7341 #ifdef TARGET_NR_munlock
7342 case TARGET_NR_munlock:
7343 ret = get_errno(munlock(g2h(arg1), arg2));
7346 #ifdef TARGET_NR_mlockall
7347 case TARGET_NR_mlockall:
7348 ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
7351 #ifdef TARGET_NR_munlockall
7352 case TARGET_NR_munlockall:
7353 ret = get_errno(munlockall());
7356 case TARGET_NR_truncate:
7357 if (!(p = lock_user_string(arg1)))
7359 ret = get_errno(truncate(p, arg2));
7360 unlock_user(p, arg1, 0);
7362 case TARGET_NR_ftruncate:
7363 ret = get_errno(ftruncate(arg1, arg2));
7365 case TARGET_NR_fchmod:
7366 ret = get_errno(fchmod(arg1, arg2));
7368 #if defined(TARGET_NR_fchmodat)
7369 case TARGET_NR_fchmodat:
7370 if (!(p = lock_user_string(arg2)))
7372 ret = get_errno(fchmodat(arg1, p, arg3, 0));
7373 unlock_user(p, arg2, 0);
7376 case TARGET_NR_getpriority:
7377 /* Note that negative values are valid for getpriority, so we must
7378 differentiate based on errno settings. */
7380 ret = getpriority(arg1, arg2);
7381 if (ret == -1 && errno != 0) {
7382 ret = -host_to_target_errno(errno);
7386 /* Return value is the unbiased priority. Signal no error. */
7387 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
7389 /* Return value is a biased priority to avoid negative numbers. */
7393 case TARGET_NR_setpriority:
7394 ret = get_errno(setpriority(arg1, arg2, arg3));
7396 #ifdef TARGET_NR_profil
7397 case TARGET_NR_profil:
7400 case TARGET_NR_statfs:
7401 if (!(p = lock_user_string(arg1)))
7403 ret = get_errno(statfs(path(p), &stfs));
7404 unlock_user(p, arg1, 0);
7406 if (!is_error(ret)) {
7407 struct target_statfs *target_stfs;
7409 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
7411 __put_user(stfs.f_type, &target_stfs->f_type);
7412 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
7413 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
7414 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
7415 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
7416 __put_user(stfs.f_files, &target_stfs->f_files);
7417 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
7418 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
7419 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
7420 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
7421 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
7422 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
7423 unlock_user_struct(target_stfs, arg2, 1);
7426 case TARGET_NR_fstatfs:
7427 ret = get_errno(fstatfs(arg1, &stfs));
7428 goto convert_statfs;
7429 #ifdef TARGET_NR_statfs64
7430 case TARGET_NR_statfs64:
7431 if (!(p = lock_user_string(arg1)))
7433 ret = get_errno(statfs(path(p), &stfs));
7434 unlock_user(p, arg1, 0);
7436 if (!is_error(ret)) {
7437 struct target_statfs64 *target_stfs;
7439 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
7441 __put_user(stfs.f_type, &target_stfs->f_type);
7442 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
7443 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
7444 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
7445 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
7446 __put_user(stfs.f_files, &target_stfs->f_files);
7447 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
7448 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
7449 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
7450 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
7451 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
7452 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
7453 unlock_user_struct(target_stfs, arg3, 1);
7456 case TARGET_NR_fstatfs64:
7457 ret = get_errno(fstatfs(arg1, &stfs));
7458 goto convert_statfs64;
7460 #ifdef TARGET_NR_ioperm
7461 case TARGET_NR_ioperm:
7464 #ifdef TARGET_NR_socketcall
7465 case TARGET_NR_socketcall:
7466 ret = do_socketcall(arg1, arg2);
7469 #ifdef TARGET_NR_accept
7470 case TARGET_NR_accept:
7471 ret = do_accept4(arg1, arg2, arg3, 0);
7474 #ifdef TARGET_NR_accept4
7475 case TARGET_NR_accept4:
7476 #ifdef CONFIG_ACCEPT4
7477 ret = do_accept4(arg1, arg2, arg3, arg4);
7483 #ifdef TARGET_NR_bind
7484 case TARGET_NR_bind:
7485 ret = do_bind(arg1, arg2, arg3);
7488 #ifdef TARGET_NR_connect
7489 case TARGET_NR_connect:
7490 ret = do_connect(arg1, arg2, arg3);
7493 #ifdef TARGET_NR_getpeername
7494 case TARGET_NR_getpeername:
7495 ret = do_getpeername(arg1, arg2, arg3);
7498 #ifdef TARGET_NR_getsockname
7499 case TARGET_NR_getsockname:
7500 ret = do_getsockname(arg1, arg2, arg3);
7503 #ifdef TARGET_NR_getsockopt
7504 case TARGET_NR_getsockopt:
7505 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
7508 #ifdef TARGET_NR_listen
7509 case TARGET_NR_listen:
7510 ret = get_errno(listen(arg1, arg2));
7513 #ifdef TARGET_NR_recv
7514 case TARGET_NR_recv:
7515 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
7518 #ifdef TARGET_NR_recvfrom
7519 case TARGET_NR_recvfrom:
7520 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
7523 #ifdef TARGET_NR_recvmsg
7524 case TARGET_NR_recvmsg:
7525 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
7528 #ifdef TARGET_NR_send
7529 case TARGET_NR_send:
7530 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
7533 #ifdef TARGET_NR_sendmsg
7534 case TARGET_NR_sendmsg:
7535 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
7538 #ifdef TARGET_NR_sendmmsg
7539 case TARGET_NR_sendmmsg:
7540 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
7542 case TARGET_NR_recvmmsg:
7543 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
7546 #ifdef TARGET_NR_sendto
7547 case TARGET_NR_sendto:
7548 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
7551 #ifdef TARGET_NR_shutdown
7552 case TARGET_NR_shutdown:
7553 ret = get_errno(shutdown(arg1, arg2));
7556 #ifdef TARGET_NR_socket
7557 case TARGET_NR_socket:
7558 ret = do_socket(arg1, arg2, arg3);
7559 fd_trans_unregister(ret);
7562 #ifdef TARGET_NR_socketpair
7563 case TARGET_NR_socketpair:
7564 ret = do_socketpair(arg1, arg2, arg3, arg4);
7567 #ifdef TARGET_NR_setsockopt
7568 case TARGET_NR_setsockopt:
7569 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
7573 case TARGET_NR_syslog:
7574 if (!(p = lock_user_string(arg2)))
7576 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
7577 unlock_user(p, arg2, 0);
7580 case TARGET_NR_setitimer:
7582 struct itimerval value, ovalue, *pvalue;
7586 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
7587 || copy_from_user_timeval(&pvalue->it_value,
7588 arg2 + sizeof(struct target_timeval)))
7593 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
7594 if (!is_error(ret) && arg3) {
7595 if (copy_to_user_timeval(arg3,
7596 &ovalue.it_interval)
7597 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
7603 case TARGET_NR_getitimer:
7605 struct itimerval value;
7607 ret = get_errno(getitimer(arg1, &value));
7608 if (!is_error(ret) && arg2) {
7609 if (copy_to_user_timeval(arg2,
7611 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
7617 #ifdef TARGET_NR_stat
7618 case TARGET_NR_stat:
7619 if (!(p = lock_user_string(arg1)))
7621 ret = get_errno(stat(path(p), &st));
7622 unlock_user(p, arg1, 0);
7625 #ifdef TARGET_NR_lstat
7626 case TARGET_NR_lstat:
7627 if (!(p = lock_user_string(arg1)))
7629 ret = get_errno(lstat(path(p), &st));
7630 unlock_user(p, arg1, 0);
7633 case TARGET_NR_fstat:
7635 ret = get_errno(fstat(arg1, &st));
7636 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
7639 if (!is_error(ret)) {
7640 struct target_stat *target_st;
7642 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
7644 memset(target_st, 0, sizeof(*target_st));
7645 __put_user(st.st_dev, &target_st->st_dev);
7646 __put_user(st.st_ino, &target_st->st_ino);
7647 __put_user(st.st_mode, &target_st->st_mode);
7648 __put_user(st.st_uid, &target_st->st_uid);
7649 __put_user(st.st_gid, &target_st->st_gid);
7650 __put_user(st.st_nlink, &target_st->st_nlink);
7651 __put_user(st.st_rdev, &target_st->st_rdev);
7652 __put_user(st.st_size, &target_st->st_size);
7653 __put_user(st.st_blksize, &target_st->st_blksize);
7654 __put_user(st.st_blocks, &target_st->st_blocks);
7655 __put_user(st.st_atime, &target_st->target_st_atime);
7656 __put_user(st.st_mtime, &target_st->target_st_mtime);
7657 __put_user(st.st_ctime, &target_st->target_st_ctime);
7658 unlock_user_struct(target_st, arg2, 1);
7662 #ifdef TARGET_NR_olduname
7663 case TARGET_NR_olduname:
7666 #ifdef TARGET_NR_iopl
7667 case TARGET_NR_iopl:
7670 case TARGET_NR_vhangup:
7671 ret = get_errno(vhangup());
7673 #ifdef TARGET_NR_idle
7674 case TARGET_NR_idle:
7677 #ifdef TARGET_NR_syscall
7678 case TARGET_NR_syscall:
7679 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
7680 arg6, arg7, arg8, 0);
7683 case TARGET_NR_wait4:
7686 abi_long status_ptr = arg2;
7687 struct rusage rusage, *rusage_ptr;
7688 abi_ulong target_rusage = arg4;
7689 abi_long rusage_err;
7691 rusage_ptr = &rusage;
7694 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
7695 if (!is_error(ret)) {
7696 if (status_ptr && ret) {
7697 status = host_to_target_waitstatus(status);
7698 if (put_user_s32(status, status_ptr))
7701 if (target_rusage) {
7702 rusage_err = host_to_target_rusage(target_rusage, &rusage);
7710 #ifdef TARGET_NR_swapoff
7711 case TARGET_NR_swapoff:
7712 if (!(p = lock_user_string(arg1)))
7714 ret = get_errno(swapoff(p));
7715 unlock_user(p, arg1, 0);
7718 case TARGET_NR_sysinfo:
7720 struct target_sysinfo *target_value;
7721 struct sysinfo value;
7722 ret = get_errno(sysinfo(&value));
7723 if (!is_error(ret) && arg1)
7725 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
7727 __put_user(value.uptime, &target_value->uptime);
7728 __put_user(value.loads[0], &target_value->loads[0]);
7729 __put_user(value.loads[1], &target_value->loads[1]);
7730 __put_user(value.loads[2], &target_value->loads[2]);
7731 __put_user(value.totalram, &target_value->totalram);
7732 __put_user(value.freeram, &target_value->freeram);
7733 __put_user(value.sharedram, &target_value->sharedram);
7734 __put_user(value.bufferram, &target_value->bufferram);
7735 __put_user(value.totalswap, &target_value->totalswap);
7736 __put_user(value.freeswap, &target_value->freeswap);
7737 __put_user(value.procs, &target_value->procs);
7738 __put_user(value.totalhigh, &target_value->totalhigh);
7739 __put_user(value.freehigh, &target_value->freehigh);
7740 __put_user(value.mem_unit, &target_value->mem_unit);
7741 unlock_user_struct(target_value, arg1, 1);
7745 #ifdef TARGET_NR_ipc
7747 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
7750 #ifdef TARGET_NR_semget
7751 case TARGET_NR_semget:
7752 ret = get_errno(semget(arg1, arg2, arg3));
7755 #ifdef TARGET_NR_semop
7756 case TARGET_NR_semop:
7757 ret = do_semop(arg1, arg2, arg3);
7760 #ifdef TARGET_NR_semctl
7761 case TARGET_NR_semctl:
7762 ret = do_semctl(arg1, arg2, arg3, arg4);
7765 #ifdef TARGET_NR_msgctl
7766 case TARGET_NR_msgctl:
7767 ret = do_msgctl(arg1, arg2, arg3);
7770 #ifdef TARGET_NR_msgget
7771 case TARGET_NR_msgget:
7772 ret = get_errno(msgget(arg1, arg2));
7775 #ifdef TARGET_NR_msgrcv
7776 case TARGET_NR_msgrcv:
7777 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
7780 #ifdef TARGET_NR_msgsnd
7781 case TARGET_NR_msgsnd:
7782 ret = do_msgsnd(arg1, arg2, arg3, arg4);
7785 #ifdef TARGET_NR_shmget
7786 case TARGET_NR_shmget:
7787 ret = get_errno(shmget(arg1, arg2, arg3));
7790 #ifdef TARGET_NR_shmctl
7791 case TARGET_NR_shmctl:
7792 ret = do_shmctl(arg1, arg2, arg3);
7795 #ifdef TARGET_NR_shmat
7796 case TARGET_NR_shmat:
7797 ret = do_shmat(arg1, arg2, arg3);
7800 #ifdef TARGET_NR_shmdt
7801 case TARGET_NR_shmdt:
7802 ret = do_shmdt(arg1);
7805 case TARGET_NR_fsync:
7806 ret = get_errno(fsync(arg1));
7808 case TARGET_NR_clone:
7809 /* Linux manages to have three different orderings for its
7810 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
7811 * match the kernel's CONFIG_CLONE_* settings.
7812 * Microblaze is further special in that it uses a sixth
7813 * implicit argument to clone for the TLS pointer.
7815 #if defined(TARGET_MICROBLAZE)
7816 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
7817 #elif defined(TARGET_CLONE_BACKWARDS)
7818 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
7819 #elif defined(TARGET_CLONE_BACKWARDS2)
7820 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
7822 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
7825 #ifdef __NR_exit_group
7826 /* new thread calls */
7827 case TARGET_NR_exit_group:
7831 gdb_exit(cpu_env, arg1);
7832 ret = get_errno(exit_group(arg1));
7835 case TARGET_NR_setdomainname:
7836 if (!(p = lock_user_string(arg1)))
7838 ret = get_errno(setdomainname(p, arg2));
7839 unlock_user(p, arg1, 0);
7841 case TARGET_NR_uname:
7842 /* no need to transcode because we use the linux syscall */
7844 struct new_utsname * buf;
7846 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
7848 ret = get_errno(sys_uname(buf));
7849 if (!is_error(ret)) {
7850 /* Overrite the native machine name with whatever is being
7852 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
7853 /* Allow the user to override the reported release. */
7854 if (qemu_uname_release && *qemu_uname_release)
7855 strcpy (buf->release, qemu_uname_release);
7857 unlock_user_struct(buf, arg1, 1);
7861 case TARGET_NR_modify_ldt:
7862 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
7864 #if !defined(TARGET_X86_64)
7865 case TARGET_NR_vm86old:
7867 case TARGET_NR_vm86:
7868 ret = do_vm86(cpu_env, arg1, arg2);
7872 case TARGET_NR_adjtimex:
7874 #ifdef TARGET_NR_create_module
7875 case TARGET_NR_create_module:
7877 case TARGET_NR_init_module:
7878 case TARGET_NR_delete_module:
7879 #ifdef TARGET_NR_get_kernel_syms
7880 case TARGET_NR_get_kernel_syms:
7883 case TARGET_NR_quotactl:
7885 case TARGET_NR_getpgid:
7886 ret = get_errno(getpgid(arg1));
7888 case TARGET_NR_fchdir:
7889 ret = get_errno(fchdir(arg1));
7891 #ifdef TARGET_NR_bdflush /* not on x86_64 */
7892 case TARGET_NR_bdflush:
7895 #ifdef TARGET_NR_sysfs
7896 case TARGET_NR_sysfs:
7899 case TARGET_NR_personality:
7900 ret = get_errno(personality(arg1));
7902 #ifdef TARGET_NR_afs_syscall
7903 case TARGET_NR_afs_syscall:
7906 #ifdef TARGET_NR__llseek /* Not on alpha */
7907 case TARGET_NR__llseek:
7910 #if !defined(__NR_llseek)
7911 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
7913 ret = get_errno(res);
7918 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
7920 if ((ret == 0) && put_user_s64(res, arg4)) {
7926 #ifdef TARGET_NR_getdents
7927 case TARGET_NR_getdents:
7928 #ifdef __NR_getdents
7929 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
7931 struct target_dirent *target_dirp;
7932 struct linux_dirent *dirp;
7933 abi_long count = arg3;
7935 dirp = g_try_malloc(count);
7937 ret = -TARGET_ENOMEM;
7941 ret = get_errno(sys_getdents(arg1, dirp, count));
7942 if (!is_error(ret)) {
7943 struct linux_dirent *de;
7944 struct target_dirent *tde;
7946 int reclen, treclen;
7947 int count1, tnamelen;
7951 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7955 reclen = de->d_reclen;
7956 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
7957 assert(tnamelen >= 0);
7958 treclen = tnamelen + offsetof(struct target_dirent, d_name);
7959 assert(count1 + treclen <= count);
7960 tde->d_reclen = tswap16(treclen);
7961 tde->d_ino = tswapal(de->d_ino);
7962 tde->d_off = tswapal(de->d_off);
7963 memcpy(tde->d_name, de->d_name, tnamelen);
7964 de = (struct linux_dirent *)((char *)de + reclen);
7966 tde = (struct target_dirent *)((char *)tde + treclen);
7970 unlock_user(target_dirp, arg2, ret);
7976 struct linux_dirent *dirp;
7977 abi_long count = arg3;
7979 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7981 ret = get_errno(sys_getdents(arg1, dirp, count));
7982 if (!is_error(ret)) {
7983 struct linux_dirent *de;
7988 reclen = de->d_reclen;
7991 de->d_reclen = tswap16(reclen);
7992 tswapls(&de->d_ino);
7993 tswapls(&de->d_off);
7994 de = (struct linux_dirent *)((char *)de + reclen);
7998 unlock_user(dirp, arg2, ret);
8002 /* Implement getdents in terms of getdents64 */
8004 struct linux_dirent64 *dirp;
8005 abi_long count = arg3;
8007 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8011 ret = get_errno(sys_getdents64(arg1, dirp, count));
8012 if (!is_error(ret)) {
8013 /* Convert the dirent64 structs to target dirent. We do this
8014 * in-place, since we can guarantee that a target_dirent is no
8015 * larger than a dirent64; however this means we have to be
8016 * careful to read everything before writing in the new format.
8018 struct linux_dirent64 *de;
8019 struct target_dirent *tde;
8024 tde = (struct target_dirent *)dirp;
8026 int namelen, treclen;
8027 int reclen = de->d_reclen;
8028 uint64_t ino = de->d_ino;
8029 int64_t off = de->d_off;
8030 uint8_t type = de->d_type;
8032 namelen = strlen(de->d_name);
8033 treclen = offsetof(struct target_dirent, d_name)
8035 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
8037 memmove(tde->d_name, de->d_name, namelen + 1);
8038 tde->d_ino = tswapal(ino);
8039 tde->d_off = tswapal(off);
8040 tde->d_reclen = tswap16(treclen);
8041 /* The target_dirent type is in what was formerly a padding
8042 * byte at the end of the structure:
8044 *(((char *)tde) + treclen - 1) = type;
8046 de = (struct linux_dirent64 *)((char *)de + reclen);
8047 tde = (struct target_dirent *)((char *)tde + treclen);
8053 unlock_user(dirp, arg2, ret);
8057 #endif /* TARGET_NR_getdents */
8058 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8059 case TARGET_NR_getdents64:
8061 struct linux_dirent64 *dirp;
8062 abi_long count = arg3;
8063 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
8065 ret = get_errno(sys_getdents64(arg1, dirp, count));
8066 if (!is_error(ret)) {
8067 struct linux_dirent64 *de;
8072 reclen = de->d_reclen;
8075 de->d_reclen = tswap16(reclen);
8076 tswap64s((uint64_t *)&de->d_ino);
8077 tswap64s((uint64_t *)&de->d_off);
8078 de = (struct linux_dirent64 *)((char *)de + reclen);
8082 unlock_user(dirp, arg2, ret);
8085 #endif /* TARGET_NR_getdents64 */
8086 #if defined(TARGET_NR__newselect)
8087 case TARGET_NR__newselect:
8088 ret = do_select(arg1, arg2, arg3, arg4, arg5);
8091 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
8092 # ifdef TARGET_NR_poll
8093 case TARGET_NR_poll:
8095 # ifdef TARGET_NR_ppoll
8096 case TARGET_NR_ppoll:
8099 struct target_pollfd *target_pfd;
8100 unsigned int nfds = arg2;
8108 target_pfd = lock_user(VERIFY_WRITE, arg1,
8109 sizeof(struct target_pollfd) * nfds, 1);
8114 pfd = alloca(sizeof(struct pollfd) * nfds);
8115 for (i = 0; i < nfds; i++) {
8116 pfd[i].fd = tswap32(target_pfd[i].fd);
8117 pfd[i].events = tswap16(target_pfd[i].events);
8121 # ifdef TARGET_NR_ppoll
8122 if (num == TARGET_NR_ppoll) {
8123 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
8124 target_sigset_t *target_set;
8125 sigset_t _set, *set = &_set;
8128 if (target_to_host_timespec(timeout_ts, arg3)) {
8129 unlock_user(target_pfd, arg1, 0);
8137 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
8139 unlock_user(target_pfd, arg1, 0);
8142 target_to_host_sigset(set, target_set);
8147 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));
8149 if (!is_error(ret) && arg3) {
8150 host_to_target_timespec(arg3, timeout_ts);
8153 unlock_user(target_set, arg4, 0);
8157 ret = get_errno(poll(pfd, nfds, timeout));
8159 if (!is_error(ret)) {
8160 for(i = 0; i < nfds; i++) {
8161 target_pfd[i].revents = tswap16(pfd[i].revents);
8164 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
8168 case TARGET_NR_flock:
8169 /* NOTE: the flock constant seems to be the same for every
8171 ret = get_errno(flock(arg1, arg2));
8173 case TARGET_NR_readv:
8175 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
8177 ret = get_errno(readv(arg1, vec, arg3));
8178 unlock_iovec(vec, arg2, arg3, 1);
8180 ret = -host_to_target_errno(errno);
8184 case TARGET_NR_writev:
8186 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
8188 ret = get_errno(writev(arg1, vec, arg3));
8189 unlock_iovec(vec, arg2, arg3, 0);
8191 ret = -host_to_target_errno(errno);
8195 case TARGET_NR_getsid:
8196 ret = get_errno(getsid(arg1));
8198 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
8199 case TARGET_NR_fdatasync:
8200 ret = get_errno(fdatasync(arg1));
8203 #ifdef TARGET_NR__sysctl
8204 case TARGET_NR__sysctl:
8205 /* We don't implement this, but ENOTDIR is always a safe
8207 ret = -TARGET_ENOTDIR;
8210 case TARGET_NR_sched_getaffinity:
8212 unsigned int mask_size;
8213 unsigned long *mask;
8216 * sched_getaffinity needs multiples of ulong, so need to take
8217 * care of mismatches between target ulong and host ulong sizes.
8219 if (arg2 & (sizeof(abi_ulong) - 1)) {
8220 ret = -TARGET_EINVAL;
8223 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
8225 mask = alloca(mask_size);
8226 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
8228 if (!is_error(ret)) {
8230 /* More data returned than the caller's buffer will fit.
8231 * This only happens if sizeof(abi_long) < sizeof(long)
8232 * and the caller passed us a buffer holding an odd number
8233 * of abi_longs. If the host kernel is actually using the
8234 * extra 4 bytes then fail EINVAL; otherwise we can just
8235 * ignore them and only copy the interesting part.
8237 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
8238 if (numcpus > arg2 * 8) {
8239 ret = -TARGET_EINVAL;
8245 if (copy_to_user(arg3, mask, ret)) {
8251 case TARGET_NR_sched_setaffinity:
8253 unsigned int mask_size;
8254 unsigned long *mask;
8257 * sched_setaffinity needs multiples of ulong, so need to take
8258 * care of mismatches between target ulong and host ulong sizes.
8260 if (arg2 & (sizeof(abi_ulong) - 1)) {
8261 ret = -TARGET_EINVAL;
8264 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
8266 mask = alloca(mask_size);
8267 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
8270 memcpy(mask, p, arg2);
8271 unlock_user_struct(p, arg2, 0);
8273 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
8276 case TARGET_NR_sched_setparam:
8278 struct sched_param *target_schp;
8279 struct sched_param schp;
8282 return -TARGET_EINVAL;
8284 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
8286 schp.sched_priority = tswap32(target_schp->sched_priority);
8287 unlock_user_struct(target_schp, arg2, 0);
8288 ret = get_errno(sched_setparam(arg1, &schp));
8291 case TARGET_NR_sched_getparam:
8293 struct sched_param *target_schp;
8294 struct sched_param schp;
8297 return -TARGET_EINVAL;
8299 ret = get_errno(sched_getparam(arg1, &schp));
8300 if (!is_error(ret)) {
8301 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
8303 target_schp->sched_priority = tswap32(schp.sched_priority);
8304 unlock_user_struct(target_schp, arg2, 1);
8308 case TARGET_NR_sched_setscheduler:
8310 struct sched_param *target_schp;
8311 struct sched_param schp;
8313 return -TARGET_EINVAL;
8315 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
8317 schp.sched_priority = tswap32(target_schp->sched_priority);
8318 unlock_user_struct(target_schp, arg3, 0);
8319 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
8322 case TARGET_NR_sched_getscheduler:
8323 ret = get_errno(sched_getscheduler(arg1));
8325 case TARGET_NR_sched_yield:
8326 ret = get_errno(sched_yield());
8328 case TARGET_NR_sched_get_priority_max:
8329 ret = get_errno(sched_get_priority_max(arg1));
8331 case TARGET_NR_sched_get_priority_min:
8332 ret = get_errno(sched_get_priority_min(arg1));
8334 case TARGET_NR_sched_rr_get_interval:
8337 ret = get_errno(sched_rr_get_interval(arg1, &ts));
8338 if (!is_error(ret)) {
8339 ret = host_to_target_timespec(arg2, &ts);
8343 case TARGET_NR_nanosleep:
8345 struct timespec req, rem;
8346 target_to_host_timespec(&req, arg1);
8347 ret = get_errno(nanosleep(&req, &rem));
8348 if (is_error(ret) && arg2) {
8349 host_to_target_timespec(arg2, &rem);
8353 #ifdef TARGET_NR_query_module
8354 case TARGET_NR_query_module:
8357 #ifdef TARGET_NR_nfsservctl
8358 case TARGET_NR_nfsservctl:
8361 case TARGET_NR_prctl:
8363 case PR_GET_PDEATHSIG:
8366 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
8367 if (!is_error(ret) && arg2
8368 && put_user_ual(deathsig, arg2)) {
8376 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
8380 ret = get_errno(prctl(arg1, (unsigned long)name,
8382 unlock_user(name, arg2, 16);
8387 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
8391 ret = get_errno(prctl(arg1, (unsigned long)name,
8393 unlock_user(name, arg2, 0);
8398 /* Most prctl options have no pointer arguments */
8399 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
8403 #ifdef TARGET_NR_arch_prctl
8404 case TARGET_NR_arch_prctl:
8405 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
8406 ret = do_arch_prctl(cpu_env, arg1, arg2);
8412 #ifdef TARGET_NR_pread64
8413 case TARGET_NR_pread64:
8414 if (regpairs_aligned(cpu_env)) {
8418 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8420 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
8421 unlock_user(p, arg2, ret);
8423 case TARGET_NR_pwrite64:
8424 if (regpairs_aligned(cpu_env)) {
8428 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8430 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
8431 unlock_user(p, arg2, 0);
8434 case TARGET_NR_getcwd:
8435 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
8437 ret = get_errno(sys_getcwd1(p, arg2));
8438 unlock_user(p, arg1, ret);
8440 case TARGET_NR_capget:
8441 case TARGET_NR_capset:
8443 struct target_user_cap_header *target_header;
8444 struct target_user_cap_data *target_data = NULL;
8445 struct __user_cap_header_struct header;
8446 struct __user_cap_data_struct data[2];
8447 struct __user_cap_data_struct *dataptr = NULL;
8448 int i, target_datalen;
8451 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
8454 header.version = tswap32(target_header->version);
8455 header.pid = tswap32(target_header->pid);
8457 if (header.version != _LINUX_CAPABILITY_VERSION) {
8458 /* Version 2 and up takes pointer to two user_data structs */
8462 target_datalen = sizeof(*target_data) * data_items;
8465 if (num == TARGET_NR_capget) {
8466 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
8468 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
8471 unlock_user_struct(target_header, arg1, 0);
8475 if (num == TARGET_NR_capset) {
8476 for (i = 0; i < data_items; i++) {
8477 data[i].effective = tswap32(target_data[i].effective);
8478 data[i].permitted = tswap32(target_data[i].permitted);
8479 data[i].inheritable = tswap32(target_data[i].inheritable);
8486 if (num == TARGET_NR_capget) {
8487 ret = get_errno(capget(&header, dataptr));
8489 ret = get_errno(capset(&header, dataptr));
8492 /* The kernel always updates version for both capget and capset */
8493 target_header->version = tswap32(header.version);
8494 unlock_user_struct(target_header, arg1, 1);
8497 if (num == TARGET_NR_capget) {
8498 for (i = 0; i < data_items; i++) {
8499 target_data[i].effective = tswap32(data[i].effective);
8500 target_data[i].permitted = tswap32(data[i].permitted);
8501 target_data[i].inheritable = tswap32(data[i].inheritable);
8503 unlock_user(target_data, arg2, target_datalen);
8505 unlock_user(target_data, arg2, 0);
8510 case TARGET_NR_sigaltstack:
8511 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
8514 #ifdef CONFIG_SENDFILE
8515 case TARGET_NR_sendfile:
8520 ret = get_user_sal(off, arg3);
8521 if (is_error(ret)) {
8526 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
8527 if (!is_error(ret) && arg3) {
8528 abi_long ret2 = put_user_sal(off, arg3);
8529 if (is_error(ret2)) {
8535 #ifdef TARGET_NR_sendfile64
8536 case TARGET_NR_sendfile64:
8541 ret = get_user_s64(off, arg3);
8542 if (is_error(ret)) {
8547 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
8548 if (!is_error(ret) && arg3) {
8549 abi_long ret2 = put_user_s64(off, arg3);
8550 if (is_error(ret2)) {
8558 case TARGET_NR_sendfile:
8559 #ifdef TARGET_NR_sendfile64
8560 case TARGET_NR_sendfile64:
8565 #ifdef TARGET_NR_getpmsg
8566 case TARGET_NR_getpmsg:
8569 #ifdef TARGET_NR_putpmsg
8570 case TARGET_NR_putpmsg:
8573 #ifdef TARGET_NR_vfork
8574 case TARGET_NR_vfork:
8575 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
8579 #ifdef TARGET_NR_ugetrlimit
8580 case TARGET_NR_ugetrlimit:
8583 int resource = target_to_host_resource(arg1);
8584 ret = get_errno(getrlimit(resource, &rlim));
8585 if (!is_error(ret)) {
8586 struct target_rlimit *target_rlim;
8587 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8589 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8590 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8591 unlock_user_struct(target_rlim, arg2, 1);
8596 #ifdef TARGET_NR_truncate64
8597 case TARGET_NR_truncate64:
8598 if (!(p = lock_user_string(arg1)))
8600 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
8601 unlock_user(p, arg1, 0);
8604 #ifdef TARGET_NR_ftruncate64
8605 case TARGET_NR_ftruncate64:
8606 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
8609 #ifdef TARGET_NR_stat64
8610 case TARGET_NR_stat64:
8611 if (!(p = lock_user_string(arg1)))
8613 ret = get_errno(stat(path(p), &st));
8614 unlock_user(p, arg1, 0);
8616 ret = host_to_target_stat64(cpu_env, arg2, &st);
8619 #ifdef TARGET_NR_lstat64
8620 case TARGET_NR_lstat64:
8621 if (!(p = lock_user_string(arg1)))
8623 ret = get_errno(lstat(path(p), &st));
8624 unlock_user(p, arg1, 0);
8626 ret = host_to_target_stat64(cpu_env, arg2, &st);
8629 #ifdef TARGET_NR_fstat64
8630 case TARGET_NR_fstat64:
8631 ret = get_errno(fstat(arg1, &st));
8633 ret = host_to_target_stat64(cpu_env, arg2, &st);
8636 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
8637 #ifdef TARGET_NR_fstatat64
8638 case TARGET_NR_fstatat64:
8640 #ifdef TARGET_NR_newfstatat
8641 case TARGET_NR_newfstatat:
8643 if (!(p = lock_user_string(arg2)))
8645 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
8647 ret = host_to_target_stat64(cpu_env, arg3, &st);
8650 #ifdef TARGET_NR_lchown
8651 case TARGET_NR_lchown:
8652 if (!(p = lock_user_string(arg1)))
8654 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
8655 unlock_user(p, arg1, 0);
8658 #ifdef TARGET_NR_getuid
8659 case TARGET_NR_getuid:
8660 ret = get_errno(high2lowuid(getuid()));
8663 #ifdef TARGET_NR_getgid
8664 case TARGET_NR_getgid:
8665 ret = get_errno(high2lowgid(getgid()));
8668 #ifdef TARGET_NR_geteuid
8669 case TARGET_NR_geteuid:
8670 ret = get_errno(high2lowuid(geteuid()));
8673 #ifdef TARGET_NR_getegid
8674 case TARGET_NR_getegid:
8675 ret = get_errno(high2lowgid(getegid()));
8678 case TARGET_NR_setreuid:
8679 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
8681 case TARGET_NR_setregid:
8682 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
8684 case TARGET_NR_getgroups:
8686 int gidsetsize = arg1;
8687 target_id *target_grouplist;
8691 grouplist = alloca(gidsetsize * sizeof(gid_t));
8692 ret = get_errno(getgroups(gidsetsize, grouplist));
8693 if (gidsetsize == 0)
8695 if (!is_error(ret)) {
8696 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
8697 if (!target_grouplist)
8699 for(i = 0;i < ret; i++)
8700 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
8701 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
8705 case TARGET_NR_setgroups:
8707 int gidsetsize = arg1;
8708 target_id *target_grouplist;
8709 gid_t *grouplist = NULL;
8712 grouplist = alloca(gidsetsize * sizeof(gid_t));
8713 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
8714 if (!target_grouplist) {
8715 ret = -TARGET_EFAULT;
8718 for (i = 0; i < gidsetsize; i++) {
8719 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
8721 unlock_user(target_grouplist, arg2, 0);
8723 ret = get_errno(setgroups(gidsetsize, grouplist));
8726 case TARGET_NR_fchown:
8727 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
8729 #if defined(TARGET_NR_fchownat)
8730 case TARGET_NR_fchownat:
8731 if (!(p = lock_user_string(arg2)))
8733 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
8734 low2highgid(arg4), arg5));
8735 unlock_user(p, arg2, 0);
8738 #ifdef TARGET_NR_setresuid
8739 case TARGET_NR_setresuid:
8740 ret = get_errno(setresuid(low2highuid(arg1),
8742 low2highuid(arg3)));
8745 #ifdef TARGET_NR_getresuid
8746 case TARGET_NR_getresuid:
8748 uid_t ruid, euid, suid;
8749 ret = get_errno(getresuid(&ruid, &euid, &suid));
8750 if (!is_error(ret)) {
8751 if (put_user_id(high2lowuid(ruid), arg1)
8752 || put_user_id(high2lowuid(euid), arg2)
8753 || put_user_id(high2lowuid(suid), arg3))
8759 #ifdef TARGET_NR_getresgid
8760 case TARGET_NR_setresgid:
8761 ret = get_errno(setresgid(low2highgid(arg1),
8763 low2highgid(arg3)));
8766 #ifdef TARGET_NR_getresgid
8767 case TARGET_NR_getresgid:
8769 gid_t rgid, egid, sgid;
8770 ret = get_errno(getresgid(&rgid, &egid, &sgid));
8771 if (!is_error(ret)) {
8772 if (put_user_id(high2lowgid(rgid), arg1)
8773 || put_user_id(high2lowgid(egid), arg2)
8774 || put_user_id(high2lowgid(sgid), arg3))
8780 #ifdef TARGET_NR_chown
8781 case TARGET_NR_chown:
8782 if (!(p = lock_user_string(arg1)))
8784 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
8785 unlock_user(p, arg1, 0);
8788 case TARGET_NR_setuid:
8789 ret = get_errno(setuid(low2highuid(arg1)));
8791 case TARGET_NR_setgid:
8792 ret = get_errno(setgid(low2highgid(arg1)));
8794 case TARGET_NR_setfsuid:
8795 ret = get_errno(setfsuid(arg1));
8797 case TARGET_NR_setfsgid:
8798 ret = get_errno(setfsgid(arg1));
8801 #ifdef TARGET_NR_lchown32
8802 case TARGET_NR_lchown32:
8803 if (!(p = lock_user_string(arg1)))
8805 ret = get_errno(lchown(p, arg2, arg3));
8806 unlock_user(p, arg1, 0);
8809 #ifdef TARGET_NR_getuid32
8810 case TARGET_NR_getuid32:
8811 ret = get_errno(getuid());
8815 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
8816 /* Alpha specific */
8817 case TARGET_NR_getxuid:
8821 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
8823 ret = get_errno(getuid());
8826 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
8827 /* Alpha specific */
8828 case TARGET_NR_getxgid:
8832 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
8834 ret = get_errno(getgid());
8837 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
8838 /* Alpha specific */
8839 case TARGET_NR_osf_getsysinfo:
8840 ret = -TARGET_EOPNOTSUPP;
8842 case TARGET_GSI_IEEE_FP_CONTROL:
8844 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
8846 /* Copied from linux ieee_fpcr_to_swcr. */
8847 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
8848 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
8849 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
8850 | SWCR_TRAP_ENABLE_DZE
8851 | SWCR_TRAP_ENABLE_OVF);
8852 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
8853 | SWCR_TRAP_ENABLE_INE);
8854 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
8855 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
8857 if (put_user_u64 (swcr, arg2))
8863 /* case GSI_IEEE_STATE_AT_SIGNAL:
8864 -- Not implemented in linux kernel.
8866 -- Retrieves current unaligned access state; not much used.
8868 -- Retrieves implver information; surely not used.
8870 -- Grabs a copy of the HWRPB; surely not used.
8875 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
8876 /* Alpha specific */
8877 case TARGET_NR_osf_setsysinfo:
8878 ret = -TARGET_EOPNOTSUPP;
8880 case TARGET_SSI_IEEE_FP_CONTROL:
8882 uint64_t swcr, fpcr, orig_fpcr;
8884 if (get_user_u64 (swcr, arg2)) {
8887 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
8888 fpcr = orig_fpcr & FPCR_DYN_MASK;
8890 /* Copied from linux ieee_swcr_to_fpcr. */
8891 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
8892 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
8893 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
8894 | SWCR_TRAP_ENABLE_DZE
8895 | SWCR_TRAP_ENABLE_OVF)) << 48;
8896 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
8897 | SWCR_TRAP_ENABLE_INE)) << 57;
8898 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
8899 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
8901 cpu_alpha_store_fpcr(cpu_env, fpcr);
8906 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
8908 uint64_t exc, fpcr, orig_fpcr;
8911 if (get_user_u64(exc, arg2)) {
8915 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
8917 /* We only add to the exception status here. */
8918 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
8920 cpu_alpha_store_fpcr(cpu_env, fpcr);
8923 /* Old exceptions are not signaled. */
8924 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
8926 /* If any exceptions set by this call,
8927 and are unmasked, send a signal. */
8929 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
8930 si_code = TARGET_FPE_FLTRES;
8932 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
8933 si_code = TARGET_FPE_FLTUND;
8935 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
8936 si_code = TARGET_FPE_FLTOVF;
8938 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
8939 si_code = TARGET_FPE_FLTDIV;
8941 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
8942 si_code = TARGET_FPE_FLTINV;
8945 target_siginfo_t info;
8946 info.si_signo = SIGFPE;
8948 info.si_code = si_code;
8949 info._sifields._sigfault._addr
8950 = ((CPUArchState *)cpu_env)->pc;
8951 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
8956 /* case SSI_NVPAIRS:
8957 -- Used with SSIN_UACPROC to enable unaligned accesses.
8958 case SSI_IEEE_STATE_AT_SIGNAL:
8959 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
8960 -- Not implemented in linux kernel
8965 #ifdef TARGET_NR_osf_sigprocmask
8966 /* Alpha specific. */
8967 case TARGET_NR_osf_sigprocmask:
8971 sigset_t set, oldset;
8974 case TARGET_SIG_BLOCK:
8977 case TARGET_SIG_UNBLOCK:
8980 case TARGET_SIG_SETMASK:
8984 ret = -TARGET_EINVAL;
8988 target_to_host_old_sigset(&set, &mask);
8989 do_sigprocmask(how, &set, &oldset);
8990 host_to_target_old_sigset(&mask, &oldset);
8996 #ifdef TARGET_NR_getgid32
8997 case TARGET_NR_getgid32:
8998 ret = get_errno(getgid());
9001 #ifdef TARGET_NR_geteuid32
9002 case TARGET_NR_geteuid32:
9003 ret = get_errno(geteuid());
9006 #ifdef TARGET_NR_getegid32
9007 case TARGET_NR_getegid32:
9008 ret = get_errno(getegid());
9011 #ifdef TARGET_NR_setreuid32
9012 case TARGET_NR_setreuid32:
9013 ret = get_errno(setreuid(arg1, arg2));
9016 #ifdef TARGET_NR_setregid32
9017 case TARGET_NR_setregid32:
9018 ret = get_errno(setregid(arg1, arg2));
9021 #ifdef TARGET_NR_getgroups32
9022 case TARGET_NR_getgroups32:
9024 int gidsetsize = arg1;
9025 uint32_t *target_grouplist;
9029 grouplist = alloca(gidsetsize * sizeof(gid_t));
9030 ret = get_errno(getgroups(gidsetsize, grouplist));
9031 if (gidsetsize == 0)
9033 if (!is_error(ret)) {
9034 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
9035 if (!target_grouplist) {
9036 ret = -TARGET_EFAULT;
9039 for(i = 0;i < ret; i++)
9040 target_grouplist[i] = tswap32(grouplist[i]);
9041 unlock_user(target_grouplist, arg2, gidsetsize * 4);
9046 #ifdef TARGET_NR_setgroups32
9047 case TARGET_NR_setgroups32:
9049 int gidsetsize = arg1;
9050 uint32_t *target_grouplist;
9054 grouplist = alloca(gidsetsize * sizeof(gid_t));
9055 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
9056 if (!target_grouplist) {
9057 ret = -TARGET_EFAULT;
9060 for(i = 0;i < gidsetsize; i++)
9061 grouplist[i] = tswap32(target_grouplist[i]);
9062 unlock_user(target_grouplist, arg2, 0);
9063 ret = get_errno(setgroups(gidsetsize, grouplist));
9067 #ifdef TARGET_NR_fchown32
9068 case TARGET_NR_fchown32:
9069 ret = get_errno(fchown(arg1, arg2, arg3));
9072 #ifdef TARGET_NR_setresuid32
9073 case TARGET_NR_setresuid32:
9074 ret = get_errno(setresuid(arg1, arg2, arg3));
9077 #ifdef TARGET_NR_getresuid32
9078 case TARGET_NR_getresuid32:
9080 uid_t ruid, euid, suid;
9081 ret = get_errno(getresuid(&ruid, &euid, &suid));
9082 if (!is_error(ret)) {
9083 if (put_user_u32(ruid, arg1)
9084 || put_user_u32(euid, arg2)
9085 || put_user_u32(suid, arg3))
9091 #ifdef TARGET_NR_setresgid32
9092 case TARGET_NR_setresgid32:
9093 ret = get_errno(setresgid(arg1, arg2, arg3));
9096 #ifdef TARGET_NR_getresgid32
9097 case TARGET_NR_getresgid32:
9099 gid_t rgid, egid, sgid;
9100 ret = get_errno(getresgid(&rgid, &egid, &sgid));
9101 if (!is_error(ret)) {
9102 if (put_user_u32(rgid, arg1)
9103 || put_user_u32(egid, arg2)
9104 || put_user_u32(sgid, arg3))
9110 #ifdef TARGET_NR_chown32
9111 case TARGET_NR_chown32:
9112 if (!(p = lock_user_string(arg1)))
9114 ret = get_errno(chown(p, arg2, arg3));
9115 unlock_user(p, arg1, 0);
9118 #ifdef TARGET_NR_setuid32
9119 case TARGET_NR_setuid32:
9120 ret = get_errno(setuid(arg1));
9123 #ifdef TARGET_NR_setgid32
9124 case TARGET_NR_setgid32:
9125 ret = get_errno(setgid(arg1));
9128 #ifdef TARGET_NR_setfsuid32
9129 case TARGET_NR_setfsuid32:
9130 ret = get_errno(setfsuid(arg1));
9133 #ifdef TARGET_NR_setfsgid32
9134 case TARGET_NR_setfsgid32:
9135 ret = get_errno(setfsgid(arg1));
9139 case TARGET_NR_pivot_root:
9141 #ifdef TARGET_NR_mincore
9142 case TARGET_NR_mincore:
9145 ret = -TARGET_EFAULT;
9146 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
9148 if (!(p = lock_user_string(arg3)))
9150 ret = get_errno(mincore(a, arg2, p));
9151 unlock_user(p, arg3, ret);
9153 unlock_user(a, arg1, 0);
9157 #ifdef TARGET_NR_arm_fadvise64_64
9158 case TARGET_NR_arm_fadvise64_64:
9161 * arm_fadvise64_64 looks like fadvise64_64 but
9162 * with different argument order
9170 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
9171 #ifdef TARGET_NR_fadvise64_64
9172 case TARGET_NR_fadvise64_64:
9174 #ifdef TARGET_NR_fadvise64
9175 case TARGET_NR_fadvise64:
9179 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
9180 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
9181 case 6: arg4 = POSIX_FADV_DONTNEED; break;
9182 case 7: arg4 = POSIX_FADV_NOREUSE; break;
9186 ret = -posix_fadvise(arg1, arg2, arg3, arg4);
9189 #ifdef TARGET_NR_madvise
9190 case TARGET_NR_madvise:
9191 /* A straight passthrough may not be safe because qemu sometimes
9192 turns private file-backed mappings into anonymous mappings.
9193 This will break MADV_DONTNEED.
9194 This is a hint, so ignoring and returning success is ok. */
9198 #if TARGET_ABI_BITS == 32
9199 case TARGET_NR_fcntl64:
9203 struct target_flock64 *target_fl;
9205 struct target_eabi_flock64 *target_efl;
9208 cmd = target_to_host_fcntl_cmd(arg2);
9209 if (cmd == -TARGET_EINVAL) {
9215 case TARGET_F_GETLK64:
9217 if (((CPUARMState *)cpu_env)->eabi) {
9218 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
9220 fl.l_type = tswap16(target_efl->l_type);
9221 fl.l_whence = tswap16(target_efl->l_whence);
9222 fl.l_start = tswap64(target_efl->l_start);
9223 fl.l_len = tswap64(target_efl->l_len);
9224 fl.l_pid = tswap32(target_efl->l_pid);
9225 unlock_user_struct(target_efl, arg3, 0);
9229 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
9231 fl.l_type = tswap16(target_fl->l_type);
9232 fl.l_whence = tswap16(target_fl->l_whence);
9233 fl.l_start = tswap64(target_fl->l_start);
9234 fl.l_len = tswap64(target_fl->l_len);
9235 fl.l_pid = tswap32(target_fl->l_pid);
9236 unlock_user_struct(target_fl, arg3, 0);
9238 ret = get_errno(fcntl(arg1, cmd, &fl));
9241 if (((CPUARMState *)cpu_env)->eabi) {
9242 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
9244 target_efl->l_type = tswap16(fl.l_type);
9245 target_efl->l_whence = tswap16(fl.l_whence);
9246 target_efl->l_start = tswap64(fl.l_start);
9247 target_efl->l_len = tswap64(fl.l_len);
9248 target_efl->l_pid = tswap32(fl.l_pid);
9249 unlock_user_struct(target_efl, arg3, 1);
9253 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
9255 target_fl->l_type = tswap16(fl.l_type);
9256 target_fl->l_whence = tswap16(fl.l_whence);
9257 target_fl->l_start = tswap64(fl.l_start);
9258 target_fl->l_len = tswap64(fl.l_len);
9259 target_fl->l_pid = tswap32(fl.l_pid);
9260 unlock_user_struct(target_fl, arg3, 1);
9265 case TARGET_F_SETLK64:
9266 case TARGET_F_SETLKW64:
9268 if (((CPUARMState *)cpu_env)->eabi) {
9269 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
9271 fl.l_type = tswap16(target_efl->l_type);
9272 fl.l_whence = tswap16(target_efl->l_whence);
9273 fl.l_start = tswap64(target_efl->l_start);
9274 fl.l_len = tswap64(target_efl->l_len);
9275 fl.l_pid = tswap32(target_efl->l_pid);
9276 unlock_user_struct(target_efl, arg3, 0);
9280 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
9282 fl.l_type = tswap16(target_fl->l_type);
9283 fl.l_whence = tswap16(target_fl->l_whence);
9284 fl.l_start = tswap64(target_fl->l_start);
9285 fl.l_len = tswap64(target_fl->l_len);
9286 fl.l_pid = tswap32(target_fl->l_pid);
9287 unlock_user_struct(target_fl, arg3, 0);
9289 ret = get_errno(fcntl(arg1, cmd, &fl));
9292 ret = do_fcntl(arg1, arg2, arg3);
9298 #ifdef TARGET_NR_cacheflush
9299 case TARGET_NR_cacheflush:
9300 /* self-modifying code is handled automatically, so nothing needed */
9304 #ifdef TARGET_NR_security
9305 case TARGET_NR_security:
9308 #ifdef TARGET_NR_getpagesize
9309 case TARGET_NR_getpagesize:
9310 ret = TARGET_PAGE_SIZE;
9313 case TARGET_NR_gettid:
9314 ret = get_errno(gettid());
9316 #ifdef TARGET_NR_readahead
9317 case TARGET_NR_readahead:
9318 #if TARGET_ABI_BITS == 32
9319 if (regpairs_aligned(cpu_env)) {
9324 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
9326 ret = get_errno(readahead(arg1, arg2, arg3));
9331 #ifdef TARGET_NR_setxattr
9332 case TARGET_NR_listxattr:
9333 case TARGET_NR_llistxattr:
9337 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9339 ret = -TARGET_EFAULT;
9343 p = lock_user_string(arg1);
9345 if (num == TARGET_NR_listxattr) {
9346 ret = get_errno(listxattr(p, b, arg3));
9348 ret = get_errno(llistxattr(p, b, arg3));
9351 ret = -TARGET_EFAULT;
9353 unlock_user(p, arg1, 0);
9354 unlock_user(b, arg2, arg3);
9357 case TARGET_NR_flistxattr:
9361 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9363 ret = -TARGET_EFAULT;
9367 ret = get_errno(flistxattr(arg1, b, arg3));
9368 unlock_user(b, arg2, arg3);
9371 case TARGET_NR_setxattr:
9372 case TARGET_NR_lsetxattr:
9374 void *p, *n, *v = 0;
9376 v = lock_user(VERIFY_READ, arg3, arg4, 1);
9378 ret = -TARGET_EFAULT;
9382 p = lock_user_string(arg1);
9383 n = lock_user_string(arg2);
9385 if (num == TARGET_NR_setxattr) {
9386 ret = get_errno(setxattr(p, n, v, arg4, arg5));
9388 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
9391 ret = -TARGET_EFAULT;
9393 unlock_user(p, arg1, 0);
9394 unlock_user(n, arg2, 0);
9395 unlock_user(v, arg3, 0);
9398 case TARGET_NR_fsetxattr:
9402 v = lock_user(VERIFY_READ, arg3, arg4, 1);
9404 ret = -TARGET_EFAULT;
9408 n = lock_user_string(arg2);
9410 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
9412 ret = -TARGET_EFAULT;
9414 unlock_user(n, arg2, 0);
9415 unlock_user(v, arg3, 0);
9418 case TARGET_NR_getxattr:
9419 case TARGET_NR_lgetxattr:
9421 void *p, *n, *v = 0;
9423 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9425 ret = -TARGET_EFAULT;
9429 p = lock_user_string(arg1);
9430 n = lock_user_string(arg2);
9432 if (num == TARGET_NR_getxattr) {
9433 ret = get_errno(getxattr(p, n, v, arg4));
9435 ret = get_errno(lgetxattr(p, n, v, arg4));
9438 ret = -TARGET_EFAULT;
9440 unlock_user(p, arg1, 0);
9441 unlock_user(n, arg2, 0);
9442 unlock_user(v, arg3, arg4);
9445 case TARGET_NR_fgetxattr:
9449 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9451 ret = -TARGET_EFAULT;
9455 n = lock_user_string(arg2);
9457 ret = get_errno(fgetxattr(arg1, n, v, arg4));
9459 ret = -TARGET_EFAULT;
9461 unlock_user(n, arg2, 0);
9462 unlock_user(v, arg3, arg4);
9465 case TARGET_NR_removexattr:
9466 case TARGET_NR_lremovexattr:
9469 p = lock_user_string(arg1);
9470 n = lock_user_string(arg2);
9472 if (num == TARGET_NR_removexattr) {
9473 ret = get_errno(removexattr(p, n));
9475 ret = get_errno(lremovexattr(p, n));
9478 ret = -TARGET_EFAULT;
9480 unlock_user(p, arg1, 0);
9481 unlock_user(n, arg2, 0);
9484 case TARGET_NR_fremovexattr:
9487 n = lock_user_string(arg2);
9489 ret = get_errno(fremovexattr(arg1, n));
9491 ret = -TARGET_EFAULT;
9493 unlock_user(n, arg2, 0);
9497 #endif /* CONFIG_ATTR */
9498 #ifdef TARGET_NR_set_thread_area
9499 case TARGET_NR_set_thread_area:
9500 #if defined(TARGET_MIPS)
9501 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
9504 #elif defined(TARGET_CRIS)
9506 ret = -TARGET_EINVAL;
9508 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
9512 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
9513 ret = do_set_thread_area(cpu_env, arg1);
9515 #elif defined(TARGET_M68K)
9517 TaskState *ts = cpu->opaque;
9518 ts->tp_value = arg1;
9523 goto unimplemented_nowarn;
9526 #ifdef TARGET_NR_get_thread_area
9527 case TARGET_NR_get_thread_area:
9528 #if defined(TARGET_I386) && defined(TARGET_ABI32)
9529 ret = do_get_thread_area(cpu_env, arg1);
9531 #elif defined(TARGET_M68K)
9533 TaskState *ts = cpu->opaque;
9538 goto unimplemented_nowarn;
9541 #ifdef TARGET_NR_getdomainname
9542 case TARGET_NR_getdomainname:
9543 goto unimplemented_nowarn;
9546 #ifdef TARGET_NR_clock_gettime
9547 case TARGET_NR_clock_gettime:
9550 ret = get_errno(clock_gettime(arg1, &ts));
9551 if (!is_error(ret)) {
9552 host_to_target_timespec(arg2, &ts);
9557 #ifdef TARGET_NR_clock_getres
9558 case TARGET_NR_clock_getres:
9561 ret = get_errno(clock_getres(arg1, &ts));
9562 if (!is_error(ret)) {
9563 host_to_target_timespec(arg2, &ts);
9568 #ifdef TARGET_NR_clock_nanosleep
9569 case TARGET_NR_clock_nanosleep:
9572 target_to_host_timespec(&ts, arg3);
9573 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
9575 host_to_target_timespec(arg4, &ts);
9577 #if defined(TARGET_PPC)
9578 /* clock_nanosleep is odd in that it returns positive errno values.
9579 * On PPC, CR0 bit 3 should be set in such a situation. */
9581 ((CPUPPCState *)cpu_env)->crf[0] |= 1;
9588 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
9589 case TARGET_NR_set_tid_address:
9590 ret = get_errno(set_tid_address((int *)g2h(arg1)));
9594 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
9595 case TARGET_NR_tkill:
9596 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
9600 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
9601 case TARGET_NR_tgkill:
9602 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
9603 target_to_host_signal(arg3)));
9607 #ifdef TARGET_NR_set_robust_list
9608 case TARGET_NR_set_robust_list:
9609 case TARGET_NR_get_robust_list:
9610 /* The ABI for supporting robust futexes has userspace pass
9611 * the kernel a pointer to a linked list which is updated by
9612 * userspace after the syscall; the list is walked by the kernel
9613 * when the thread exits. Since the linked list in QEMU guest
9614 * memory isn't a valid linked list for the host and we have
9615 * no way to reliably intercept the thread-death event, we can't
9616 * support these. Silently return ENOSYS so that guest userspace
9617 * falls back to a non-robust futex implementation (which should
9618 * be OK except in the corner case of the guest crashing while
9619 * holding a mutex that is shared with another process via
9622 goto unimplemented_nowarn;
9625 #if defined(TARGET_NR_utimensat)
9626 case TARGET_NR_utimensat:
9628 struct timespec *tsp, ts[2];
9632 target_to_host_timespec(ts, arg3);
9633 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
9637 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
9639 if (!(p = lock_user_string(arg2))) {
9640 ret = -TARGET_EFAULT;
9643 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
9644 unlock_user(p, arg2, 0);
9649 case TARGET_NR_futex:
9650 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
9652 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
9653 case TARGET_NR_inotify_init:
9654 ret = get_errno(sys_inotify_init());
9657 #ifdef CONFIG_INOTIFY1
9658 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
9659 case TARGET_NR_inotify_init1:
9660 ret = get_errno(sys_inotify_init1(arg1));
9664 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
9665 case TARGET_NR_inotify_add_watch:
9666 p = lock_user_string(arg2);
9667 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
9668 unlock_user(p, arg2, 0);
9671 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
9672 case TARGET_NR_inotify_rm_watch:
9673 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
9677 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
9678 case TARGET_NR_mq_open:
9680 struct mq_attr posix_mq_attr, *attrp;
9682 p = lock_user_string(arg1 - 1);
9684 copy_from_user_mq_attr (&posix_mq_attr, arg4);
9685 attrp = &posix_mq_attr;
9689 ret = get_errno(mq_open(p, arg2, arg3, attrp));
9690 unlock_user (p, arg1, 0);
9694 case TARGET_NR_mq_unlink:
9695 p = lock_user_string(arg1 - 1);
9696 ret = get_errno(mq_unlink(p));
9697 unlock_user (p, arg1, 0);
9700 case TARGET_NR_mq_timedsend:
9704 p = lock_user (VERIFY_READ, arg2, arg3, 1);
9706 target_to_host_timespec(&ts, arg5);
9707 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
9708 host_to_target_timespec(arg5, &ts);
9711 ret = get_errno(mq_send(arg1, p, arg3, arg4));
9712 unlock_user (p, arg2, arg3);
9716 case TARGET_NR_mq_timedreceive:
9721 p = lock_user (VERIFY_READ, arg2, arg3, 1);
9723 target_to_host_timespec(&ts, arg5);
9724 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
9725 host_to_target_timespec(arg5, &ts);
9728 ret = get_errno(mq_receive(arg1, p, arg3, &prio));
9729 unlock_user (p, arg2, arg3);
9731 put_user_u32(prio, arg4);
9735 /* Not implemented for now... */
9736 /* case TARGET_NR_mq_notify: */
9739 case TARGET_NR_mq_getsetattr:
9741 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
9744 ret = mq_getattr(arg1, &posix_mq_attr_out);
9745 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
9748 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
9749 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
9756 #ifdef CONFIG_SPLICE
9757 #ifdef TARGET_NR_tee
9760 ret = get_errno(tee(arg1,arg2,arg3,arg4));
9764 #ifdef TARGET_NR_splice
9765 case TARGET_NR_splice:
9767 loff_t loff_in, loff_out;
9768 loff_t *ploff_in = NULL, *ploff_out = NULL;
9770 if (get_user_u64(loff_in, arg2)) {
9773 ploff_in = &loff_in;
9776 if (get_user_u64(loff_out, arg4)) {
9779 ploff_out = &loff_out;
9781 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
9783 if (put_user_u64(loff_in, arg2)) {
9788 if (put_user_u64(loff_out, arg4)) {
9795 #ifdef TARGET_NR_vmsplice
9796 case TARGET_NR_vmsplice:
9798 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9800 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
9801 unlock_iovec(vec, arg2, arg3, 0);
9803 ret = -host_to_target_errno(errno);
9808 #endif /* CONFIG_SPLICE */
9809 #ifdef CONFIG_EVENTFD
9810 #if defined(TARGET_NR_eventfd)
9811 case TARGET_NR_eventfd:
9812 ret = get_errno(eventfd(arg1, 0));
9813 fd_trans_unregister(ret);
9816 #if defined(TARGET_NR_eventfd2)
9817 case TARGET_NR_eventfd2:
9819 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
9820 if (arg2 & TARGET_O_NONBLOCK) {
9821 host_flags |= O_NONBLOCK;
9823 if (arg2 & TARGET_O_CLOEXEC) {
9824 host_flags |= O_CLOEXEC;
9826 ret = get_errno(eventfd(arg1, host_flags));
9827 fd_trans_unregister(ret);
9831 #endif /* CONFIG_EVENTFD */
9832 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
9833 case TARGET_NR_fallocate:
9834 #if TARGET_ABI_BITS == 32
9835 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
9836 target_offset64(arg5, arg6)));
9838 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
9842 #if defined(CONFIG_SYNC_FILE_RANGE)
9843 #if defined(TARGET_NR_sync_file_range)
9844 case TARGET_NR_sync_file_range:
9845 #if TARGET_ABI_BITS == 32
9846 #if defined(TARGET_MIPS)
9847 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
9848 target_offset64(arg5, arg6), arg7));
9850 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
9851 target_offset64(arg4, arg5), arg6));
9852 #endif /* !TARGET_MIPS */
9854 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
9858 #if defined(TARGET_NR_sync_file_range2)
9859 case TARGET_NR_sync_file_range2:
9860 /* This is like sync_file_range but the arguments are reordered */
9861 #if TARGET_ABI_BITS == 32
9862 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
9863 target_offset64(arg5, arg6), arg2));
9865 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
9870 #if defined(TARGET_NR_signalfd4)
9871 case TARGET_NR_signalfd4:
9872 ret = do_signalfd4(arg1, arg2, arg4);
9875 #if defined(TARGET_NR_signalfd)
9876 case TARGET_NR_signalfd:
9877 ret = do_signalfd4(arg1, arg2, 0);
9880 #if defined(CONFIG_EPOLL)
9881 #if defined(TARGET_NR_epoll_create)
9882 case TARGET_NR_epoll_create:
9883 ret = get_errno(epoll_create(arg1));
9886 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
9887 case TARGET_NR_epoll_create1:
9888 ret = get_errno(epoll_create1(arg1));
9891 #if defined(TARGET_NR_epoll_ctl)
9892 case TARGET_NR_epoll_ctl:
9894 struct epoll_event ep;
9895 struct epoll_event *epp = 0;
9897 struct target_epoll_event *target_ep;
9898 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
9901 ep.events = tswap32(target_ep->events);
9902 /* The epoll_data_t union is just opaque data to the kernel,
9903 * so we transfer all 64 bits across and need not worry what
9904 * actual data type it is.
9906 ep.data.u64 = tswap64(target_ep->data.u64);
9907 unlock_user_struct(target_ep, arg4, 0);
9910 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
9915 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
9916 #define IMPLEMENT_EPOLL_PWAIT
9918 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
9919 #if defined(TARGET_NR_epoll_wait)
9920 case TARGET_NR_epoll_wait:
9922 #if defined(IMPLEMENT_EPOLL_PWAIT)
9923 case TARGET_NR_epoll_pwait:
9926 struct target_epoll_event *target_ep;
9927 struct epoll_event *ep;
9929 int maxevents = arg3;
9932 target_ep = lock_user(VERIFY_WRITE, arg2,
9933 maxevents * sizeof(struct target_epoll_event), 1);
9938 ep = alloca(maxevents * sizeof(struct epoll_event));
9941 #if defined(IMPLEMENT_EPOLL_PWAIT)
9942 case TARGET_NR_epoll_pwait:
9944 target_sigset_t *target_set;
9945 sigset_t _set, *set = &_set;
9948 target_set = lock_user(VERIFY_READ, arg5,
9949 sizeof(target_sigset_t), 1);
9951 unlock_user(target_ep, arg2, 0);
9954 target_to_host_sigset(set, target_set);
9955 unlock_user(target_set, arg5, 0);
9960 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
9964 #if defined(TARGET_NR_epoll_wait)
9965 case TARGET_NR_epoll_wait:
9966 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
9970 ret = -TARGET_ENOSYS;
9972 if (!is_error(ret)) {
9974 for (i = 0; i < ret; i++) {
9975 target_ep[i].events = tswap32(ep[i].events);
9976 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
9979 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
9984 #ifdef TARGET_NR_prlimit64
9985 case TARGET_NR_prlimit64:
9987 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
9988 struct target_rlimit64 *target_rnew, *target_rold;
9989 struct host_rlimit64 rnew, rold, *rnewp = 0;
9990 int resource = target_to_host_resource(arg2);
9992 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
9995 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
9996 rnew.rlim_max = tswap64(target_rnew->rlim_max);
9997 unlock_user_struct(target_rnew, arg3, 0);
10001 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
10002 if (!is_error(ret) && arg4) {
10003 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
10006 target_rold->rlim_cur = tswap64(rold.rlim_cur);
10007 target_rold->rlim_max = tswap64(rold.rlim_max);
10008 unlock_user_struct(target_rold, arg4, 1);
10013 #ifdef TARGET_NR_gethostname
10014 case TARGET_NR_gethostname:
10016 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10018 ret = get_errno(gethostname(name, arg2));
10019 unlock_user(name, arg1, arg2);
10021 ret = -TARGET_EFAULT;
10026 #ifdef TARGET_NR_atomic_cmpxchg_32
10027 case TARGET_NR_atomic_cmpxchg_32:
10029 /* should use start_exclusive from main.c */
10030 abi_ulong mem_value;
10031 if (get_user_u32(mem_value, arg6)) {
10032 target_siginfo_t info;
10033 info.si_signo = SIGSEGV;
10035 info.si_code = TARGET_SEGV_MAPERR;
10036 info._sifields._sigfault._addr = arg6;
10037 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
10041 if (mem_value == arg2)
10042 put_user_u32(arg1, arg6);
10047 #ifdef TARGET_NR_atomic_barrier
10048 case TARGET_NR_atomic_barrier:
10050 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
10056 #ifdef TARGET_NR_timer_create
10057 case TARGET_NR_timer_create:
10059 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
10061 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
10064 int timer_index = next_free_host_timer();
10066 if (timer_index < 0) {
10067 ret = -TARGET_EAGAIN;
10069 timer_t *phtimer = g_posix_timers + timer_index;
10072 phost_sevp = &host_sevp;
10073 ret = target_to_host_sigevent(phost_sevp, arg2);
10079 ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
10083 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
10092 #ifdef TARGET_NR_timer_settime
10093 case TARGET_NR_timer_settime:
10095 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
10096 * struct itimerspec * old_value */
10097 target_timer_t timerid = get_timer_id(arg1);
10101 } else if (arg3 == 0) {
10102 ret = -TARGET_EINVAL;
10104 timer_t htimer = g_posix_timers[timerid];
10105 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
10107 target_to_host_itimerspec(&hspec_new, arg3);
10109 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
10110 host_to_target_itimerspec(arg2, &hspec_old);
10116 #ifdef TARGET_NR_timer_gettime
10117 case TARGET_NR_timer_gettime:
10119 /* args: timer_t timerid, struct itimerspec *curr_value */
10120 target_timer_t timerid = get_timer_id(arg1);
10124 } else if (!arg2) {
10125 ret = -TARGET_EFAULT;
10127 timer_t htimer = g_posix_timers[timerid];
10128 struct itimerspec hspec;
10129 ret = get_errno(timer_gettime(htimer, &hspec));
10131 if (host_to_target_itimerspec(arg2, &hspec)) {
10132 ret = -TARGET_EFAULT;
10139 #ifdef TARGET_NR_timer_getoverrun
10140 case TARGET_NR_timer_getoverrun:
10142 /* args: timer_t timerid */
10143 target_timer_t timerid = get_timer_id(arg1);
10148 timer_t htimer = g_posix_timers[timerid];
10149 ret = get_errno(timer_getoverrun(htimer));
10151 fd_trans_unregister(ret);
10156 #ifdef TARGET_NR_timer_delete
10157 case TARGET_NR_timer_delete:
10159 /* args: timer_t timerid */
10160 target_timer_t timerid = get_timer_id(arg1);
10165 timer_t htimer = g_posix_timers[timerid];
10166 ret = get_errno(timer_delete(htimer));
10167 g_posix_timers[timerid] = 0;
10173 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
10174 case TARGET_NR_timerfd_create:
10175 ret = get_errno(timerfd_create(arg1,
10176 target_to_host_bitmask(arg2, fcntl_flags_tbl)));
10180 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
10181 case TARGET_NR_timerfd_gettime:
10183 struct itimerspec its_curr;
10185 ret = get_errno(timerfd_gettime(arg1, &its_curr));
10187 if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
10194 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
10195 case TARGET_NR_timerfd_settime:
10197 struct itimerspec its_new, its_old, *p_new;
10200 if (target_to_host_itimerspec(&its_new, arg3)) {
10208 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
10210 if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
10217 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
10218 case TARGET_NR_ioprio_get:
10219 ret = get_errno(ioprio_get(arg1, arg2));
10223 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
10224 case TARGET_NR_ioprio_set:
10225 ret = get_errno(ioprio_set(arg1, arg2, arg3));
10229 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
10230 case TARGET_NR_setns:
10231 ret = get_errno(setns(arg1, arg2));
10234 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
10235 case TARGET_NR_unshare:
10236 ret = get_errno(unshare(arg1));
10242 gemu_log("qemu: Unsupported syscall: %d\n", num);
10243 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
10244 unimplemented_nowarn:
10246 ret = -TARGET_ENOSYS;
10251 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
10254 print_syscall_ret(num, ret);
10257 ret = -TARGET_EFAULT;