4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
29 #include <sys/mount.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
36 #include <linux/capability.h>
39 int __clone2(int (*fn)(void *), void *child_stack_base,
40 size_t stack_size, int flags, void *arg, ...);
42 #include <sys/socket.h>
46 #include <sys/times.h>
49 #include <sys/statfs.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include "qemu-common.h"
60 #include <sys/timerfd.h>
66 #include <sys/eventfd.h>
69 #include <sys/epoll.h>
72 #include "qemu/xattr.h"
74 #ifdef CONFIG_SENDFILE
75 #include <sys/sendfile.h>
78 #define termios host_termios
79 #define winsize host_winsize
80 #define termio host_termio
81 #define sgttyb host_sgttyb /* same as target */
82 #define tchars host_tchars /* same as target */
83 #define ltchars host_ltchars /* same as target */
85 #include <linux/termios.h>
86 #include <linux/unistd.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
91 #include <linux/mtio.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
98 #include <linux/dm-ioctl.h>
99 #include <linux/reboot.h>
100 #include <linux/route.h>
101 #include <linux/filter.h>
102 #include <linux/blkpg.h>
103 #include <netpacket/packet.h>
104 #include <linux/netlink.h>
105 #ifdef CONFIG_RTNETLINK
106 #include <linux/rtnetlink.h>
107 #include <linux/if_bridge.h>
109 #include <linux/audit.h>
110 #include "linux_loop.h"
115 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
116 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
119 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
120 * once. This exercises the codepaths for restart.
122 //#define DEBUG_ERESTARTSYS
124 //#include <linux/msdos_fs.h>
125 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
126 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
136 #define _syscall0(type,name) \
137 static type name (void) \
139 return syscall(__NR_##name); \
142 #define _syscall1(type,name,type1,arg1) \
143 static type name (type1 arg1) \
145 return syscall(__NR_##name, arg1); \
148 #define _syscall2(type,name,type1,arg1,type2,arg2) \
149 static type name (type1 arg1,type2 arg2) \
151 return syscall(__NR_##name, arg1, arg2); \
154 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
155 static type name (type1 arg1,type2 arg2,type3 arg3) \
157 return syscall(__NR_##name, arg1, arg2, arg3); \
160 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
161 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
163 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
166 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
168 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
170 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
174 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
175 type5,arg5,type6,arg6) \
176 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
179 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
183 #define __NR_sys_uname __NR_uname
184 #define __NR_sys_getcwd1 __NR_getcwd
185 #define __NR_sys_getdents __NR_getdents
186 #define __NR_sys_getdents64 __NR_getdents64
187 #define __NR_sys_getpriority __NR_getpriority
188 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
189 #define __NR_sys_syslog __NR_syslog
190 #define __NR_sys_futex __NR_futex
191 #define __NR_sys_inotify_init __NR_inotify_init
192 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
193 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
195 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
197 #define __NR__llseek __NR_lseek
200 /* Newer kernel ports have llseek() instead of _llseek() */
201 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
202 #define TARGET_NR__llseek TARGET_NR_llseek
206 _syscall0(int, gettid)
208 /* This is a replacement for the host gettid() and must return a host
210 static int gettid(void) {
214 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
215 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
217 #if !defined(__NR_getdents) || \
218 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
219 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
221 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
222 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
223 loff_t *, res, uint, wh);
225 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
226 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
227 #ifdef __NR_exit_group
228 _syscall1(int,exit_group,int,error_code)
230 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
231 _syscall1(int,set_tid_address,int *,tidptr)
233 #if defined(TARGET_NR_futex) && defined(__NR_futex)
234 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
235 const struct timespec *,timeout,int *,uaddr2,int,val3)
237 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
238 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
239 unsigned long *, user_mask_ptr);
240 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
241 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
242 unsigned long *, user_mask_ptr);
243 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
245 _syscall2(int, capget, struct __user_cap_header_struct *, header,
246 struct __user_cap_data_struct *, data);
247 _syscall2(int, capset, struct __user_cap_header_struct *, header,
248 struct __user_cap_data_struct *, data);
249 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
250 _syscall2(int, ioprio_get, int, which, int, who)
252 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
253 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
255 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
256 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
259 static bitmask_transtbl fcntl_flags_tbl[] = {
260 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
261 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
262 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
263 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
264 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
265 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
266 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
267 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
268 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
269 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
270 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
271 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
272 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
273 #if defined(O_DIRECT)
274 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
276 #if defined(O_NOATIME)
277 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
279 #if defined(O_CLOEXEC)
280 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
283 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
285 /* Don't terminate the list prematurely on 64-bit host+guest. */
286 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
287 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
294 QEMU_IFLA_BR_FORWARD_DELAY,
295 QEMU_IFLA_BR_HELLO_TIME,
296 QEMU_IFLA_BR_MAX_AGE,
297 QEMU_IFLA_BR_AGEING_TIME,
298 QEMU_IFLA_BR_STP_STATE,
299 QEMU_IFLA_BR_PRIORITY,
300 QEMU_IFLA_BR_VLAN_FILTERING,
301 QEMU_IFLA_BR_VLAN_PROTOCOL,
302 QEMU_IFLA_BR_GROUP_FWD_MASK,
303 QEMU_IFLA_BR_ROOT_ID,
304 QEMU_IFLA_BR_BRIDGE_ID,
305 QEMU_IFLA_BR_ROOT_PORT,
306 QEMU_IFLA_BR_ROOT_PATH_COST,
307 QEMU_IFLA_BR_TOPOLOGY_CHANGE,
308 QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
309 QEMU_IFLA_BR_HELLO_TIMER,
310 QEMU_IFLA_BR_TCN_TIMER,
311 QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER,
312 QEMU_IFLA_BR_GC_TIMER,
313 QEMU_IFLA_BR_GROUP_ADDR,
314 QEMU_IFLA_BR_FDB_FLUSH,
315 QEMU_IFLA_BR_MCAST_ROUTER,
316 QEMU_IFLA_BR_MCAST_SNOOPING,
317 QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR,
318 QEMU_IFLA_BR_MCAST_QUERIER,
319 QEMU_IFLA_BR_MCAST_HASH_ELASTICITY,
320 QEMU_IFLA_BR_MCAST_HASH_MAX,
321 QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT,
322 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT,
323 QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL,
324 QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL,
325 QEMU_IFLA_BR_MCAST_QUERIER_INTVL,
326 QEMU_IFLA_BR_MCAST_QUERY_INTVL,
327 QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL,
328 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL,
329 QEMU_IFLA_BR_NF_CALL_IPTABLES,
330 QEMU_IFLA_BR_NF_CALL_IP6TABLES,
331 QEMU_IFLA_BR_NF_CALL_ARPTABLES,
332 QEMU_IFLA_BR_VLAN_DEFAULT_PVID,
334 QEMU_IFLA_BR_VLAN_STATS_ENABLED,
335 QEMU_IFLA_BR_MCAST_STATS_ENABLED,
359 QEMU_IFLA_NET_NS_PID,
362 QEMU_IFLA_VFINFO_LIST,
370 QEMU_IFLA_PROMISCUITY,
371 QEMU_IFLA_NUM_TX_QUEUES,
372 QEMU_IFLA_NUM_RX_QUEUES,
374 QEMU_IFLA_PHYS_PORT_ID,
375 QEMU_IFLA_CARRIER_CHANGES,
376 QEMU_IFLA_PHYS_SWITCH_ID,
377 QEMU_IFLA_LINK_NETNSID,
378 QEMU_IFLA_PHYS_PORT_NAME,
379 QEMU_IFLA_PROTO_DOWN,
380 QEMU_IFLA_GSO_MAX_SEGS,
381 QEMU_IFLA_GSO_MAX_SIZE,
388 QEMU_IFLA_BRPORT_UNSPEC,
389 QEMU_IFLA_BRPORT_STATE,
390 QEMU_IFLA_BRPORT_PRIORITY,
391 QEMU_IFLA_BRPORT_COST,
392 QEMU_IFLA_BRPORT_MODE,
393 QEMU_IFLA_BRPORT_GUARD,
394 QEMU_IFLA_BRPORT_PROTECT,
395 QEMU_IFLA_BRPORT_FAST_LEAVE,
396 QEMU_IFLA_BRPORT_LEARNING,
397 QEMU_IFLA_BRPORT_UNICAST_FLOOD,
398 QEMU_IFLA_BRPORT_PROXYARP,
399 QEMU_IFLA_BRPORT_LEARNING_SYNC,
400 QEMU_IFLA_BRPORT_PROXYARP_WIFI,
401 QEMU_IFLA_BRPORT_ROOT_ID,
402 QEMU_IFLA_BRPORT_BRIDGE_ID,
403 QEMU_IFLA_BRPORT_DESIGNATED_PORT,
404 QEMU_IFLA_BRPORT_DESIGNATED_COST,
407 QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
408 QEMU_IFLA_BRPORT_CONFIG_PENDING,
409 QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER,
410 QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER,
411 QEMU_IFLA_BRPORT_HOLD_TIMER,
412 QEMU_IFLA_BRPORT_FLUSH,
413 QEMU_IFLA_BRPORT_MULTICAST_ROUTER,
414 QEMU_IFLA_BRPORT_PAD,
415 QEMU___IFLA_BRPORT_MAX
419 QEMU_IFLA_INFO_UNSPEC,
422 QEMU_IFLA_INFO_XSTATS,
423 QEMU_IFLA_INFO_SLAVE_KIND,
424 QEMU_IFLA_INFO_SLAVE_DATA,
425 QEMU___IFLA_INFO_MAX,
429 QEMU_IFLA_INET_UNSPEC,
431 QEMU___IFLA_INET_MAX,
435 QEMU_IFLA_INET6_UNSPEC,
436 QEMU_IFLA_INET6_FLAGS,
437 QEMU_IFLA_INET6_CONF,
438 QEMU_IFLA_INET6_STATS,
439 QEMU_IFLA_INET6_MCAST,
440 QEMU_IFLA_INET6_CACHEINFO,
441 QEMU_IFLA_INET6_ICMP6STATS,
442 QEMU_IFLA_INET6_TOKEN,
443 QEMU_IFLA_INET6_ADDR_GEN_MODE,
444 QEMU___IFLA_INET6_MAX
447 typedef abi_long (*TargetFdDataFunc)(void *, size_t);
448 typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t);
449 typedef struct TargetFdTrans {
450 TargetFdDataFunc host_to_target_data;
451 TargetFdDataFunc target_to_host_data;
452 TargetFdAddrFunc target_to_host_addr;
455 static TargetFdTrans **target_fd_trans;
457 static unsigned int target_fd_max;
459 static TargetFdDataFunc fd_trans_target_to_host_data(int fd)
461 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
462 return target_fd_trans[fd]->target_to_host_data;
467 static TargetFdDataFunc fd_trans_host_to_target_data(int fd)
469 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
470 return target_fd_trans[fd]->host_to_target_data;
475 static TargetFdAddrFunc fd_trans_target_to_host_addr(int fd)
477 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
478 return target_fd_trans[fd]->target_to_host_addr;
483 static void fd_trans_register(int fd, TargetFdTrans *trans)
487 if (fd >= target_fd_max) {
488 oldmax = target_fd_max;
489 target_fd_max = ((fd >> 6) + 1) << 6; /* by slice of 64 entries */
490 target_fd_trans = g_renew(TargetFdTrans *,
491 target_fd_trans, target_fd_max);
492 memset((void *)(target_fd_trans + oldmax), 0,
493 (target_fd_max - oldmax) * sizeof(TargetFdTrans *));
495 target_fd_trans[fd] = trans;
498 static void fd_trans_unregister(int fd)
500 if (fd >= 0 && fd < target_fd_max) {
501 target_fd_trans[fd] = NULL;
505 static void fd_trans_dup(int oldfd, int newfd)
507 fd_trans_unregister(newfd);
508 if (oldfd < target_fd_max && target_fd_trans[oldfd]) {
509 fd_trans_register(newfd, target_fd_trans[oldfd]);
/* getcwd(3) wrapper with getcwd(2) kernel return semantics: on success
 * return the path length *including* the trailing NUL byte; on failure
 * return -1 (errno is set by getcwd()). */
static int sys_getcwd1(char *buf, size_t size)
{
    if (getcwd(buf, size) == NULL) {
        /* getcwd() sets errno */
        return (-1);
    }
    return strlen(buf) + 1;
}
522 #ifdef TARGET_NR_utimensat
523 #if defined(__NR_utimensat)
524 #define __NR_sys_utimensat __NR_utimensat
525 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
526 const struct timespec *,tsp,int,flags)
528 static int sys_utimensat(int dirfd, const char *pathname,
529 const struct timespec times[2], int flags)
535 #endif /* TARGET_NR_utimensat */
537 #ifdef CONFIG_INOTIFY
538 #include <sys/inotify.h>
540 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
541 static int sys_inotify_init(void)
543 return (inotify_init());
546 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
547 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
549 return (inotify_add_watch(fd, pathname, mask));
552 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
553 static int sys_inotify_rm_watch(int fd, int32_t wd)
555 return (inotify_rm_watch(fd, wd));
558 #ifdef CONFIG_INOTIFY1
559 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
560 static int sys_inotify_init1(int flags)
562 return (inotify_init1(flags));
567 /* Userspace can usually survive runtime without inotify */
568 #undef TARGET_NR_inotify_init
569 #undef TARGET_NR_inotify_init1
570 #undef TARGET_NR_inotify_add_watch
571 #undef TARGET_NR_inotify_rm_watch
572 #endif /* CONFIG_INOTIFY */
574 #if defined(TARGET_NR_prlimit64)
575 #ifndef __NR_prlimit64
576 # define __NR_prlimit64 -1
578 #define __NR_sys_prlimit64 __NR_prlimit64
579 /* The glibc rlimit structure may not be that used by the underlying syscall */
580 struct host_rlimit64 {
584 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
585 const struct host_rlimit64 *, new_limit,
586 struct host_rlimit64 *, old_limit)
590 #if defined(TARGET_NR_timer_create)
591 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
592 static timer_t g_posix_timers[32] = { 0, } ;
594 static inline int next_free_host_timer(void)
597 /* FIXME: Does finding the next free slot require a lock? */
598 for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
599 if (g_posix_timers[k] == 0) {
600 g_posix_timers[k] = (timer_t) 1;
608 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
610 static inline int regpairs_aligned(void *cpu_env) {
611 return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
613 #elif defined(TARGET_MIPS)
614 static inline int regpairs_aligned(void *cpu_env) { return 1; }
615 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
616 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
617 * of registers which translates to the same as ARM/MIPS, because we start with
619 static inline int regpairs_aligned(void *cpu_env) { return 1; }
621 static inline int regpairs_aligned(void *cpu_env) { return 0; }
624 #define ERRNO_TABLE_SIZE 1200
626 /* target_to_host_errno_table[] is initialized from
627 * host_to_target_errno_table[] in syscall_init(). */
628 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
632 * This list is the union of errno values overridden in asm-<arch>/errno.h
633 * minus the errnos that are not actually generic to all archs.
635 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
636 [EAGAIN] = TARGET_EAGAIN,
637 [EIDRM] = TARGET_EIDRM,
638 [ECHRNG] = TARGET_ECHRNG,
639 [EL2NSYNC] = TARGET_EL2NSYNC,
640 [EL3HLT] = TARGET_EL3HLT,
641 [EL3RST] = TARGET_EL3RST,
642 [ELNRNG] = TARGET_ELNRNG,
643 [EUNATCH] = TARGET_EUNATCH,
644 [ENOCSI] = TARGET_ENOCSI,
645 [EL2HLT] = TARGET_EL2HLT,
646 [EDEADLK] = TARGET_EDEADLK,
647 [ENOLCK] = TARGET_ENOLCK,
648 [EBADE] = TARGET_EBADE,
649 [EBADR] = TARGET_EBADR,
650 [EXFULL] = TARGET_EXFULL,
651 [ENOANO] = TARGET_ENOANO,
652 [EBADRQC] = TARGET_EBADRQC,
653 [EBADSLT] = TARGET_EBADSLT,
654 [EBFONT] = TARGET_EBFONT,
655 [ENOSTR] = TARGET_ENOSTR,
656 [ENODATA] = TARGET_ENODATA,
657 [ETIME] = TARGET_ETIME,
658 [ENOSR] = TARGET_ENOSR,
659 [ENONET] = TARGET_ENONET,
660 [ENOPKG] = TARGET_ENOPKG,
661 [EREMOTE] = TARGET_EREMOTE,
662 [ENOLINK] = TARGET_ENOLINK,
663 [EADV] = TARGET_EADV,
664 [ESRMNT] = TARGET_ESRMNT,
665 [ECOMM] = TARGET_ECOMM,
666 [EPROTO] = TARGET_EPROTO,
667 [EDOTDOT] = TARGET_EDOTDOT,
668 [EMULTIHOP] = TARGET_EMULTIHOP,
669 [EBADMSG] = TARGET_EBADMSG,
670 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
671 [EOVERFLOW] = TARGET_EOVERFLOW,
672 [ENOTUNIQ] = TARGET_ENOTUNIQ,
673 [EBADFD] = TARGET_EBADFD,
674 [EREMCHG] = TARGET_EREMCHG,
675 [ELIBACC] = TARGET_ELIBACC,
676 [ELIBBAD] = TARGET_ELIBBAD,
677 [ELIBSCN] = TARGET_ELIBSCN,
678 [ELIBMAX] = TARGET_ELIBMAX,
679 [ELIBEXEC] = TARGET_ELIBEXEC,
680 [EILSEQ] = TARGET_EILSEQ,
681 [ENOSYS] = TARGET_ENOSYS,
682 [ELOOP] = TARGET_ELOOP,
683 [ERESTART] = TARGET_ERESTART,
684 [ESTRPIPE] = TARGET_ESTRPIPE,
685 [ENOTEMPTY] = TARGET_ENOTEMPTY,
686 [EUSERS] = TARGET_EUSERS,
687 [ENOTSOCK] = TARGET_ENOTSOCK,
688 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
689 [EMSGSIZE] = TARGET_EMSGSIZE,
690 [EPROTOTYPE] = TARGET_EPROTOTYPE,
691 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
692 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
693 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
694 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
695 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
696 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
697 [EADDRINUSE] = TARGET_EADDRINUSE,
698 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
699 [ENETDOWN] = TARGET_ENETDOWN,
700 [ENETUNREACH] = TARGET_ENETUNREACH,
701 [ENETRESET] = TARGET_ENETRESET,
702 [ECONNABORTED] = TARGET_ECONNABORTED,
703 [ECONNRESET] = TARGET_ECONNRESET,
704 [ENOBUFS] = TARGET_ENOBUFS,
705 [EISCONN] = TARGET_EISCONN,
706 [ENOTCONN] = TARGET_ENOTCONN,
707 [EUCLEAN] = TARGET_EUCLEAN,
708 [ENOTNAM] = TARGET_ENOTNAM,
709 [ENAVAIL] = TARGET_ENAVAIL,
710 [EISNAM] = TARGET_EISNAM,
711 [EREMOTEIO] = TARGET_EREMOTEIO,
712 [ESHUTDOWN] = TARGET_ESHUTDOWN,
713 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
714 [ETIMEDOUT] = TARGET_ETIMEDOUT,
715 [ECONNREFUSED] = TARGET_ECONNREFUSED,
716 [EHOSTDOWN] = TARGET_EHOSTDOWN,
717 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
718 [EALREADY] = TARGET_EALREADY,
719 [EINPROGRESS] = TARGET_EINPROGRESS,
720 [ESTALE] = TARGET_ESTALE,
721 [ECANCELED] = TARGET_ECANCELED,
722 [ENOMEDIUM] = TARGET_ENOMEDIUM,
723 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
725 [ENOKEY] = TARGET_ENOKEY,
728 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
731 [EKEYREVOKED] = TARGET_EKEYREVOKED,
734 [EKEYREJECTED] = TARGET_EKEYREJECTED,
737 [EOWNERDEAD] = TARGET_EOWNERDEAD,
739 #ifdef ENOTRECOVERABLE
740 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
744 static inline int host_to_target_errno(int err)
746 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
747 host_to_target_errno_table[err]) {
748 return host_to_target_errno_table[err];
753 static inline int target_to_host_errno(int err)
755 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
756 target_to_host_errno_table[err]) {
757 return target_to_host_errno_table[err];
762 static inline abi_long get_errno(abi_long ret)
765 return -host_to_target_errno(errno);
770 static inline int is_error(abi_long ret)
772 return (abi_ulong)ret >= (abi_ulong)(-4096);
775 const char *target_strerror(int err)
777 if (err == TARGET_ERESTARTSYS) {
778 return "To be restarted";
780 if (err == TARGET_QEMU_ESIGRETURN) {
781 return "Successful exit from sigreturn";
784 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
787 return strerror(target_to_host_errno(err));
790 #define safe_syscall0(type, name) \
791 static type safe_##name(void) \
793 return safe_syscall(__NR_##name); \
796 #define safe_syscall1(type, name, type1, arg1) \
797 static type safe_##name(type1 arg1) \
799 return safe_syscall(__NR_##name, arg1); \
802 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
803 static type safe_##name(type1 arg1, type2 arg2) \
805 return safe_syscall(__NR_##name, arg1, arg2); \
808 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
809 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
811 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
814 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
816 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
818 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
821 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
822 type4, arg4, type5, arg5) \
823 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
826 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
829 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
830 type4, arg4, type5, arg5, type6, arg6) \
831 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
832 type5 arg5, type6 arg6) \
834 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
837 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
838 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
839 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
840 int, flags, mode_t, mode)
841 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
842 struct rusage *, rusage)
843 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
844 int, options, struct rusage *, rusage)
845 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
846 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
847 fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
848 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
849 struct timespec *, tsp, const sigset_t *, sigmask,
851 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
852 int, maxevents, int, timeout, const sigset_t *, sigmask,
854 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
855 const struct timespec *,timeout,int *,uaddr2,int,val3)
856 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
857 safe_syscall2(int, kill, pid_t, pid, int, sig)
858 safe_syscall2(int, tkill, int, tid, int, sig)
859 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
860 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
861 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
862 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
864 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
865 int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
866 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
867 int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
868 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
869 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
870 safe_syscall2(int, flock, int, fd, int, operation)
871 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
872 const struct timespec *, uts, size_t, sigsetsize)
873 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
875 safe_syscall2(int, nanosleep, const struct timespec *, req,
876 struct timespec *, rem)
877 #ifdef TARGET_NR_clock_nanosleep
878 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
879 const struct timespec *, req, struct timespec *, rem)
882 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
884 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
885 long, msgtype, int, flags)
886 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
887 unsigned, nsops, const struct timespec *, timeout)
889 /* This host kernel architecture uses a single ipc syscall; fake up
890 * wrappers for the sub-operations to hide this implementation detail.
891 * Annoyingly we can't include linux/ipc.h to get the constant definitions
892 * for the call parameter because some structs in there conflict with the
893 * sys/ipc.h ones. So we just define them here, and rely on them being
894 * the same for all host architectures.
896 #define Q_SEMTIMEDOP 4
899 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
901 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
902 void *, ptr, long, fifth)
903 static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
905 return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
907 static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
909 return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
911 static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
912 const struct timespec *timeout)
914 return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
918 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
919 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
920 size_t, len, unsigned, prio, const struct timespec *, timeout)
921 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
922 size_t, len, unsigned *, prio, const struct timespec *, timeout)
924 /* We do ioctl like this rather than via safe_syscall3 to preserve the
925 * "third argument might be integer or pointer or not present" behaviour of
928 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
929 /* Similarly for fcntl. Note that callers must always:
930 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
931 * use the flock64 struct rather than unsuffixed flock
932 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
935 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
937 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
940 static inline int host_to_target_sock_type(int host_type)
944 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
946 target_type = TARGET_SOCK_DGRAM;
949 target_type = TARGET_SOCK_STREAM;
952 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
956 #if defined(SOCK_CLOEXEC)
957 if (host_type & SOCK_CLOEXEC) {
958 target_type |= TARGET_SOCK_CLOEXEC;
962 #if defined(SOCK_NONBLOCK)
963 if (host_type & SOCK_NONBLOCK) {
964 target_type |= TARGET_SOCK_NONBLOCK;
971 static abi_ulong target_brk;
972 static abi_ulong target_original_brk;
973 static abi_ulong brk_page;
975 void target_set_brk(abi_ulong new_brk)
977 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
978 brk_page = HOST_PAGE_ALIGN(target_brk);
981 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
982 #define DEBUGF_BRK(message, args...)
984 /* do_brk() must return target values and target errnos. */
985 abi_long do_brk(abi_ulong new_brk)
987 abi_long mapped_addr;
988 abi_ulong new_alloc_size;
990 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
993 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
996 if (new_brk < target_original_brk) {
997 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
1002 /* If the new brk is less than the highest page reserved to the
1003 * target heap allocation, set it and we're almost done... */
1004 if (new_brk <= brk_page) {
1005 /* Heap contents are initialized to zero, as for anonymous
1007 if (new_brk > target_brk) {
1008 memset(g2h(target_brk), 0, new_brk - target_brk);
1010 target_brk = new_brk;
1011 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
1015 /* We need to allocate more memory after the brk... Note that
1016 * we don't use MAP_FIXED because that will map over the top of
1017 * any existing mapping (like the one with the host libc or qemu
1018 * itself); instead we treat "mapped but at wrong address" as
1019 * a failure and unmap again.
1021 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
1022 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
1023 PROT_READ|PROT_WRITE,
1024 MAP_ANON|MAP_PRIVATE, 0, 0));
1026 if (mapped_addr == brk_page) {
1027 /* Heap contents are initialized to zero, as for anonymous
1028 * mapped pages. Technically the new pages are already
1029 * initialized to zero since they *are* anonymous mapped
1030 * pages, however we have to take care with the contents that
1031 * come from the remaining part of the previous page: it may
1032 * contains garbage data due to a previous heap usage (grown
1033 * then shrunken). */
1034 memset(g2h(target_brk), 0, brk_page - target_brk);
1036 target_brk = new_brk;
1037 brk_page = HOST_PAGE_ALIGN(target_brk);
1038 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
1041 } else if (mapped_addr != -1) {
1042 /* Mapped but at wrong address, meaning there wasn't actually
1043 * enough space for this brk.
1045 target_munmap(mapped_addr, new_alloc_size);
1047 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
1050 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
1053 #if defined(TARGET_ALPHA)
1054 /* We (partially) emulate OSF/1 on Alpha, which requires we
1055 return a proper errno, not an unchanged brk value. */
1056 return -TARGET_ENOMEM;
1058 /* For everything else, return the previous break. */
1062 static inline abi_long copy_from_user_fdset(fd_set *fds,
1063 abi_ulong target_fds_addr,
1067 abi_ulong b, *target_fds;
1069 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1070 if (!(target_fds = lock_user(VERIFY_READ,
1072 sizeof(abi_ulong) * nw,
1074 return -TARGET_EFAULT;
1078 for (i = 0; i < nw; i++) {
1079 /* grab the abi_ulong */
1080 __get_user(b, &target_fds[i]);
1081 for (j = 0; j < TARGET_ABI_BITS; j++) {
1082 /* check the bit inside the abi_ulong */
1089 unlock_user(target_fds, target_fds_addr, 0);
1094 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
1095 abi_ulong target_fds_addr,
1098 if (target_fds_addr) {
1099 if (copy_from_user_fdset(fds, target_fds_addr, n))
1100 return -TARGET_EFAULT;
1108 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1114 abi_ulong *target_fds;
1116 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1117 if (!(target_fds = lock_user(VERIFY_WRITE,
1119 sizeof(abi_ulong) * nw,
1121 return -TARGET_EFAULT;
1124 for (i = 0; i < nw; i++) {
1126 for (j = 0; j < TARGET_ABI_BITS; j++) {
1127 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1130 __put_user(v, &target_fds[i]);
1133 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1138 #if defined(__alpha__)
1139 #define HOST_HZ 1024
1144 static inline abi_long host_to_target_clock_t(long ticks)
1146 #if HOST_HZ == TARGET_HZ
1149 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
/*
 * Copy a host struct rusage into guest memory at target_addr,
 * byte-swapping every field with tswapal() for the target ABI.
 * Returns -TARGET_EFAULT if the guest struct cannot be locked.
 */
1153 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1154 const struct rusage *rusage)
1156 struct target_rusage *target_rusage;
1158 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1159 return -TARGET_EFAULT;
1160 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1161 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1162 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1163 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1164 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1165 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1166 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1167 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1168 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1169 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1170 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1171 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1172 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1173 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1174 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1175 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1176 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1177 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1178 unlock_user_struct(target_rusage, target_addr, 1);
/*
 * Convert a guest rlimit value to a host rlim_t.  TARGET_RLIM_INFINITY
 * maps to RLIM_INFINITY; a value that does not survive the round-trip
 * through rlim_t (i.e. does not fit) is also treated as infinity.
 */
1183 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1185 abi_ulong target_rlim_swap;
1188 target_rlim_swap = tswapal(target_rlim);
1189 if (target_rlim_swap == TARGET_RLIM_INFINITY)
1190 return RLIM_INFINITY;
1192 result = target_rlim_swap;
1193 if (target_rlim_swap != (rlim_t)result)
1194 return RLIM_INFINITY;
/*
 * Convert a host rlim_t to the guest representation (byte-swapped).
 * RLIM_INFINITY, and any value too large to represent as an abi_long,
 * becomes TARGET_RLIM_INFINITY.
 */
1199 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1201 abi_ulong target_rlim_swap;
1204 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1205 target_rlim_swap = TARGET_RLIM_INFINITY;
1207 target_rlim_swap = rlim;
1208 result = tswapal(target_rlim_swap);
/*
 * Map a guest RLIMIT_* resource code to the host's value.  The numeric
 * codes differ between ABIs, so each TARGET_RLIMIT_* constant is mapped
 * explicitly.  (Several return statements are not visible in this
 * elided view; the default case presumably returns the code unchanged -
 * confirm against the full file.)
 */
1213 static inline int target_to_host_resource(int code)
1216 case TARGET_RLIMIT_AS:
1218 case TARGET_RLIMIT_CORE:
1220 case TARGET_RLIMIT_CPU:
1222 case TARGET_RLIMIT_DATA:
1224 case TARGET_RLIMIT_FSIZE:
1225 return RLIMIT_FSIZE;
1226 case TARGET_RLIMIT_LOCKS:
1227 return RLIMIT_LOCKS;
1228 case TARGET_RLIMIT_MEMLOCK:
1229 return RLIMIT_MEMLOCK;
1230 case TARGET_RLIMIT_MSGQUEUE:
1231 return RLIMIT_MSGQUEUE;
1232 case TARGET_RLIMIT_NICE:
1234 case TARGET_RLIMIT_NOFILE:
1235 return RLIMIT_NOFILE;
1236 case TARGET_RLIMIT_NPROC:
1237 return RLIMIT_NPROC;
1238 case TARGET_RLIMIT_RSS:
1240 case TARGET_RLIMIT_RTPRIO:
1241 return RLIMIT_RTPRIO;
1242 case TARGET_RLIMIT_SIGPENDING:
1243 return RLIMIT_SIGPENDING;
1244 case TARGET_RLIMIT_STACK:
1245 return RLIMIT_STACK;
/*
 * Read a struct timeval from guest memory into *tv, byte-swapping via
 * __get_user.  Returns -TARGET_EFAULT on a bad guest address.
 */
1251 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1252 abi_ulong target_tv_addr)
1254 struct target_timeval *target_tv;
1256 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1257 return -TARGET_EFAULT;
1259 __get_user(tv->tv_sec, &target_tv->tv_sec);
1260 __get_user(tv->tv_usec, &target_tv->tv_usec);
1262 unlock_user_struct(target_tv, target_tv_addr, 0);
/*
 * Write a host struct timeval into guest memory, byte-swapping via
 * __put_user.  Returns -TARGET_EFAULT on a bad guest address.
 */
1267 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1268 const struct timeval *tv)
1270 struct target_timeval *target_tv;
1272 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1273 return -TARGET_EFAULT;
1275 __put_user(tv->tv_sec, &target_tv->tv_sec);
1276 __put_user(tv->tv_usec, &target_tv->tv_usec);
1278 unlock_user_struct(target_tv, target_tv_addr, 1);
/*
 * Read a struct timezone from guest memory into *tz (minuteswest and
 * dsttime fields).  Returns -TARGET_EFAULT on a bad guest address.
 */
1283 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1284 abi_ulong target_tz_addr)
1286 struct target_timezone *target_tz;
1288 if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1289 return -TARGET_EFAULT;
1292 __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1293 __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1295 unlock_user_struct(target_tz, target_tz_addr, 0);
/* POSIX message queue helpers; only built when both guest and host
 * provide mq_open. */
1300 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
/*
 * Read a struct mq_attr from guest memory into *attr, byte-swapping
 * each field via __get_user.  Returns -TARGET_EFAULT on a bad address.
 */
1303 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1304 abi_ulong target_mq_attr_addr)
1306 struct target_mq_attr *target_mq_attr;
1308 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1309 target_mq_attr_addr, 1))
1310 return -TARGET_EFAULT;
1312 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1313 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1314 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1315 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1317 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
/*
 * Write a host struct mq_attr into guest memory, byte-swapping each
 * field via __put_user.  Returns -TARGET_EFAULT on a bad address.
 */
1322 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1323 const struct mq_attr *attr)
1325 struct target_mq_attr *target_mq_attr;
1327 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1328 target_mq_attr_addr, 0))
1329 return -TARGET_EFAULT;
1331 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1332 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1333 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1334 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1336 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1342 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1343 /* do_select() must return target values and target errnos. */
/*
 * Emulate select(2) for the guest: converts the three guest fd_sets
 * and the optional timeval, issues safe_pselect6() (with the timeout
 * converted to a timespec), then copies the surviving fd_sets and the
 * updated timeout back to guest memory on success.
 */
1344 static abi_long do_select(int n,
1345 abi_ulong rfd_addr, abi_ulong wfd_addr,
1346 abi_ulong efd_addr, abi_ulong target_tv_addr)
1348 fd_set rfds, wfds, efds;
1349 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1351 struct timespec ts, *ts_ptr;
1354 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1358 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1362 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1367 if (target_tv_addr) {
1368 if (copy_from_user_timeval(&tv, target_tv_addr))
1369 return -TARGET_EFAULT;
/* select() takes microseconds but pselect6 wants nanoseconds. */
1370 ts.tv_sec = tv.tv_sec;
1371 ts.tv_nsec = tv.tv_usec * 1000;
1377 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1380 if (!is_error(ret)) {
1381 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1382 return -TARGET_EFAULT;
1383 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1384 return -TARGET_EFAULT;
1385 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1386 return -TARGET_EFAULT;
/* Report the (possibly updated) remaining timeout back to the guest. */
1388 if (target_tv_addr) {
1389 tv.tv_sec = ts.tv_sec;
1390 tv.tv_usec = ts.tv_nsec / 1000;
1391 if (copy_to_user_timeval(target_tv_addr, &tv)) {
1392 return -TARGET_EFAULT;
/*
 * Create a pipe with flags via the host pipe2(); the raw host return
 * value is passed back (errno conversion happens in the caller).
 * NOTE(review): the full file presumably guards this with a pipe2
 * availability check - not visible in this elided view.
 */
1401 static abi_long do_pipe2(int host_pipe[], int flags)
1404 return pipe2(host_pipe, flags);
/*
 * Emulate pipe()/pipe2() for the guest.  Uses pipe2() only when flags
 * are given, plain pipe() otherwise.  For is_pipe2 == 0, several
 * targets (Alpha, MIPS, SH4, SPARC) return the second fd in a CPU
 * register and the first as the syscall result instead of writing both
 * into the guest array; all other targets store both fds at pipedes.
 */
1410 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1411 int flags, int is_pipe2)
1415 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1418 return get_errno(ret);
1420 /* Several targets have special calling conventions for the original
1421 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1423 #if defined(TARGET_ALPHA)
1424 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1425 return host_pipe[0];
1426 #elif defined(TARGET_MIPS)
1427 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1428 return host_pipe[0];
1429 #elif defined(TARGET_SH4)
1430 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1431 return host_pipe[0];
1432 #elif defined(TARGET_SPARC)
1433 ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1434 return host_pipe[0];
/* Generic path: write both fds into the guest int[2] array. */
1438 if (put_user_s32(host_pipe[0], pipedes)
1439 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1440 return -TARGET_EFAULT;
1441 return get_errno(ret);
/*
 * Convert a guest ip_mreq/ip_mreqn (multicast membership request) into
 * the host struct.  The s_addr fields are already in network byte order
 * and are copied as-is; imr_ifindex only exists in the larger ip_mreqn
 * form, detected by len, and needs byte-swapping.
 */
1444 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1445 abi_ulong target_addr,
1448 struct target_ip_mreqn *target_smreqn;
1450 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1452 return -TARGET_EFAULT;
1453 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1454 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1455 if (len == sizeof(struct target_ip_mreqn))
1456 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1457 unlock_user(target_smreqn, target_addr, 0);
/*
 * Convert a guest struct sockaddr into the host form.  If the fd has a
 * registered address translator (fd_trans), delegate to it entirely.
 * Otherwise copy the bytes, swap sa_family, and fix up families whose
 * payload contains multi-byte fields (AF_NETLINK pid/groups, AF_PACKET
 * ifindex/hatype).  For AF_UNIX the caller-supplied length is extended
 * by one when the guest passed a sun_path without its terminating NUL,
 * mirroring the kernel's leniency.
 */
1462 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1463 abi_ulong target_addr,
1466 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1467 sa_family_t sa_family;
1468 struct target_sockaddr *target_saddr;
1470 if (fd_trans_target_to_host_addr(fd)) {
1471 return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1474 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1476 return -TARGET_EFAULT;
1478 sa_family = tswap16(target_saddr->sa_family);
1480 /* Oops. The caller might send a incomplete sun_path; sun_path
1481 * must be terminated by \0 (see the manual page), but
1482 * unfortunately it is quite common to specify sockaddr_un
1483 * length as "strlen(x->sun_path)" while it should be
1484 * "strlen(...) + 1". We'll fix that here if needed.
1485 * Linux kernel has a similar feature.
1488 if (sa_family == AF_UNIX) {
1489 if (len < unix_maxlen && len > 0) {
1490 char *cp = (char*)target_saddr;
1492 if ( cp[len-1] && !cp[len] )
1495 if (len > unix_maxlen)
1499 memcpy(addr, target_saddr, len);
1500 addr->sa_family = sa_family;
1501 if (sa_family == AF_NETLINK) {
1502 struct sockaddr_nl *nladdr;
1504 nladdr = (struct sockaddr_nl *)addr;
1505 nladdr->nl_pid = tswap32(nladdr->nl_pid);
1506 nladdr->nl_groups = tswap32(nladdr->nl_groups);
1507 } else if (sa_family == AF_PACKET) {
1508 struct target_sockaddr_ll *lladdr;
1510 lladdr = (struct target_sockaddr_ll *)addr;
1511 lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1512 lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1514 unlock_user(target_saddr, target_addr, 0);
/*
 * Convert a host struct sockaddr into guest memory at target_addr.
 * Copies len bytes, swaps sa_family (only when len actually covers the
 * field - partial addresses are possible), and byte-swaps the
 * family-specific multi-byte fields for AF_NETLINK and AF_PACKET.
 */
1519 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1520 struct sockaddr *addr,
1523 struct target_sockaddr *target_saddr;
1529 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1531 return -TARGET_EFAULT;
1532 memcpy(target_saddr, addr, len);
1533 if (len >= offsetof(struct target_sockaddr, sa_family) +
1534 sizeof(target_saddr->sa_family)) {
1535 target_saddr->sa_family = tswap16(addr->sa_family);
1537 if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
1538 struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
1539 target_nl->nl_pid = tswap32(target_nl->nl_pid);
1540 target_nl->nl_groups = tswap32(target_nl->nl_groups);
1541 } else if (addr->sa_family == AF_PACKET) {
1542 struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1543 target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1544 target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1546 unlock_user(target_saddr, target_addr, len);
/*
 * Convert the ancillary-data (cmsg) chain of a guest msghdr into the
 * host msgh control buffer.  Walks guest and host cmsg headers in
 * lockstep, translating level/type/len and the payload for the types
 * QEMU understands (SCM_RIGHTS fd arrays, SCM_CREDENTIALS); anything
 * else is copied verbatim with a warning.  On return
 * msgh->msg_controllen is trimmed to the space actually used.
 */
1551 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1552 struct target_msghdr *target_msgh)
1554 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1555 abi_long msg_controllen;
1556 abi_ulong target_cmsg_addr;
1557 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1558 socklen_t space = 0;
1560 msg_controllen = tswapal(target_msgh->msg_controllen);
1561 if (msg_controllen < sizeof (struct target_cmsghdr))
1563 target_cmsg_addr = tswapal(target_msgh->msg_control);
1564 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1565 target_cmsg_start = target_cmsg;
1567 return -TARGET_EFAULT;
1569 while (cmsg && target_cmsg) {
1570 void *data = CMSG_DATA(cmsg);
1571 void *target_data = TARGET_CMSG_DATA(target_cmsg);
/* Payload length = guest cmsg_len minus the guest header size. */
1573 int len = tswapal(target_cmsg->cmsg_len)
1574 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
1576 space += CMSG_SPACE(len);
1577 if (space > msgh->msg_controllen) {
1578 space -= CMSG_SPACE(len);
1579 /* This is a QEMU bug, since we allocated the payload
1580 * area ourselves (unlike overflow in host-to-target
1581 * conversion, which is just the guest giving us a buffer
1582 * that's too small). It can't happen for the payload types
1583 * we currently support; if it becomes an issue in future
1584 * we would need to improve our allocation strategy to
1585 * something more intelligent than "twice the size of the
1586 * target buffer we're reading from".
1588 gemu_log("Host cmsg overflow\n");
1592 if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1593 cmsg->cmsg_level = SOL_SOCKET;
1595 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1597 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1598 cmsg->cmsg_len = CMSG_LEN(len);
1600 if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1601 int *fd = (int *)data;
1602 int *target_fd = (int *)target_data;
1603 int i, numfds = len / sizeof(int);
1605 for (i = 0; i < numfds; i++) {
1606 __get_user(fd[i], target_fd + i);
1608 } else if (cmsg->cmsg_level == SOL_SOCKET
1609 && cmsg->cmsg_type == SCM_CREDENTIALS) {
1610 struct ucred *cred = (struct ucred *)data;
1611 struct target_ucred *target_cred =
1612 (struct target_ucred *)target_data;
1614 __get_user(cred->pid, &target_cred->pid);
1615 __get_user(cred->uid, &target_cred->uid);
1616 __get_user(cred->gid, &target_cred->gid);
/* Unknown payload: pass the bytes through untranslated. */
1618 gemu_log("Unsupported ancillary data: %d/%d\n",
1619 cmsg->cmsg_level, cmsg->cmsg_type);
1620 memcpy(data, target_data, len);
1623 cmsg = CMSG_NXTHDR(msgh, cmsg);
1624 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1627 unlock_user(target_cmsg, target_cmsg_addr, 0);
1629 msgh->msg_controllen = space;
/*
 * Convert the ancillary-data (cmsg) chain received by the host back
 * into the guest msghdr's control buffer.  Mirrors target_to_host_cmsg
 * but additionally handles truncation: if the guest buffer is too
 * small, data is truncated and MSG_CTRUNC is reported, matching the
 * kernel's put_cmsg() behaviour.  Payload sizes that differ between
 * host and target (e.g. SO_TIMESTAMP's struct timeval) are resized.
 */
1633 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1634 struct msghdr *msgh)
1636 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1637 abi_long msg_controllen;
1638 abi_ulong target_cmsg_addr;
1639 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1640 socklen_t space = 0;
1642 msg_controllen = tswapal(target_msgh->msg_controllen);
1643 if (msg_controllen < sizeof (struct target_cmsghdr))
1645 target_cmsg_addr = tswapal(target_msgh->msg_control);
1646 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1647 target_cmsg_start = target_cmsg;
1649 return -TARGET_EFAULT;
1651 while (cmsg && target_cmsg) {
1652 void *data = CMSG_DATA(cmsg);
1653 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1655 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
1656 int tgt_len, tgt_space;
1658 /* We never copy a half-header but may copy half-data;
1659 * this is Linux's behaviour in put_cmsg(). Note that
1660 * truncation here is a guest problem (which we report
1661 * to the guest via the CTRUNC bit), unlike truncation
1662 * in target_to_host_cmsg, which is a QEMU bug.
1664 if (msg_controllen < sizeof(struct cmsghdr)) {
1665 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1669 if (cmsg->cmsg_level == SOL_SOCKET) {
1670 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1672 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1674 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1676 tgt_len = TARGET_CMSG_LEN(len);
1678 /* Payload types which need a different size of payload on
1679 * the target must adjust tgt_len here.
1681 switch (cmsg->cmsg_level) {
1683 switch (cmsg->cmsg_type) {
1685 tgt_len = sizeof(struct target_timeval);
/* Clamp to the space the guest actually gave us. */
1694 if (msg_controllen < tgt_len) {
1695 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1696 tgt_len = msg_controllen;
1699 /* We must now copy-and-convert len bytes of payload
1700 * into tgt_len bytes of destination space. Bear in mind
1701 * that in both source and destination we may be dealing
1702 * with a truncated value!
1704 switch (cmsg->cmsg_level) {
1706 switch (cmsg->cmsg_type) {
1709 int *fd = (int *)data;
1710 int *target_fd = (int *)target_data;
1711 int i, numfds = tgt_len / sizeof(int);
1713 for (i = 0; i < numfds; i++) {
1714 __put_user(fd[i], target_fd + i);
1720 struct timeval *tv = (struct timeval *)data;
1721 struct target_timeval *target_tv =
1722 (struct target_timeval *)target_data;
1724 if (len != sizeof(struct timeval) ||
1725 tgt_len != sizeof(struct target_timeval)) {
1729 /* copy struct timeval to target */
1730 __put_user(tv->tv_sec, &target_tv->tv_sec);
1731 __put_user(tv->tv_usec, &target_tv->tv_usec);
1734 case SCM_CREDENTIALS:
1736 struct ucred *cred = (struct ucred *)data;
1737 struct target_ucred *target_cred =
1738 (struct target_ucred *)target_data;
1740 __put_user(cred->pid, &target_cred->pid);
1741 __put_user(cred->uid, &target_cred->uid);
1742 __put_user(cred->gid, &target_cred->gid);
/* Unknown payload: raw copy, zero-padding if the target slot is
 * larger than the host data. */
1752 gemu_log("Unsupported ancillary data: %d/%d\n",
1753 cmsg->cmsg_level, cmsg->cmsg_type);
1754 memcpy(target_data, data, MIN(len, tgt_len));
1755 if (tgt_len > len) {
1756 memset(target_data + len, 0, tgt_len - len);
1760 target_cmsg->cmsg_len = tswapal(tgt_len);
1761 tgt_space = TARGET_CMSG_SPACE(len);
1762 if (msg_controllen < tgt_space) {
1763 tgt_space = msg_controllen;
1765 msg_controllen -= tgt_space;
1767 cmsg = CMSG_NXTHDR(msgh, cmsg);
1768 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1771 unlock_user(target_cmsg, target_cmsg_addr, space);
1773 target_msgh->msg_controllen = tswapal(space);
/*
 * Byte-swap every field of a netlink message header in place.  The
 * swap is an involution, so the same helper serves both host-to-target
 * and target-to-host directions.
 */
1777 static void tswap_nlmsghdr(struct nlmsghdr *nlh)
1779 nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
1780 nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
1781 nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
1782 nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
1783 nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
/*
 * Walk a buffer of host-byte-order netlink messages of total size len,
 * converting each to target byte order.  NLMSG_ERROR payloads carry an
 * embedded nlmsghdr plus an error code which are swapped too; other
 * message types are handed to the caller's host_to_target_nlmsg
 * callback before the header itself is swapped.  Bounds-checks each
 * nlmsg_len against the remaining buffer.
 */
1786 static abi_long host_to_target_for_each_nlmsg(struct nlmsghdr *nlh,
1788 abi_long (*host_to_target_nlmsg)
1789 (struct nlmsghdr *))
1794 while (len > sizeof(struct nlmsghdr)) {
1796 nlmsg_len = nlh->nlmsg_len;
1797 if (nlmsg_len < sizeof(struct nlmsghdr) ||
1802 switch (nlh->nlmsg_type) {
1804 tswap_nlmsghdr(nlh);
1810 struct nlmsgerr *e = NLMSG_DATA(nlh);
1811 e->error = tswap32(e->error);
1812 tswap_nlmsghdr(&e->msg);
1813 tswap_nlmsghdr(nlh);
1817 ret = host_to_target_nlmsg(nlh);
1819 tswap_nlmsghdr(nlh);
/* Advance to the next aligned message. */
1824 tswap_nlmsghdr(nlh);
1825 len -= NLMSG_ALIGN(nlmsg_len);
1826 nlh = (struct nlmsghdr *)(((char*)nlh) + NLMSG_ALIGN(nlmsg_len));
/*
 * Inverse of host_to_target_for_each_nlmsg: walk guest-byte-order
 * netlink messages, validate each length (swapped before the header is
 * converted), swap the header, fix up NLMSG_ERROR payloads, and hand
 * other message types to the target_to_host_nlmsg callback.
 */
1831 static abi_long target_to_host_for_each_nlmsg(struct nlmsghdr *nlh,
1833 abi_long (*target_to_host_nlmsg)
1834 (struct nlmsghdr *))
1838 while (len > sizeof(struct nlmsghdr)) {
1839 if (tswap32(nlh->nlmsg_len) < sizeof(struct nlmsghdr) ||
1840 tswap32(nlh->nlmsg_len) > len) {
1843 tswap_nlmsghdr(nlh);
1844 switch (nlh->nlmsg_type) {
1851 struct nlmsgerr *e = NLMSG_DATA(nlh);
1852 e->error = tswap32(e->error);
1853 tswap_nlmsghdr(&e->msg);
1857 ret = target_to_host_nlmsg(nlh);
1862 len -= NLMSG_ALIGN(nlh->nlmsg_len);
1863 nlh = (struct nlmsghdr *)(((char *)nlh) + NLMSG_ALIGN(nlh->nlmsg_len));
1868 #ifdef CONFIG_RTNETLINK
/*
 * Iterate over a run of host-byte-order struct nlattr attributes,
 * invoking the per-attribute callback (which converts the payload)
 * before swapping the attribute header itself.  `context` is passed
 * through opaquely to the callback.  Lengths are bounds-checked against
 * the remaining buffer.
 */
1869 static abi_long host_to_target_for_each_nlattr(struct nlattr *nlattr,
1870 size_t len, void *context,
1871 abi_long (*host_to_target_nlattr)
1875 unsigned short nla_len;
1878 while (len > sizeof(struct nlattr)) {
1879 nla_len = nlattr->nla_len;
1880 if (nla_len < sizeof(struct nlattr) ||
1884 ret = host_to_target_nlattr(nlattr, context);
1885 nlattr->nla_len = tswap16(nlattr->nla_len);
1886 nlattr->nla_type = tswap16(nlattr->nla_type);
1890 len -= NLA_ALIGN(nla_len);
1891 nlattr = (struct nlattr *)(((char *)nlattr) + NLA_ALIGN(nla_len));
/*
 * Same shape as host_to_target_for_each_nlattr but for struct rtattr
 * chains (rtnetlink attributes): convert each payload via the callback,
 * then swap the rta_len/rta_type header fields, advancing by the
 * RTA_ALIGNed length.
 */
1896 static abi_long host_to_target_for_each_rtattr(struct rtattr *rtattr,
1898 abi_long (*host_to_target_rtattr)
1901 unsigned short rta_len;
1904 while (len > sizeof(struct rtattr)) {
1905 rta_len = rtattr->rta_len;
1906 if (rta_len < sizeof(struct rtattr) ||
1910 ret = host_to_target_rtattr(rtattr);
1911 rtattr->rta_len = tswap16(rtattr->rta_len);
1912 rtattr->rta_type = tswap16(rtattr->rta_type);
1916 len -= RTA_ALIGN(rta_len);
1917 rtattr = (struct rtattr *)(((char *)rtattr) + RTA_ALIGN(rta_len));
/* Payload pointer of an nlattr: skip the (aligned) attribute header. */
1922 #define NLA_DATA(nla) ((void *)((char *)(nla)) + NLA_HDRLEN)
/*
 * Byte-swap the payload of one IFLA_BR_* (bridge) attribute according
 * to its width: single-byte and raw-byte attributes pass through,
 * u16/u32/u64 attributes are swapped in place; unknown types are only
 * logged.
 */
1924 static abi_long host_to_target_data_bridge_nlattr(struct nlattr *nlattr,
1931 switch (nlattr->nla_type) {
1933 case QEMU_IFLA_BR_FDB_FLUSH:
1936 case QEMU_IFLA_BR_GROUP_ADDR:
1939 case QEMU_IFLA_BR_VLAN_FILTERING:
1940 case QEMU_IFLA_BR_TOPOLOGY_CHANGE:
1941 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED:
1942 case QEMU_IFLA_BR_MCAST_ROUTER:
1943 case QEMU_IFLA_BR_MCAST_SNOOPING:
1944 case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR:
1945 case QEMU_IFLA_BR_MCAST_QUERIER:
1946 case QEMU_IFLA_BR_NF_CALL_IPTABLES:
1947 case QEMU_IFLA_BR_NF_CALL_IP6TABLES:
1948 case QEMU_IFLA_BR_NF_CALL_ARPTABLES:
/* 16-bit attributes */
1951 case QEMU_IFLA_BR_PRIORITY:
1952 case QEMU_IFLA_BR_VLAN_PROTOCOL:
1953 case QEMU_IFLA_BR_GROUP_FWD_MASK:
1954 case QEMU_IFLA_BR_ROOT_PORT:
1955 case QEMU_IFLA_BR_VLAN_DEFAULT_PVID:
1956 u16 = NLA_DATA(nlattr);
1957 *u16 = tswap16(*u16);
/* 32-bit attributes */
1960 case QEMU_IFLA_BR_FORWARD_DELAY:
1961 case QEMU_IFLA_BR_HELLO_TIME:
1962 case QEMU_IFLA_BR_MAX_AGE:
1963 case QEMU_IFLA_BR_AGEING_TIME:
1964 case QEMU_IFLA_BR_STP_STATE:
1965 case QEMU_IFLA_BR_ROOT_PATH_COST:
1966 case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY:
1967 case QEMU_IFLA_BR_MCAST_HASH_MAX:
1968 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT:
1969 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT:
1970 u32 = NLA_DATA(nlattr);
1971 *u32 = tswap32(*u32);
/* 64-bit attributes (timer intervals) */
1974 case QEMU_IFLA_BR_HELLO_TIMER:
1975 case QEMU_IFLA_BR_TCN_TIMER:
1976 case QEMU_IFLA_BR_GC_TIMER:
1977 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER:
1978 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL:
1979 case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL:
1980 case QEMU_IFLA_BR_MCAST_QUERIER_INTVL:
1981 case QEMU_IFLA_BR_MCAST_QUERY_INTVL:
1982 case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL:
1983 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL:
1984 u64 = NLA_DATA(nlattr);
1985 *u64 = tswap64(*u64);
1987 /* ifla_bridge_id: uin8_t[] */
1988 case QEMU_IFLA_BR_ROOT_ID:
1989 case QEMU_IFLA_BR_BRIDGE_ID:
1992 gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr->nla_type);
/*
 * Byte-swap the payload of one IFLA_BRPORT_* (bridge port) attribute
 * by width, like host_to_target_data_bridge_nlattr: u8 attributes pass
 * through, u16/u32/u64 are swapped in place, unknown types are logged.
 */
1998 static abi_long host_to_target_slave_data_bridge_nlattr(struct nlattr *nlattr,
2005 switch (nlattr->nla_type) {
/* 8-bit attributes */
2007 case QEMU_IFLA_BRPORT_STATE:
2008 case QEMU_IFLA_BRPORT_MODE:
2009 case QEMU_IFLA_BRPORT_GUARD:
2010 case QEMU_IFLA_BRPORT_PROTECT:
2011 case QEMU_IFLA_BRPORT_FAST_LEAVE:
2012 case QEMU_IFLA_BRPORT_LEARNING:
2013 case QEMU_IFLA_BRPORT_UNICAST_FLOOD:
2014 case QEMU_IFLA_BRPORT_PROXYARP:
2015 case QEMU_IFLA_BRPORT_LEARNING_SYNC:
2016 case QEMU_IFLA_BRPORT_PROXYARP_WIFI:
2017 case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK:
2018 case QEMU_IFLA_BRPORT_CONFIG_PENDING:
2019 case QEMU_IFLA_BRPORT_MULTICAST_ROUTER:
/* 16-bit attributes */
2022 case QEMU_IFLA_BRPORT_PRIORITY:
2023 case QEMU_IFLA_BRPORT_DESIGNATED_PORT:
2024 case QEMU_IFLA_BRPORT_DESIGNATED_COST:
2025 case QEMU_IFLA_BRPORT_ID:
2026 case QEMU_IFLA_BRPORT_NO:
2027 u16 = NLA_DATA(nlattr);
2028 *u16 = tswap16(*u16);
/* 32-bit attributes */
2031 case QEMU_IFLA_BRPORT_COST:
2032 u32 = NLA_DATA(nlattr);
2033 *u32 = tswap32(*u32);
/* 64-bit attributes (timers) */
2036 case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER:
2037 case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER:
2038 case QEMU_IFLA_BRPORT_HOLD_TIMER:
2039 u64 = NLA_DATA(nlattr);
2040 *u64 = tswap64(*u64);
2042 /* ifla_bridge_id: uint8_t[] */
2043 case QEMU_IFLA_BRPORT_ROOT_ID:
2044 case QEMU_IFLA_BRPORT_BRIDGE_ID:
2047 gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr->nla_type);
/* Carries the link kind name (and slave kind) seen in IFLA_INFO_KIND /
 * IFLA_INFO_SLAVE_KIND so that later IFLA_INFO_DATA attributes in the
 * same nest can be dispatched to the right converter.  Field
 * declarations are not visible in this elided view. */
2053 struct linkinfo_context {
/*
 * Convert one IFLA_INFO_* attribute nested inside IFLA_LINKINFO.
 * KIND/SLAVE_KIND record the kind string into the context; DATA and
 * SLAVE_DATA recurse into the nested attributes with the bridge
 * converters when the recorded kind is "bridge", and log otherwise.
 */
2060 static abi_long host_to_target_data_linkinfo_nlattr(struct nlattr *nlattr,
2063 struct linkinfo_context *li_context = context;
2065 switch (nlattr->nla_type) {
2067 case QEMU_IFLA_INFO_KIND:
2068 li_context->name = NLA_DATA(nlattr);
2069 li_context->len = nlattr->nla_len - NLA_HDRLEN;
2071 case QEMU_IFLA_INFO_SLAVE_KIND:
2072 li_context->slave_name = NLA_DATA(nlattr);
2073 li_context->slave_len = nlattr->nla_len - NLA_HDRLEN;
2076 case QEMU_IFLA_INFO_XSTATS:
2077 /* FIXME: only used by CAN */
2080 case QEMU_IFLA_INFO_DATA:
2081 if (strncmp(li_context->name, "bridge",
2082 li_context->len) == 0) {
2083 return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2086 host_to_target_data_bridge_nlattr);
2088 gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context->name);
2091 case QEMU_IFLA_INFO_SLAVE_DATA:
2092 if (strncmp(li_context->slave_name, "bridge",
2093 li_context->slave_len) == 0) {
2094 return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2097 host_to_target_slave_data_bridge_nlattr);
2099 gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
2100 li_context->slave_name);
2104 gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr->nla_type);
/*
 * Convert AF_INET attributes nested in IFLA_AF_SPEC: IFLA_INET_CONF is
 * an array of u32s, each swapped in place; other types are logged.
 */
2111 static abi_long host_to_target_data_inet_nlattr(struct nlattr *nlattr,
2117 switch (nlattr->nla_type) {
2118 case QEMU_IFLA_INET_CONF:
2119 u32 = NLA_DATA(nlattr);
2120 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
2122 u32[i] = tswap32(u32[i]);
2126 gemu_log("Unknown host AF_INET type: %d\n", nlattr->nla_type);
/*
 * Convert AF_INET6 attributes nested in IFLA_AF_SPEC.  Raw-byte
 * attributes (token, addr-gen-mode) pass through; FLAGS is a single
 * u32, CONF a u32 array, CACHEINFO a struct ifla_cacheinfo of four
 * u32s, and STATS/ICMP6STATS are u64 arrays - each swapped in place.
 */
2131 static abi_long host_to_target_data_inet6_nlattr(struct nlattr *nlattr,
2136 struct ifla_cacheinfo *ci;
2139 switch (nlattr->nla_type) {
2141 case QEMU_IFLA_INET6_TOKEN:
2144 case QEMU_IFLA_INET6_ADDR_GEN_MODE:
2147 case QEMU_IFLA_INET6_FLAGS:
2148 u32 = NLA_DATA(nlattr);
2149 *u32 = tswap32(*u32);
2152 case QEMU_IFLA_INET6_CONF:
2153 u32 = NLA_DATA(nlattr);
2154 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
2156 u32[i] = tswap32(u32[i]);
2159 /* ifla_cacheinfo */
2160 case QEMU_IFLA_INET6_CACHEINFO:
2161 ci = NLA_DATA(nlattr);
2162 ci->max_reasm_len = tswap32(ci->max_reasm_len);
2163 ci->tstamp = tswap32(ci->tstamp);
2164 ci->reachable_time = tswap32(ci->reachable_time);
2165 ci->retrans_time = tswap32(ci->retrans_time);
2168 case QEMU_IFLA_INET6_STATS:
2169 case QEMU_IFLA_INET6_ICMP6STATS:
2170 u64 = NLA_DATA(nlattr);
2171 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u64);
2173 u64[i] = tswap64(u64[i]);
2177 gemu_log("Unknown host AF_INET6 type: %d\n", nlattr->nla_type);
/*
 * Dispatch an IFLA_AF_SPEC nested attribute by address family,
 * recursing into the AF_INET or AF_INET6 converter (the case labels
 * themselves are not visible in this elided view); unknown families
 * are logged.
 */
2182 static abi_long host_to_target_data_spec_nlattr(struct nlattr *nlattr,
2185 switch (nlattr->nla_type) {
2187 return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2189 host_to_target_data_inet_nlattr);
2191 return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2193 host_to_target_data_inet6_nlattr);
2195 gemu_log("Unknown host AF_SPEC type: %d\n", nlattr->nla_type);
/*
 * Convert the payload of one IFLA_* attribute of an RTM_*LINK message
 * to target byte order.  Grouped by payload width/shape: raw bytes and
 * strings pass through, u8/u32 scalars are swapped, struct
 * rtnl_link_stats / rtnl_link_stats64 / rtnl_link_ifmap are swapped
 * field by field, and LINKINFO / AF_SPEC recurse into nested-attribute
 * walkers.  Unknown attribute types are logged, not failed.
 */
2201 static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
2204 struct rtnl_link_stats *st;
2205 struct rtnl_link_stats64 *st64;
2206 struct rtnl_link_ifmap *map;
2207 struct linkinfo_context li_context;
2209 switch (rtattr->rta_type) {
/* binary / string payloads: nothing to swap */
2211 case QEMU_IFLA_ADDRESS:
2212 case QEMU_IFLA_BROADCAST:
2214 case QEMU_IFLA_IFNAME:
2215 case QEMU_IFLA_QDISC:
/* 8-bit payloads */
2218 case QEMU_IFLA_OPERSTATE:
2219 case QEMU_IFLA_LINKMODE:
2220 case QEMU_IFLA_CARRIER:
2221 case QEMU_IFLA_PROTO_DOWN:
/* 32-bit payloads */
2225 case QEMU_IFLA_LINK:
2226 case QEMU_IFLA_WEIGHT:
2227 case QEMU_IFLA_TXQLEN:
2228 case QEMU_IFLA_CARRIER_CHANGES:
2229 case QEMU_IFLA_NUM_RX_QUEUES:
2230 case QEMU_IFLA_NUM_TX_QUEUES:
2231 case QEMU_IFLA_PROMISCUITY:
2232 case QEMU_IFLA_EXT_MASK:
2233 case QEMU_IFLA_LINK_NETNSID:
2234 case QEMU_IFLA_GROUP:
2235 case QEMU_IFLA_MASTER:
2236 case QEMU_IFLA_NUM_VF:
2237 u32 = RTA_DATA(rtattr);
2238 *u32 = tswap32(*u32);
2240 /* struct rtnl_link_stats */
2241 case QEMU_IFLA_STATS:
2242 st = RTA_DATA(rtattr)
2243 st->rx_packets = tswap32(st->rx_packets);
2244 st->tx_packets = tswap32(st->tx_packets);
2245 st->rx_bytes = tswap32(st->rx_bytes);
2246 st->tx_bytes = tswap32(st->tx_bytes);
2247 st->rx_errors = tswap32(st->rx_errors);
2248 st->tx_errors = tswap32(st->tx_errors);
2249 st->rx_dropped = tswap32(st->rx_dropped);
2250 st->tx_dropped = tswap32(st->tx_dropped);
2251 st->multicast = tswap32(st->multicast);
2252 st->collisions = tswap32(st->collisions);
2254 /* detailed rx_errors: */
2255 st->rx_length_errors = tswap32(st->rx_length_errors);
2256 st->rx_over_errors = tswap32(st->rx_over_errors);
2257 st->rx_crc_errors = tswap32(st->rx_crc_errors);
2258 st->rx_frame_errors = tswap32(st->rx_frame_errors);
2259 st->rx_fifo_errors = tswap32(st->rx_fifo_errors);
2260 st->rx_missed_errors = tswap32(st->rx_missed_errors);
2262 /* detailed tx_errors */
2263 st->tx_aborted_errors = tswap32(st->tx_aborted_errors);
2264 st->tx_carrier_errors = tswap32(st->tx_carrier_errors);
2265 st->tx_fifo_errors = tswap32(st->tx_fifo_errors);
2266 st->tx_heartbeat_errors = tswap32(st->tx_heartbeat_errors);
2267 st->tx_window_errors = tswap32(st->tx_window_errors);
2270 st->rx_compressed = tswap32(st->rx_compressed);
2271 st->tx_compressed = tswap32(st->tx_compressed);
2273 /* struct rtnl_link_stats64 */
2274 case QEMU_IFLA_STATS64:
2275 st64 = RTA_DATA(rtattr);
2276 st64->rx_packets = tswap64(st64->rx_packets);
2277 st64->tx_packets = tswap64(st64->tx_packets);
2278 st64->rx_bytes = tswap64(st64->rx_bytes);
2279 st64->tx_bytes = tswap64(st64->tx_bytes);
2280 st64->rx_errors = tswap64(st64->rx_errors);
2281 st64->tx_errors = tswap64(st64->tx_errors);
2282 st64->rx_dropped = tswap64(st64->rx_dropped);
2283 st64->tx_dropped = tswap64(st64->tx_dropped);
2284 st64->multicast = tswap64(st64->multicast);
2285 st64->collisions = tswap64(st64->collisions);
2287 /* detailed rx_errors: */
2288 st64->rx_length_errors = tswap64(st64->rx_length_errors);
2289 st64->rx_over_errors = tswap64(st64->rx_over_errors);
2290 st64->rx_crc_errors = tswap64(st64->rx_crc_errors);
2291 st64->rx_frame_errors = tswap64(st64->rx_frame_errors);
2292 st64->rx_fifo_errors = tswap64(st64->rx_fifo_errors);
2293 st64->rx_missed_errors = tswap64(st64->rx_missed_errors);
2295 /* detailed tx_errors */
2296 st64->tx_aborted_errors = tswap64(st64->tx_aborted_errors);
2297 st64->tx_carrier_errors = tswap64(st64->tx_carrier_errors);
2298 st64->tx_fifo_errors = tswap64(st64->tx_fifo_errors);
2299 st64->tx_heartbeat_errors = tswap64(st64->tx_heartbeat_errors);
2300 st64->tx_window_errors = tswap64(st64->tx_window_errors);
2303 st64->rx_compressed = tswap64(st64->rx_compressed);
2304 st64->tx_compressed = tswap64(st64->tx_compressed);
2306 /* struct rtnl_link_ifmap */
2308 map = RTA_DATA(rtattr);
2309 map->mem_start = tswap64(map->mem_start);
2310 map->mem_end = tswap64(map->mem_end);
2311 map->base_addr = tswap64(map->base_addr);
2312 map->irq = tswap16(map->irq);
/* nested attribute sets: recurse */
2315 case QEMU_IFLA_LINKINFO:
2316 memset(&li_context, 0, sizeof(li_context));
2317 return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
2319 host_to_target_data_linkinfo_nlattr);
2320 case QEMU_IFLA_AF_SPEC:
2321 return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
2323 host_to_target_data_spec_nlattr);
2325 gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr->rta_type);
/*
 * Convert the payload of one IFA_* attribute of an RTM_*ADDR message:
 * family-dependent binary payloads pass through, u32 scalars are
 * swapped, and IFA_CACHEINFO's four u32 fields are swapped
 * individually.  (Several case labels are not visible in this elided
 * view.)  Unknown types are logged.
 */
2331 static abi_long host_to_target_data_addr_rtattr(struct rtattr *rtattr)
2334 struct ifa_cacheinfo *ci;
2336 switch (rtattr->rta_type) {
2337 /* binary: depends on family type */
2347 u32 = RTA_DATA(rtattr);
2348 *u32 = tswap32(*u32);
2350 /* struct ifa_cacheinfo */
2352 ci = RTA_DATA(rtattr);
2353 ci->ifa_prefered = tswap32(ci->ifa_prefered);
2354 ci->ifa_valid = tswap32(ci->ifa_valid);
2355 ci->cstamp = tswap32(ci->cstamp);
2356 ci->tstamp = tswap32(ci->tstamp);
2359 gemu_log("Unknown host IFA type: %d\n", rtattr->rta_type);
/*
 * Convert the payload of one RTA_* attribute of an RTM_*ROUTE message:
 * family-dependent binary payloads pass through and u32 scalars are
 * swapped (the specific case labels are not visible in this elided
 * view).  Unknown types are logged.
 */
2365 static abi_long host_to_target_data_route_rtattr(struct rtattr *rtattr)
2368 switch (rtattr->rta_type) {
2369 /* binary: depends on family type */
2378 u32 = RTA_DATA(rtattr);
2379 *u32 = tswap32(*u32);
2382 gemu_log("Unknown host RTA type: %d\n", rtattr->rta_type);
/* Thin wrappers binding the generic rtattr walker to the per-message
 * payload converters for link, address and route attributes. */
2388 static abi_long host_to_target_link_rtattr(struct rtattr *rtattr,
2389 uint32_t rtattr_len)
2391 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2392 host_to_target_data_link_rtattr);
2395 static abi_long host_to_target_addr_rtattr(struct rtattr *rtattr,
2396 uint32_t rtattr_len)
2398 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2399 host_to_target_data_addr_rtattr);
2402 static abi_long host_to_target_route_rtattr(struct rtattr *rtattr,
2403 uint32_t rtattr_len)
2405 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2406 host_to_target_data_route_rtattr);
/*
 * Convert the payload of one rtnetlink message (link / addr / route)
 * from host to target byte order: swap the fixed ifinfomsg / ifaddrmsg
 * / rtmsg header fields, then convert the trailing attribute chain.
 * Returns -TARGET_EINVAL for message types QEMU does not handle.
 */
2409 static abi_long host_to_target_data_route(struct nlmsghdr *nlh)
2412 struct ifinfomsg *ifi;
2413 struct ifaddrmsg *ifa;
2416 nlmsg_len = nlh->nlmsg_len;
2417 switch (nlh->nlmsg_type) {
2421 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
2422 ifi = NLMSG_DATA(nlh);
2423 ifi->ifi_type = tswap16(ifi->ifi_type);
2424 ifi->ifi_index = tswap32(ifi->ifi_index);
2425 ifi->ifi_flags = tswap32(ifi->ifi_flags);
2426 ifi->ifi_change = tswap32(ifi->ifi_change);
2427 host_to_target_link_rtattr(IFLA_RTA(ifi),
2428 nlmsg_len - NLMSG_LENGTH(sizeof(*ifi)));
2434 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
2435 ifa = NLMSG_DATA(nlh);
2436 ifa->ifa_index = tswap32(ifa->ifa_index);
2437 host_to_target_addr_rtattr(IFA_RTA(ifa),
2438 nlmsg_len - NLMSG_LENGTH(sizeof(*ifa)));
2444 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
2445 rtm = NLMSG_DATA(nlh);
2446 rtm->rtm_flags = tswap32(rtm->rtm_flags);
2447 host_to_target_route_rtattr(RTM_RTA(rtm),
2448 nlmsg_len - NLMSG_LENGTH(sizeof(*rtm)));
2452 return -TARGET_EINVAL;
/* Convert every routing-netlink message in a host buffer of 'len'
 * bytes to target byte order. */
2457 static inline abi_long host_to_target_nlmsg_route(struct nlmsghdr *nlh,
2460     return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_route);
/*
 * Iterate over a target-byte-order rtattr chain, byte-swapping each
 * header to host order and invoking the supplied per-attribute
 * converter.  Chain entries whose (target-order) rta_len is shorter
 * than an rtattr header or longer than the remaining buffer terminate
 * the walk — this validates lengths before trusting them.
 */
2463 static abi_long target_to_host_for_each_rtattr(struct rtattr *rtattr,
2465                                                 abi_long (*target_to_host_rtattr)
2470     while (len >= sizeof(struct rtattr)) {
/* validate the still-target-order length before using it */
2471         if (tswap16(rtattr->rta_len) < sizeof(struct rtattr) ||
2472             tswap16(rtattr->rta_len) > len) {
/* header to host order, then convert the payload */
2475         rtattr->rta_len = tswap16(rtattr->rta_len);
2476         rtattr->rta_type = tswap16(rtattr->rta_type);
2477         ret = target_to_host_rtattr(rtattr);
/* advance by the aligned attribute size */
2481         len -= RTA_ALIGN(rtattr->rta_len);
2482         rtattr = (struct rtattr *)(((char *)rtattr) +
2483                                    RTA_ALIGN(rtattr->rta_len));
/* Convert one IFLA rtattr payload target->host; only logging of
 * unknown types is visible in this excerpt. */
2488 static abi_long target_to_host_data_link_rtattr(struct rtattr *rtattr)
2490     switch (rtattr->rta_type) {
2492         gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr->rta_type);
/* Convert one IFA rtattr payload target->host; only logging of
 * unknown types is visible in this excerpt. */
2498 static abi_long target_to_host_data_addr_rtattr(struct rtattr *rtattr)
2500     switch (rtattr->rta_type) {
2501     /* binary: depends on family type */
2506         gemu_log("Unknown target IFA type: %d\n", rtattr->rta_type);
/* Convert one RTA rtattr payload target->host: 32-bit scalar payloads
 * are swapped in place; unknown types are logged. */
2512 static abi_long target_to_host_data_route_rtattr(struct rtattr *rtattr)
2515     switch (rtattr->rta_type) {
2516     /* binary: depends on family type */
2523         u32 = RTA_DATA(rtattr);
2524         *u32 = tswap32(*u32);
2527         gemu_log("Unknown target RTA type: %d\n", rtattr->rta_type);
/* Walk the IFLA rtattr chain of a target RTM_*LINK message, converting
 * each attribute to host order. */
2533 static void target_to_host_link_rtattr(struct rtattr *rtattr,
2534                                        uint32_t rtattr_len)
2536     target_to_host_for_each_rtattr(rtattr, rtattr_len,
2537                                    target_to_host_data_link_rtattr);
/* Walk the IFA rtattr chain of a target RTM_*ADDR message, converting
 * each attribute to host order. */
2540 static void target_to_host_addr_rtattr(struct rtattr *rtattr,
2541                                        uint32_t rtattr_len)
2543     target_to_host_for_each_rtattr(rtattr, rtattr_len,
2544                                    target_to_host_data_addr_rtattr);
/* Walk the RTA rtattr chain of a target RTM_*ROUTE message, converting
 * each attribute to host order. */
2547 static void target_to_host_route_rtattr(struct rtattr *rtattr,
2548                                         uint32_t rtattr_len)
2550     target_to_host_for_each_rtattr(rtattr, rtattr_len,
2551                                    target_to_host_data_route_rtattr);
/*
 * Mirror of host_to_target_data_route for the outbound direction:
 * convert one routing-netlink message payload from target to host
 * byte order before it is handed to the host kernel.  Fixed ifinfomsg /
 * ifaddrmsg / rtmsg fields are swapped, then the rtattr chain is
 * converted.  Unsupported message types return -TARGET_EOPNOTSUPP.
 * NOTE(review): case labels are elided in this excerpt.
 */
2554 static abi_long target_to_host_data_route(struct nlmsghdr *nlh)
2556     struct ifinfomsg *ifi;
2557     struct ifaddrmsg *ifa;
2560     switch (nlh->nlmsg_type) {
2565         if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
2566             ifi = NLMSG_DATA(nlh);
2567             ifi->ifi_type = tswap16(ifi->ifi_type);
2568             ifi->ifi_index = tswap32(ifi->ifi_index);
2569             ifi->ifi_flags = tswap32(ifi->ifi_flags);
2570             ifi->ifi_change = tswap32(ifi->ifi_change);
2571             target_to_host_link_rtattr(IFLA_RTA(ifi), nlh->nlmsg_len -
2572                                        NLMSG_LENGTH(sizeof(*ifi)));
2578         if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
2579             ifa = NLMSG_DATA(nlh);
2580             ifa->ifa_index = tswap32(ifa->ifa_index);
2581             target_to_host_addr_rtattr(IFA_RTA(ifa), nlh->nlmsg_len -
2582                                        NLMSG_LENGTH(sizeof(*ifa)));
2589         if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
2590             rtm = NLMSG_DATA(nlh);
2591             rtm->rtm_flags = tswap32(rtm->rtm_flags);
2592             target_to_host_route_rtattr(RTM_RTA(rtm), nlh->nlmsg_len -
2593                                         NLMSG_LENGTH(sizeof(*rtm)));
2597         return -TARGET_EOPNOTSUPP;
/* Convert every routing-netlink message in a target buffer of 'len'
 * bytes to host byte order. */
2602 static abi_long target_to_host_nlmsg_route(struct nlmsghdr *nlh, size_t len)
2604     return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_route);
2606 #endif /* CONFIG_RTNETLINK */
/* Convert one audit-netlink message payload host->target.  Unknown
 * message types are logged and rejected with -TARGET_EINVAL. */
2608 static abi_long host_to_target_data_audit(struct nlmsghdr *nlh)
2610     switch (nlh->nlmsg_type) {
2612         gemu_log("Unknown host audit message type %d\n",
2614         return -TARGET_EINVAL;
/* Convert every audit-netlink message in a host buffer to target
 * byte order. */
2619 static inline abi_long host_to_target_nlmsg_audit(struct nlmsghdr *nlh,
2622     return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_audit);
/*
 * Convert one audit-netlink message payload target->host.  The
 * user-message ranges (AUDIT_FIRST_USER_MSG..AUDIT_LAST_USER_MSG and
 * the MSG2 range) are accepted; unknown types are logged and rejected
 * with -TARGET_EINVAL.
 */
2625 static abi_long target_to_host_data_audit(struct nlmsghdr *nlh)
2627     switch (nlh->nlmsg_type) {
2629     case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
2630     case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
2633         gemu_log("Unknown target audit message type %d\n",
2635         return -TARGET_EINVAL;
/* Convert every audit-netlink message in a target buffer to host
 * byte order. */
2641 static abi_long target_to_host_nlmsg_audit(struct nlmsghdr *nlh, size_t len)
2643     return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_audit);
2646 /* do_setsockopt() Must return target values and target errnos. */
/*
 * Emulate setsockopt(2) for the guest: fetch option values from guest
 * memory (with get_user_* / lock_user), translate option names and
 * structures (ip_mreq, timeval, sock_fprog, ifname) from target to
 * host format, then issue the host setsockopt.
 * NOTE(review): many case labels/breaks are elided in this excerpt;
 * the level/optname dispatch structure must be read from the full file.
 */
2647 static abi_long do_setsockopt(int sockfd, int level, int optname,
2648                               abi_ulong optval_addr, socklen_t optlen)
2652     struct ip_mreqn *ip_mreq;
2653     struct ip_mreq_source *ip_mreq_source;
2657         /* TCP options all take an 'int' value. */
2658         if (optlen < sizeof(uint32_t))
2659             return -TARGET_EINVAL;
2661         if (get_user_u32(val, optval_addr))
2662             return -TARGET_EFAULT;
2663         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2670         case IP_ROUTER_ALERT:
2674         case IP_MTU_DISCOVER:
2680         case IP_MULTICAST_TTL:
2681         case IP_MULTICAST_LOOP:
/* these IP options accept either a full int or a single byte */
2683             if (optlen >= sizeof(uint32_t)) {
2684                 if (get_user_u32(val, optval_addr))
2685                     return -TARGET_EFAULT;
2686             } else if (optlen >= 1) {
2687                 if (get_user_u8(val, optval_addr))
2688                     return -TARGET_EFAULT;
2690             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2692         case IP_ADD_MEMBERSHIP:
2693         case IP_DROP_MEMBERSHIP:
/* accept either the short ip_mreq or the longer ip_mreqn layout */
2694             if (optlen < sizeof (struct target_ip_mreq) ||
2695                 optlen > sizeof (struct target_ip_mreqn))
2696                 return -TARGET_EINVAL;
2698             ip_mreq = (struct ip_mreqn *) alloca(optlen);
2699             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2700             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2703         case IP_BLOCK_SOURCE:
2704         case IP_UNBLOCK_SOURCE:
2705         case IP_ADD_SOURCE_MEMBERSHIP:
2706         case IP_DROP_SOURCE_MEMBERSHIP:
2707             if (optlen != sizeof (struct target_ip_mreq_source))
2708                 return -TARGET_EINVAL;
/* ip_mreq_source layout matches between target and host: pass through */
2710             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2711             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2712             unlock_user (ip_mreq_source, optval_addr, 0);
2721         case IPV6_MTU_DISCOVER:
2724         case IPV6_RECVPKTINFO:
2726             if (optlen < sizeof(uint32_t)) {
2727                 return -TARGET_EINVAL;
2729             if (get_user_u32(val, optval_addr)) {
2730                 return -TARGET_EFAULT;
2732             ret = get_errno(setsockopt(sockfd, level, optname,
2733                                        &val, sizeof(val)));
2742         /* struct icmp_filter takes an u32 value */
2743         if (optlen < sizeof(uint32_t)) {
2744             return -TARGET_EINVAL;
2747         if (get_user_u32(val, optval_addr)) {
2748             return -TARGET_EFAULT;
2750         ret = get_errno(setsockopt(sockfd, level, optname,
2751                                    &val, sizeof(val)));
2758     case TARGET_SOL_SOCKET:
2760         case TARGET_SO_RCVTIMEO:
2764             optname = SO_RCVTIMEO;
/* timeout options carry a target_timeval that must be converted */
2767             if (optlen != sizeof(struct target_timeval)) {
2768                 return -TARGET_EINVAL;
2771             if (copy_from_user_timeval(&tv, optval_addr)) {
2772                 return -TARGET_EFAULT;
2775             ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2779         case TARGET_SO_SNDTIMEO:
2780             optname = SO_SNDTIMEO;
2782         case TARGET_SO_ATTACH_FILTER:
2784             struct target_sock_fprog *tfprog;
2785             struct target_sock_filter *tfilter;
2786             struct sock_fprog fprog;
2787             struct sock_filter *filter;
2790             if (optlen != sizeof(*tfprog)) {
2791                 return -TARGET_EINVAL;
2793             if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2794                 return -TARGET_EFAULT;
2796             if (!lock_user_struct(VERIFY_READ, tfilter,
2797                                   tswapal(tfprog->filter), 0)) {
2798                 unlock_user_struct(tfprog, optval_addr, 1);
2799                 return -TARGET_EFAULT;
/* rebuild the BPF program in host byte order, one insn at a time */
2802             fprog.len = tswap16(tfprog->len);
2803             filter = g_try_new(struct sock_filter, fprog.len);
2804             if (filter == NULL) {
2805                 unlock_user_struct(tfilter, tfprog->filter, 1);
2806                 unlock_user_struct(tfprog, optval_addr, 1);
2807                 return -TARGET_ENOMEM;
2809             for (i = 0; i < fprog.len; i++) {
2810                 filter[i].code = tswap16(tfilter[i].code);
2811                 filter[i].jt = tfilter[i].jt;
2812                 filter[i].jf = tfilter[i].jf;
2813                 filter[i].k = tswap32(tfilter[i].k);
2815             fprog.filter = filter;
2817             ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2818                             SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2821             unlock_user_struct(tfilter, tfprog->filter, 1);
2822             unlock_user_struct(tfprog, optval_addr, 1);
2825         case TARGET_SO_BINDTODEVICE:
2827             char *dev_ifname, *addr_ifname;
/* clamp to IFNAMSIZ-1 so the copied name can always be terminated */
2829             if (optlen > IFNAMSIZ - 1) {
2830                 optlen = IFNAMSIZ - 1;
2832             dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2834                 return -TARGET_EFAULT;
2836             optname = SO_BINDTODEVICE;
2837             addr_ifname = alloca(IFNAMSIZ);
2838             memcpy(addr_ifname, dev_ifname, optlen);
2839             addr_ifname[optlen] = 0;
2840             ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2841                                            addr_ifname, optlen));
2842             unlock_user (dev_ifname, optval_addr, 0);
2845             /* Options with 'int' argument. */
2846         case TARGET_SO_DEBUG:
2849         case TARGET_SO_REUSEADDR:
2850             optname = SO_REUSEADDR;
2852         case TARGET_SO_TYPE:
2855         case TARGET_SO_ERROR:
2858         case TARGET_SO_DONTROUTE:
2859             optname = SO_DONTROUTE;
2861         case TARGET_SO_BROADCAST:
2862             optname = SO_BROADCAST;
2864         case TARGET_SO_SNDBUF:
2865             optname = SO_SNDBUF;
2867         case TARGET_SO_SNDBUFFORCE:
2868             optname = SO_SNDBUFFORCE;
2870         case TARGET_SO_RCVBUF:
2871             optname = SO_RCVBUF;
2873         case TARGET_SO_RCVBUFFORCE:
2874             optname = SO_RCVBUFFORCE;
2876         case TARGET_SO_KEEPALIVE:
2877             optname = SO_KEEPALIVE;
2879         case TARGET_SO_OOBINLINE:
2880             optname = SO_OOBINLINE;
2882         case TARGET_SO_NO_CHECK:
2883             optname = SO_NO_CHECK;
2885         case TARGET_SO_PRIORITY:
2886             optname = SO_PRIORITY;
2889         case TARGET_SO_BSDCOMPAT:
2890             optname = SO_BSDCOMPAT;
2893         case TARGET_SO_PASSCRED:
2894             optname = SO_PASSCRED;
2896         case TARGET_SO_PASSSEC:
2897             optname = SO_PASSSEC;
2899         case TARGET_SO_TIMESTAMP:
2900             optname = SO_TIMESTAMP;
2902         case TARGET_SO_RCVLOWAT:
2903             optname = SO_RCVLOWAT;
/* common tail for all plain-int SOL_SOCKET options */
2909         if (optlen < sizeof(uint32_t))
2910             return -TARGET_EINVAL;
2912         if (get_user_u32(val, optval_addr))
2913             return -TARGET_EFAULT;
2914         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2918         gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
2919         ret = -TARGET_ENOPROTOOPT;
2924 /* do_getsockopt() Must return target values and target errnos. */
/*
 * Emulate getsockopt(2) for the guest: read the guest's optlen, call
 * the host getsockopt, then write the (possibly byte-swapped and
 * structure-converted) result and updated length back to guest memory.
 * NOTE(review): several case labels/breaks are elided in this excerpt.
 */
2925 static abi_long do_getsockopt(int sockfd, int level, int optname,
2926                               abi_ulong optval_addr, abi_ulong optlen)
2933     case TARGET_SOL_SOCKET:
2936         /* These don't just return a single integer */
2937         case TARGET_SO_LINGER:
2938         case TARGET_SO_RCVTIMEO:
2939         case TARGET_SO_SNDTIMEO:
2940         case TARGET_SO_PEERNAME:
2942         case TARGET_SO_PEERCRED: {
2945             struct target_ucred *tcr;
2947             if (get_user_u32(len, optlen)) {
2948                 return -TARGET_EFAULT;
2951                 return -TARGET_EINVAL;
2955             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
/* copy the host ucred into the target's layout field by field */
2963             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2964                 return -TARGET_EFAULT;
2966             __put_user(cr.pid, &tcr->pid);
2967             __put_user(cr.uid, &tcr->uid);
2968             __put_user(cr.gid, &tcr->gid);
2969             unlock_user_struct(tcr, optval_addr, 1);
2970             if (put_user_u32(len, optlen)) {
2971                 return -TARGET_EFAULT;
2975         /* Options with 'int' argument. */
2976         case TARGET_SO_DEBUG:
2979         case TARGET_SO_REUSEADDR:
2980             optname = SO_REUSEADDR;
2982         case TARGET_SO_TYPE:
2985         case TARGET_SO_ERROR:
2988         case TARGET_SO_DONTROUTE:
2989             optname = SO_DONTROUTE;
2991         case TARGET_SO_BROADCAST:
2992             optname = SO_BROADCAST;
2994         case TARGET_SO_SNDBUF:
2995             optname = SO_SNDBUF;
2997         case TARGET_SO_RCVBUF:
2998             optname = SO_RCVBUF;
3000         case TARGET_SO_KEEPALIVE:
3001             optname = SO_KEEPALIVE;
3003         case TARGET_SO_OOBINLINE:
3004             optname = SO_OOBINLINE;
3006         case TARGET_SO_NO_CHECK:
3007             optname = SO_NO_CHECK;
3009         case TARGET_SO_PRIORITY:
3010             optname = SO_PRIORITY;
3013         case TARGET_SO_BSDCOMPAT:
3014             optname = SO_BSDCOMPAT;
3017         case TARGET_SO_PASSCRED:
3018             optname = SO_PASSCRED;
3020         case TARGET_SO_TIMESTAMP:
3021             optname = SO_TIMESTAMP;
3023         case TARGET_SO_RCVLOWAT:
3024             optname = SO_RCVLOWAT;
3026         case TARGET_SO_ACCEPTCONN:
3027             optname = SO_ACCEPTCONN;
3034         /* TCP options all take an 'int' value. */
3036         if (get_user_u32(len, optlen))
3037             return -TARGET_EFAULT;
3039             return -TARGET_EINVAL;
3041         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
/* SO_TYPE's value is a socket type and needs host->target mapping */
3044         if (optname == SO_TYPE) {
3045             val = host_to_target_sock_type(val);
3050             if (put_user_u32(val, optval_addr))
3051                 return -TARGET_EFAULT;
3053             if (put_user_u8(val, optval_addr))
3054                 return -TARGET_EFAULT;
3056             if (put_user_u32(len, optlen))
3057                 return -TARGET_EFAULT;
3064         case IP_ROUTER_ALERT:
3068         case IP_MTU_DISCOVER:
3074         case IP_MULTICAST_TTL:
3075         case IP_MULTICAST_LOOP:
3076             if (get_user_u32(len, optlen))
3077                 return -TARGET_EFAULT;
3079                 return -TARGET_EINVAL;
3081             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
/* a short buffer gets a single byte back when the value fits */
3084             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
3086                 if (put_user_u32(len, optlen)
3087                     || put_user_u8(val, optval_addr))
3088                     return -TARGET_EFAULT;
3090                 if (len > sizeof(int))
3092                 if (put_user_u32(len, optlen)
3093                     || put_user_u32(val, optval_addr))
3094                     return -TARGET_EFAULT;
3098             ret = -TARGET_ENOPROTOOPT;
3104         gemu_log("getsockopt level=%d optname=%d not yet supported\n",
3106         ret = -TARGET_EOPNOTSUPP;
/*
 * Build a host iovec array from a guest target_iovec array at
 * target_addr, locking each guest buffer into host memory.
 * 'type' is VERIFY_READ/VERIFY_WRITE; 'copy' controls whether guest
 * data is copied in.  Returns the host vector, or NULL on failure
 * (count > IOV_MAX, allocation failure, or a bad first buffer).
 * NOTE(review): the error-unwind lines are partially elided here;
 * consult the full file for the exact failure paths.
 */
3112 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
3113                                 abi_ulong count, int copy)
3115     struct target_iovec *target_vec;
3117     abi_ulong total_len, max_len;
3120     bool bad_address = false;
3126     if (count > IOV_MAX) {
3131     vec = g_try_new0(struct iovec, count);
3137     target_vec = lock_user(VERIFY_READ, target_addr,
3138                            count * sizeof(struct target_iovec), 1);
3139     if (target_vec == NULL) {
3144     /* ??? If host page size > target page size, this will result in a
3145        value larger than what we can actually support. */
3146     max_len = 0x7fffffff & TARGET_PAGE_MASK;
3149     for (i = 0; i < count; i++) {
3150         abi_ulong base = tswapal(target_vec[i].iov_base);
3151         abi_long len = tswapal(target_vec[i].iov_len);
3156         } else if (len == 0) {
3157             /* Zero length pointer is ignored. */
3158             vec[i].iov_base = 0;
3160             vec[i].iov_base = lock_user(type, base, len, copy);
3161             /* If the first buffer pointer is bad, this is a fault.  But
3162              * subsequent bad buffers will result in a partial write; this
3163              * is realized by filling the vector with null pointers and
3165             if (!vec[i].iov_base) {
/* cap the running total at max_len, truncating the final entry */
3176             if (len > max_len - total_len) {
3177                 len = max_len - total_len;
3180             vec[i].iov_len = len;
3184     unlock_user(target_vec, target_addr, 0);
/* failure path: release any buffers locked so far */
3189         if (tswapal(target_vec[i].iov_len) > 0) {
3190             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3193     unlock_user(target_vec, target_addr, 0);
/*
 * Release a host iovec built by lock_iovec, unlocking each guest
 * buffer.  When 'copy' is set, each buffer's iov_len bytes are copied
 * back to guest memory (for reads into the guest).
 */
3200 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3201                          abi_ulong count, int copy)
3203     struct target_iovec *target_vec;
3206     target_vec = lock_user(VERIFY_READ, target_addr,
3207                            count * sizeof(struct target_iovec), 1);
3209         for (i = 0; i < count; i++) {
3210             abi_ulong base = tswapal(target_vec[i].iov_base);
3211             abi_long len = tswapal(target_vec[i].iov_len);
3215             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3217         unlock_user(target_vec, target_addr, 0);
/*
 * Translate a target socket type (base SOCK_* value plus the
 * TARGET_SOCK_CLOEXEC / TARGET_SOCK_NONBLOCK flag bits) into the
 * host's encoding, rewriting *type in place.  Flags the host cannot
 * express (no SOCK_CLOEXEC, or neither SOCK_NONBLOCK nor O_NONBLOCK)
 * yield -TARGET_EINVAL; see sock_flags_fixup for the O_NONBLOCK
 * fallback applied after socket creation.
 */
3223 static inline int target_to_host_sock_type(int *type)
3226     int target_type = *type;
3228     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3229     case TARGET_SOCK_DGRAM:
3230         host_type = SOCK_DGRAM;
3232     case TARGET_SOCK_STREAM:
3233         host_type = SOCK_STREAM;
/* other base types share the same numbering on host and target */
3236         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3239     if (target_type & TARGET_SOCK_CLOEXEC) {
3240 #if defined(SOCK_CLOEXEC)
3241         host_type |= SOCK_CLOEXEC;
3243         return -TARGET_EINVAL;
3246     if (target_type & TARGET_SOCK_NONBLOCK) {
3247 #if defined(SOCK_NONBLOCK)
3248         host_type |= SOCK_NONBLOCK;
3249 #elif !defined(O_NONBLOCK)
3250         return -TARGET_EINVAL;
3257 /* Try to emulate socket type flags after socket creation. */
/* On hosts without SOCK_NONBLOCK, apply O_NONBLOCK with fcntl after
 * the socket exists; fails with -TARGET_EINVAL if fcntl fails. */
3258 static int sock_flags_fixup(int fd, int target_type)
3260 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3261     if (target_type & TARGET_SOCK_NONBLOCK) {
3262         int flags = fcntl(fd, F_GETFL);
3263         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3265             return -TARGET_EINVAL;
/*
 * Convert a guest sockaddr for an AF_PACKET/SOCK_PACKET socket into
 * host form: copy the raw bytes, then fix the sa_family byte order.
 * The protocol field stays untouched (it is big-endian on the wire).
 */
3272 static abi_long packet_target_to_host_sockaddr(void *host_addr,
3273                                                abi_ulong target_addr,
3276     struct sockaddr *addr = host_addr;
3277     struct target_sockaddr *target_saddr;
3279     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
3280     if (!target_saddr) {
3281         return -TARGET_EFAULT;
3284     memcpy(addr, target_saddr, len);
3285     addr->sa_family = tswap16(target_saddr->sa_family);
3286     /* spkt_protocol is big-endian */
3288     unlock_user(target_saddr, target_addr, 0);
/* fd translator registered for SOCK_PACKET fds: only sockaddr
 * conversion is needed for this socket family. */
3292 static TargetFdTrans target_packet_trans = {
3293     .target_to_host_addr = packet_target_to_host_sockaddr,
3296 #ifdef CONFIG_RTNETLINK
/* fd-translator hook: convert outgoing NETLINK_ROUTE data to host
 * byte order before it reaches the host kernel. */
3297 static abi_long netlink_route_target_to_host(void *buf, size_t len)
3301     ret = target_to_host_nlmsg_route(buf, len);
/* fd-translator hook: convert incoming NETLINK_ROUTE data to target
 * byte order before it is copied to the guest. */
3309 static abi_long netlink_route_host_to_target(void *buf, size_t len)
3313     ret = host_to_target_nlmsg_route(buf, len);
/* fd translator registered for NETLINK_ROUTE sockets: data is
 * converted in both directions. */
3321 static TargetFdTrans target_netlink_route_trans = {
3322     .target_to_host_data = netlink_route_target_to_host,
3323     .host_to_target_data = netlink_route_host_to_target,
3325 #endif /* CONFIG_RTNETLINK */
/* fd-translator hook: convert outgoing NETLINK_AUDIT data to host
 * byte order. */
3327 static abi_long netlink_audit_target_to_host(void *buf, size_t len)
3331     ret = target_to_host_nlmsg_audit(buf, len);
/* fd-translator hook: convert incoming NETLINK_AUDIT data to target
 * byte order. */
3339 static abi_long netlink_audit_host_to_target(void *buf, size_t len)
3343     ret = host_to_target_nlmsg_audit(buf, len);
/* fd translator registered for NETLINK_AUDIT sockets. */
3351 static TargetFdTrans target_netlink_audit_trans = {
3352     .target_to_host_data = netlink_audit_target_to_host,
3353     .host_to_target_data = netlink_audit_host_to_target,
3356 /* do_socket() Must return target values and target errnos. */
/*
 * Emulate socket(2): translate the type flags, restrict PF_NETLINK to
 * the protocols QEMU can translate (ROUTE when CONFIG_RTNETLINK,
 * KOBJECT_UEVENT, AUDIT), byte-swap the protocol for packet sockets,
 * create the host socket, then register the matching fd translator.
 */
3357 static abi_long do_socket(int domain, int type, int protocol)
3359     int target_type = type;
3362     ret = target_to_host_sock_type(&type);
3367     if (domain == PF_NETLINK && !(
3368 #ifdef CONFIG_RTNETLINK
3369           protocol == NETLINK_ROUTE ||
3371           protocol == NETLINK_KOBJECT_UEVENT ||
3372           protocol == NETLINK_AUDIT)) {
3373         return -EPFNOSUPPORT;
/* packet sockets take a big-endian protocol number */
3376     if (domain == AF_PACKET ||
3377         (domain == AF_INET && type == SOCK_PACKET)) {
3378         protocol = tswap16(protocol);
3381     ret = get_errno(socket(domain, type, protocol));
3383         ret = sock_flags_fixup(ret, target_type);
3384         if (type == SOCK_PACKET) {
3385             /* Manage an obsolete case :
3386              * if socket type is SOCK_PACKET, bind by name
3388             fd_trans_register(ret, &target_packet_trans);
3389         } else if (domain == PF_NETLINK) {
3391 #ifdef CONFIG_RTNETLINK
3393                 fd_trans_register(ret, &target_netlink_route_trans);
3396             case NETLINK_KOBJECT_UEVENT:
3397                 /* nothing to do: messages are strings */
3400                 fd_trans_register(ret, &target_netlink_audit_trans);
/* protocol was validated above, so any other value is a logic error */
3403                 g_assert_not_reached();
3410 /* do_bind() Must return target values and target errnos. */
/* Emulate bind(2): convert the guest sockaddr to host form on the
 * stack, then issue the host bind. */
3411 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3417     if ((int)addrlen < 0) {
3418         return -TARGET_EINVAL;
/* +1 leaves room for families that need a trailing NUL (e.g. AF_UNIX) */
3421     addr = alloca(addrlen+1);
3423     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3427     return get_errno(bind(sockfd, addr, addrlen));
3430 /* do_connect() Must return target values and target errnos. */
/* Emulate connect(2): same sockaddr conversion as do_bind, but the
 * host call goes through safe_connect so it can be restarted. */
3431 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3437     if ((int)addrlen < 0) {
3438         return -TARGET_EINVAL;
3441     addr = alloca(addrlen+1);
3443     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3447     return get_errno(safe_connect(sockfd, addr, addrlen));
3450 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
/*
 * Core of sendmsg/recvmsg emulation for an already-locked
 * target_msghdr: converts msg_name, control data, and the iovec, runs
 * the fd translator's data hook if one is registered, then calls
 * safe_sendmsg or safe_recvmsg ('send' selects the direction).
 */
3451 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3452                                       int flags, int send)
3458     abi_ulong target_vec;
3460     if (msgp->msg_name) {
3461         msg.msg_namelen = tswap32(msgp->msg_namelen);
3462         msg.msg_name = alloca(msg.msg_namelen+1);
3463         ret = target_to_host_sockaddr(fd, msg.msg_name,
3464                                       tswapal(msgp->msg_name),
3466         if (ret == -TARGET_EFAULT) {
3467             /* For connected sockets msg_name and msg_namelen must
3468              * be ignored, so returning EFAULT immediately is wrong.
3469              * Instead, pass a bad msg_name to the host kernel, and
3470              * let it decide whether to return EFAULT or not.
3472             msg.msg_name = (void *)-1;
3477         msg.msg_name = NULL;
3478         msg.msg_namelen = 0;
/* 2x: host cmsg layout may need more room than the target's */
3480     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3481     msg.msg_control = alloca(msg.msg_controllen);
3482     msg.msg_flags = tswap32(msgp->msg_flags);
3484     count = tswapal(msgp->msg_iovlen);
3485     target_vec = tswapal(msgp->msg_iov);
3487     if (count > IOV_MAX) {
3488         /* sendrcvmsg returns a different errno for this condition than
3489          * readv/writev, so we must catch it here before lock_iovec() does.
3491         ret = -TARGET_EMSGSIZE;
3495     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3496                      target_vec, count, send);
3498         ret = -host_to_target_errno(errno);
3501     msg.msg_iovlen = count;
3505         if (fd_trans_target_to_host_data(fd)) {
/* translate outgoing payload in a scratch copy, then send that */
3508             host_msg = g_malloc(msg.msg_iov->iov_len);
3509             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3510             ret = fd_trans_target_to_host_data(fd)(host_msg,
3511                                                    msg.msg_iov->iov_len);
3513                 msg.msg_iov->iov_base = host_msg;
3514                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3518             ret = target_to_host_cmsg(&msg, msgp);
3520                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3524         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3525         if (!is_error(ret)) {
3527             if (fd_trans_host_to_target_data(fd)) {
3528                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3531                 ret = host_to_target_cmsg(msgp, &msg);
3533             if (!is_error(ret)) {
3534                 msgp->msg_namelen = tswap32(msg.msg_namelen);
/* skip the (void *)-1 sentinel set on the bad-msg_name path above */
3535                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3536                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3537                                                   msg.msg_name, msg.msg_namelen);
3549     unlock_iovec(vec, target_vec, count, !send);
/* Lock the guest's target_msghdr, delegate to do_sendrecvmsg_locked,
 * and unlock — copying the struct back to the guest only on receive. */
3554 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3555                                int flags, int send)
3558     struct target_msghdr *msgp;
3560     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3564         return -TARGET_EFAULT;
3566     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3567     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3571 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3572 * so it might not have this *mmsg-specific flag either.
3574 #ifndef MSG_WAITFORONE
3575 #define MSG_WAITFORONE 0x10000
/*
 * Emulate sendmmsg/recvmmsg by looping do_sendrecvmsg_locked over a
 * locked array of target_mmsghdr, storing each per-message byte count
 * in msg_len.  MSG_WAITFORONE switches to MSG_DONTWAIT after the first
 * received packet, matching the kernel's semantics.
 */
3578 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3579                                 unsigned int vlen, unsigned int flags,
3582     struct target_mmsghdr *mmsgp;
3586     if (vlen > UIO_MAXIOV) {
3590     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3592         return -TARGET_EFAULT;
3595     for (i = 0; i < vlen; i++) {
3596         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3597         if (is_error(ret)) {
3600         mmsgp[i].msg_len = tswap32(ret);
3601         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3602         if (flags & MSG_WAITFORONE) {
3603             flags |= MSG_DONTWAIT;
/* copy back only the i entries actually processed */
3607     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3609     /* Return number of datagrams sent if we sent any at all;
3610      * otherwise return the error.
3618 /* do_accept4() Must return target values and target errnos. */
/*
 * Emulate accept4(2): translate the flag bits, handle the NULL-addr
 * fast path, validate the guest's addrlen, then call safe_accept4 and
 * copy the peer address and updated length back to guest memory.
 */
3619 static abi_long do_accept4(int fd, abi_ulong target_addr,
3620                            abi_ulong target_addrlen_addr, int flags)
3627     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3629     if (target_addr == 0) {
3630         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3633     /* linux returns EINVAL if addrlen pointer is invalid */
3634     if (get_user_u32(addrlen, target_addrlen_addr))
3635         return -TARGET_EINVAL;
3637     if ((int)addrlen < 0) {
3638         return -TARGET_EINVAL;
3641     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3642         return -TARGET_EINVAL;
3644     addr = alloca(addrlen);
3646     ret = get_errno(safe_accept4(fd, addr, &addrlen, host_flags));
3647     if (!is_error(ret)) {
3648         host_to_target_sockaddr(target_addr, addr, addrlen);
3649         if (put_user_u32(addrlen, target_addrlen_addr))
3650             ret = -TARGET_EFAULT;
3655 /* do_getpeername() Must return target values and target errnos. */
/* Emulate getpeername(2): validate the guest addrlen, call the host,
 * and copy the address and length back to the guest. */
3656 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3657                                abi_ulong target_addrlen_addr)
3663     if (get_user_u32(addrlen, target_addrlen_addr))
3664         return -TARGET_EFAULT;
3666     if ((int)addrlen < 0) {
3667         return -TARGET_EINVAL;
3670     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3671         return -TARGET_EFAULT;
3673     addr = alloca(addrlen);
3675     ret = get_errno(getpeername(fd, addr, &addrlen));
3676     if (!is_error(ret)) {
3677         host_to_target_sockaddr(target_addr, addr, addrlen);
3678         if (put_user_u32(addrlen, target_addrlen_addr))
3679             ret = -TARGET_EFAULT;
3684 /* do_getsockname() Must return target values and target errnos. */
/* Emulate getsockname(2): same shape as do_getpeername, with the
 * host getsockname call. */
3685 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3686                                abi_ulong target_addrlen_addr)
3692     if (get_user_u32(addrlen, target_addrlen_addr))
3693         return -TARGET_EFAULT;
3695     if ((int)addrlen < 0) {
3696         return -TARGET_EINVAL;
3699     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3700         return -TARGET_EFAULT;
3702     addr = alloca(addrlen);
3704     ret = get_errno(getsockname(fd, addr, &addrlen));
3705     if (!is_error(ret)) {
3706         host_to_target_sockaddr(target_addr, addr, addrlen);
3707         if (put_user_u32(addrlen, target_addrlen_addr))
3708             ret = -TARGET_EFAULT;
3713 /* do_socketpair() Must return target values and target errnos. */
/* Emulate socketpair(2): translate the type flags, create the host
 * pair, and store both fds back-to-back in guest memory. */
3714 static abi_long do_socketpair(int domain, int type, int protocol,
3715                               abi_ulong target_tab_addr)
3720     target_to_host_sock_type(&type);
3722     ret = get_errno(socketpair(domain, type, protocol, tab));
3723     if (!is_error(ret)) {
3724         if (put_user_s32(tab[0], target_tab_addr)
3725             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3726             ret = -TARGET_EFAULT;
3731 /* do_sendto() Must return target values and target errnos. */
/*
 * Emulate sendto(2)/send(2): lock the guest message buffer, apply the
 * fd translator's outbound data hook on a scratch copy if registered,
 * convert the optional destination sockaddr, and call safe_sendto.
 * target_addr == 0 selects the connected-socket (send) form.
 */
3732 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3733                           abi_ulong target_addr, socklen_t addrlen)
3737     void *copy_msg = NULL;
3740     if ((int)addrlen < 0) {
3741         return -TARGET_EINVAL;
3744     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3746         return -TARGET_EFAULT;
3747     if (fd_trans_target_to_host_data(fd)) {
/* translate in a heap copy so the locked guest buffer stays intact */
3748         copy_msg = host_msg;
3749         host_msg = g_malloc(len);
3750         memcpy(host_msg, copy_msg, len);
3751         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3757         addr = alloca(addrlen+1);
3758         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3762         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3764         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
/* restore the original pointer so unlock_user sees the locked buffer */
3769         host_msg = copy_msg;
3771     unlock_user(host_msg, msg, 0);
3775 /* do_recvfrom() Must return target values and target errnos. */
/*
 * Emulate recvfrom(2)/recv(2): lock the guest buffer for writing,
 * call safe_recvfrom (with or without a source-address buffer
 * depending on target_addr), run the inbound fd-translator hook, and
 * copy the source sockaddr and its length back to the guest.
 */
3776 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3777                             abi_ulong target_addr,
3778                             abi_ulong target_addrlen)
3785     host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3787         return -TARGET_EFAULT;
3789         if (get_user_u32(addrlen, target_addrlen)) {
3790             ret = -TARGET_EFAULT;
3793         if ((int)addrlen < 0) {
3794             ret = -TARGET_EINVAL;
3797         addr = alloca(addrlen);
3798         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3801         addr = NULL; /* To keep compiler quiet.  */
3802         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3804     if (!is_error(ret)) {
3805         if (fd_trans_host_to_target_data(fd)) {
3806             ret = fd_trans_host_to_target_data(fd)(host_msg, ret);
3809             host_to_target_sockaddr(target_addr, addr, addrlen);
3810             if (put_user_u32(addrlen, target_addrlen)) {
3811                 ret = -TARGET_EFAULT;
/* success: copy the received bytes back; failure: discard */
3815         unlock_user(host_msg, msg, len);
3818         unlock_user(host_msg, msg, 0);
3823 #ifdef TARGET_NR_socketcall
3824 /* do_socketcall() Must return target values and target errnos. */
/*
 * Demultiplex the legacy socketcall(2) syscall: fetch the per-call
 * argument count from ac[], read that many abi_long arguments from
 * guest memory at vptr, then dispatch to the matching do_* helper.
 */
3825 static abi_long do_socketcall(int num, abi_ulong vptr)
3827     static const unsigned ac[] = { /* number of arguments per call */
3828         [SOCKOP_socket] = 3,      /* domain, type, protocol */
3829         [SOCKOP_bind] = 3,        /* sockfd, addr, addrlen */
3830         [SOCKOP_connect] = 3,     /* sockfd, addr, addrlen */
3831         [SOCKOP_listen] = 2,      /* sockfd, backlog */
3832         [SOCKOP_accept] = 3,      /* sockfd, addr, addrlen */
3833         [SOCKOP_accept4] = 4,     /* sockfd, addr, addrlen, flags */
3834         [SOCKOP_getsockname] = 3, /* sockfd, addr, addrlen */
3835         [SOCKOP_getpeername] = 3, /* sockfd, addr, addrlen */
3836         [SOCKOP_socketpair] = 4,  /* domain, type, protocol, tab */
3837         [SOCKOP_send] = 4,        /* sockfd, msg, len, flags */
3838         [SOCKOP_recv] = 4,        /* sockfd, msg, len, flags */
3839         [SOCKOP_sendto] = 6,      /* sockfd, msg, len, flags, addr, addrlen */
3840         [SOCKOP_recvfrom] = 6,    /* sockfd, msg, len, flags, addr, addrlen */
3841         [SOCKOP_shutdown] = 2,    /* sockfd, how */
3842         [SOCKOP_sendmsg] = 3,     /* sockfd, msg, flags */
3843         [SOCKOP_recvmsg] = 3,     /* sockfd, msg, flags */
3844         [SOCKOP_sendmmsg] = 4,    /* sockfd, msgvec, vlen, flags */
3845         [SOCKOP_recvmmsg] = 4,    /* sockfd, msgvec, vlen, flags */
3846         [SOCKOP_setsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
3847         [SOCKOP_getsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
3849     abi_long a[6]; /* max 6 args */
3851     /* first, collect the arguments in a[] according to ac[] */
3852     if (num >= 0 && num < ARRAY_SIZE(ac)) {
3854         assert(ARRAY_SIZE(a) >= ac[num]); /* ensure we have space for args */
3855         for (i = 0; i < ac[num]; ++i) {
3856             if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3857                 return -TARGET_EFAULT;
3862     /* now when we have the args, actually handle the call */
3864     case SOCKOP_socket: /* domain, type, protocol */
3865         return do_socket(a[0], a[1], a[2]);
3866     case SOCKOP_bind: /* sockfd, addr, addrlen */
3867         return do_bind(a[0], a[1], a[2]);
3868     case SOCKOP_connect: /* sockfd, addr, addrlen */
3869         return do_connect(a[0], a[1], a[2]);
3870     case SOCKOP_listen: /* sockfd, backlog */
3871         return get_errno(listen(a[0], a[1]));
3872     case SOCKOP_accept: /* sockfd, addr, addrlen */
3873         return do_accept4(a[0], a[1], a[2], 0);
3874     case SOCKOP_accept4: /* sockfd, addr, addrlen, flags */
3875         return do_accept4(a[0], a[1], a[2], a[3]);
3876     case SOCKOP_getsockname: /* sockfd, addr, addrlen */
3877         return do_getsockname(a[0], a[1], a[2]);
3878     case SOCKOP_getpeername: /* sockfd, addr, addrlen */
3879         return do_getpeername(a[0], a[1], a[2]);
3880     case SOCKOP_socketpair: /* domain, type, protocol, tab */
3881         return do_socketpair(a[0], a[1], a[2], a[3]);
3882     case SOCKOP_send: /* sockfd, msg, len, flags */
3883         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3884     case SOCKOP_recv: /* sockfd, msg, len, flags */
3885         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3886     case SOCKOP_sendto: /* sockfd, msg, len, flags, addr, addrlen */
3887         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3888     case SOCKOP_recvfrom: /* sockfd, msg, len, flags, addr, addrlen */
3889         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3890     case SOCKOP_shutdown: /* sockfd, how */
3891         return get_errno(shutdown(a[0], a[1]));
3892     case SOCKOP_sendmsg: /* sockfd, msg, flags */
3893         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3894     case SOCKOP_recvmsg: /* sockfd, msg, flags */
3895         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3896     case SOCKOP_sendmmsg: /* sockfd, msgvec, vlen, flags */
3897         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3898     case SOCKOP_recvmmsg: /* sockfd, msgvec, vlen, flags */
3899         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3900     case SOCKOP_setsockopt: /* sockfd, level, optname, optval, optlen */
3901         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3902     case SOCKOP_getsockopt: /* sockfd, level, optname, optval, optlen */
3903         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3905         gemu_log("Unsupported socketcall: %d\n", num);
3906         return -TARGET_ENOSYS;
3911 #define N_SHM_REGIONS 32
/* Fixed-size table of guest shared-memory mappings (fields elided in
 * this excerpt); presumably indexed/scanned by the shmat/shmdt
 * emulation — confirm against the full file. */
2913 static struct shm_region {
2917 } shm_regions[N_SHM_REGIONS];
3919 #ifndef TARGET_SEMID64_DS
3920 /* asm-generic version of this struct */
/* Fallback layout of the target's semid64_ds for architectures that
 * do not define their own: on 32-bit ABIs each time field is padded
 * to 64 bits by an adjacent unused slot. */
3921 struct target_semid64_ds
3923     struct target_ipc_perm sem_perm;
3924     abi_ulong sem_otime;
3925 #if TARGET_ABI_BITS == 32
3926     abi_ulong __unused1;
3928     abi_ulong sem_ctime;
3929 #if TARGET_ABI_BITS == 32
3930     abi_ulong __unused2;
3932     abi_ulong sem_nsems;
3933     abi_ulong __unused3;
3934     abi_ulong __unused4;
/*
 * Copy the ipc_perm embedded in a guest semid64_ds into a host
 * struct ipc_perm, byte-swapping each field.  mode and __seq are
 * 32-bit on Alpha/MIPS/PPC (mode) and PPC (__seq), 16-bit elsewhere.
 */
3938 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3939                                                abi_ulong target_addr)
3941     struct target_ipc_perm *target_ip;
3942     struct target_semid64_ds *target_sd;
3944     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3945         return -TARGET_EFAULT;
3946     target_ip = &(target_sd->sem_perm);
3947     host_ip->__key = tswap32(target_ip->__key);
3948     host_ip->uid = tswap32(target_ip->uid);
3949     host_ip->gid = tswap32(target_ip->gid);
3950     host_ip->cuid = tswap32(target_ip->cuid);
3951     host_ip->cgid = tswap32(target_ip->cgid);
3952 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3953     host_ip->mode = tswap32(target_ip->mode);
3955     host_ip->mode = tswap16(target_ip->mode);
3957 #if defined(TARGET_PPC)
3958     host_ip->__seq = tswap32(target_ip->__seq);
3960     host_ip->__seq = tswap16(target_ip->__seq);
3962     unlock_user_struct(target_sd, target_addr, 0);
/* Inverse of target_to_host_ipc_perm(): write the host ipc_perm back
 * into the guest's target_semid64_ds at target_addr. */
3966 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3967 struct ipc_perm *host_ip)
3969 struct target_ipc_perm *target_ip;
3970 struct target_semid64_ds *target_sd;
3972 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3973 return -TARGET_EFAULT;
3974 target_ip = &(target_sd->sem_perm);
3975 target_ip->__key = tswap32(host_ip->__key);
3976 target_ip->uid = tswap32(host_ip->uid);
3977 target_ip->gid = tswap32(host_ip->gid);
3978 target_ip->cuid = tswap32(host_ip->cuid);
3979 target_ip->cgid = tswap32(host_ip->cgid);
/* Field widths mirror the target-to-host direction above. */
3980 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3981 target_ip->mode = tswap32(host_ip->mode);
3983 target_ip->mode = tswap16(host_ip->mode);
3985 #if defined(TARGET_PPC)
3986 target_ip->__seq = tswap32(host_ip->__seq);
3988 target_ip->__seq = tswap16(host_ip->__seq);
3990 unlock_user_struct(target_sd, target_addr, 1);
/* Convert a guest semid64_ds at target_addr into a host semid_ds,
 * delegating the embedded ipc_perm to target_to_host_ipc_perm(). */
3994 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3995 abi_ulong target_addr)
3997 struct target_semid64_ds *target_sd;
3999 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4000 return -TARGET_EFAULT;
4001 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
4002 return -TARGET_EFAULT;
4003 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
4004 host_sd->sem_otime = tswapal(target_sd->sem_otime);
4005 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
4006 unlock_user_struct(target_sd, target_addr, 0);
/* Inverse of target_to_host_semid_ds(): copy a host semid_ds back into
 * the guest's semid64_ds at target_addr. */
4010 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
4011 struct semid_ds *host_sd)
4013 struct target_semid64_ds *target_sd;
4015 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4016 return -TARGET_EFAULT;
4017 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
4018 return -TARGET_EFAULT;
4019 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
4020 target_sd->sem_otime = tswapal(host_sd->sem_otime);
4021 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
4022 unlock_user_struct(target_sd, target_addr, 1);
/* Guest layout of struct seminfo (fields elided in this listing;
 * see host_to_target_seminfo() below for the member names). */
4026 struct target_seminfo {
/* Copy a host struct seminfo (semctl IPC_INFO/SEM_INFO result) out to
 * the guest, swapping each field via __put_user. */
4039 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
4040 struct seminfo *host_seminfo)
4042 struct target_seminfo *target_seminfo;
4043 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
4044 return -TARGET_EFAULT;
4045 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
4046 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
4047 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
4048 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
4049 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
4050 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
4051 __put_user(host_seminfo->semume, &target_seminfo->semume);
4052 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
4053 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
4054 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
4055 unlock_user_struct(target_seminfo, target_addr, 1);
/* Host-side union semun members (the union header and the guest-side
 * target_semun body are elided in this listing). */
4061 struct semid_ds *buf;
4062 unsigned short *array;
4063 struct seminfo *__buf;
4066 union target_semun {
/* Allocate and fill a host semaphore-value array (for semctl SETALL)
 * from guest memory.  Queries IPC_STAT first to learn nsems.  On
 * success *host_array is g_try_new()-allocated; caller must free it. */
4073 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
4074 abi_ulong target_addr)
4077 unsigned short *array;
4079 struct semid_ds semid_ds;
4082 semun.buf = &semid_ds;
4084 ret = semctl(semid, 0, IPC_STAT, semun);
4086 return get_errno(ret);
4088 nsems = semid_ds.sem_nsems;
4090 *host_array = g_try_new(unsigned short, nsems);
4092 return -TARGET_ENOMEM;
4094 array = lock_user(VERIFY_READ, target_addr,
4095 nsems*sizeof(unsigned short), 1);
/* lock_user failure path: free the just-allocated host array. */
4097 g_free(*host_array);
4098 return -TARGET_EFAULT;
4101 for(i=0; i<nsems; i++) {
4102 __get_user((*host_array)[i], &array[i]);
4104 unlock_user(array, target_addr, 0);
/* Copy the host semaphore-value array (semctl GETALL result) back to
 * guest memory and release it with g_free(). */
4109 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
4110 unsigned short **host_array)
4113 unsigned short *array;
4115 struct semid_ds semid_ds;
4118 semun.buf = &semid_ds;
/* Re-query nsems so we know how many entries to copy out. */
4120 ret = semctl(semid, 0, IPC_STAT, semun);
4122 return get_errno(ret);
4124 nsems = semid_ds.sem_nsems;
4126 array = lock_user(VERIFY_WRITE, target_addr,
4127 nsems*sizeof(unsigned short), 0);
4129 return -TARGET_EFAULT;
4131 for(i=0; i<nsems; i++) {
4132 __put_user((*host_array)[i], &array[i]);
4134 g_free(*host_array);
4135 unlock_user(array, target_addr, 1);
/* Emulate semctl(2).  target_arg is the guest's union semun passed by
 * value; each cmd class converts its argument with the helpers above.
 * (The switch/case labels are elided in this listing.) */
4140 static inline abi_long do_semctl(int semid, int semnum, int cmd,
4141 abi_ulong target_arg)
4143 union target_semun target_su = { .buf = target_arg };
4145 struct semid_ds dsarg;
4146 unsigned short *array = NULL;
4147 struct seminfo seminfo;
4148 abi_long ret = -TARGET_EINVAL;
4155 /* In 64 bit cross-endian situations, we will erroneously pick up
4156 * the wrong half of the union for the "val" element. To rectify
4157 * this, the entire 8-byte structure is byteswapped, followed by
4158 * a swap of the 4 byte val field. In other cases, the data is
4159 * already in proper host byte order. */
4160 if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4161 target_su.buf = tswapal(target_su.buf);
4162 arg.val = tswap32(target_su.val);
4164 arg.val = target_su.val;
4166 ret = get_errno(semctl(semid, semnum, cmd, arg));
/* GETALL/SETALL: round-trip the semaphore value array. */
4170 err = target_to_host_semarray(semid, &array, target_su.array);
4174 ret = get_errno(semctl(semid, semnum, cmd, arg));
4175 err = host_to_target_semarray(semid, target_su.array, &array);
/* IPC_STAT/IPC_SET: round-trip semid_ds. */
4182 err = target_to_host_semid_ds(&dsarg, target_su.buf);
4186 ret = get_errno(semctl(semid, semnum, cmd, arg));
4187 err = host_to_target_semid_ds(target_su.buf, &dsarg);
/* IPC_INFO/SEM_INFO: copy seminfo out to the guest. */
4193 arg.__buf = &seminfo;
4194 ret = get_errno(semctl(semid, semnum, cmd, arg));
4195 err = host_to_target_seminfo(target_su.__buf, &seminfo);
/* Commands taking no argument (e.g. GETVAL/GETPID/IPC_RMID). */
4203 ret = get_errno(semctl(semid, semnum, cmd, NULL));
/* Guest layout of struct sembuf (sem_op/sem_flg members elided here). */
4210 struct target_sembuf {
4211 unsigned short sem_num;
/* Convert an array of nsops guest sembufs into host sembufs. */
4216 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4217 abi_ulong target_addr,
4220 struct target_sembuf *target_sembuf;
4223 target_sembuf = lock_user(VERIFY_READ, target_addr,
4224 nsops*sizeof(struct target_sembuf), 1);
4226 return -TARGET_EFAULT;
4228 for(i=0; i<nsops; i++) {
4229 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4230 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4231 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4234 unlock_user(target_sembuf, target_addr, 0);
/* Emulate semop(2) via semtimedop with a NULL timeout.
 * NOTE(review): sops is a VLA sized by the caller-supplied nsops;
 * a huge nsops would overflow the stack — confirm callers bound it. */
4239 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
4241 struct sembuf sops[nsops];
4243 if (target_to_host_sembuf(sops, ptr, nsops))
4244 return -TARGET_EFAULT;
4246 return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
/* Guest layout of struct msqid64_ds; 32-bit ABIs pad each time field
 * to 64 bits, mirroring target_semid64_ds above. */
4249 struct target_msqid_ds
4251 struct target_ipc_perm msg_perm;
4252 abi_ulong msg_stime;
4253 #if TARGET_ABI_BITS == 32
4254 abi_ulong __unused1;
4256 abi_ulong msg_rtime;
4257 #if TARGET_ABI_BITS == 32
4258 abi_ulong __unused2;
4260 abi_ulong msg_ctime;
4261 #if TARGET_ABI_BITS == 32
4262 abi_ulong __unused3;
4264 abi_ulong __msg_cbytes;
4266 abi_ulong msg_qbytes;
4267 abi_ulong msg_lspid;
4268 abi_ulong msg_lrpid;
4269 abi_ulong __unused4;
4270 abi_ulong __unused5;
/* Convert a guest msqid_ds at target_addr into a host msqid_ds. */
4273 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4274 abi_ulong target_addr)
4276 struct target_msqid_ds *target_md;
4278 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4279 return -TARGET_EFAULT;
4280 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4281 return -TARGET_EFAULT;
4282 host_md->msg_stime = tswapal(target_md->msg_stime);
4283 host_md->msg_rtime = tswapal(target_md->msg_rtime);
4284 host_md->msg_ctime = tswapal(target_md->msg_ctime);
4285 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4286 host_md->msg_qnum = tswapal(target_md->msg_qnum);
4287 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4288 host_md->msg_lspid = tswapal(target_md->msg_lspid);
4289 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4290 unlock_user_struct(target_md, target_addr, 0);
/* Inverse of target_to_host_msqid_ds(): copy the host msqid_ds back
 * into guest memory. */
4294 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4295 struct msqid_ds *host_md)
4297 struct target_msqid_ds *target_md;
4299 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4300 return -TARGET_EFAULT;
4301 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4302 return -TARGET_EFAULT;
4303 target_md->msg_stime = tswapal(host_md->msg_stime);
4304 target_md->msg_rtime = tswapal(host_md->msg_rtime);
4305 target_md->msg_ctime = tswapal(host_md->msg_ctime);
4306 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4307 target_md->msg_qnum = tswapal(host_md->msg_qnum);
4308 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4309 target_md->msg_lspid = tswapal(host_md->msg_lspid);
4310 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4311 unlock_user_struct(target_md, target_addr, 1);
/* Guest layout of struct msginfo (most members elided here;
 * see host_to_target_msginfo() below for the field names). */
4315 struct target_msginfo {
4323 unsigned short int msgseg;
/* Copy a host struct msginfo (msgctl IPC_INFO/MSG_INFO result) out to
 * the guest. */
4326 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4327 struct msginfo *host_msginfo)
4329 struct target_msginfo *target_msginfo;
4330 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4331 return -TARGET_EFAULT;
4332 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4333 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4334 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4335 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4336 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4337 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4338 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4339 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4340 unlock_user_struct(target_msginfo, target_addr, 1);
/* Emulate msgctl(2).  The case labels are elided in this listing;
 * visible branches: IPC_STAT/IPC_SET round-trips msqid_ds, a no-arg
 * command path, and IPC_INFO/MSG_INFO which fills a msginfo. */
4344 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4346 struct msqid_ds dsarg;
4347 struct msginfo msginfo;
4348 abi_long ret = -TARGET_EINVAL;
4356 if (target_to_host_msqid_ds(&dsarg,ptr))
4357 return -TARGET_EFAULT;
4358 ret = get_errno(msgctl(msgid, cmd, &dsarg));
4359 if (host_to_target_msqid_ds(ptr,&dsarg))
4360 return -TARGET_EFAULT;
4363 ret = get_errno(msgctl(msgid, cmd, NULL));
/* IPC_INFO/MSG_INFO take a msginfo despite the msqid_ds prototype. */
4367 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4368 if (host_to_target_msginfo(ptr, &msginfo))
4369 return -TARGET_EFAULT;
/* Guest layout of struct msgbuf (mtype/mtext members elided here). */
4376 struct target_msgbuf {
/* Emulate msgsnd(2): copy the guest msgbuf into a heap-allocated host
 * msgbuf (mtype swapped, mtext copied verbatim) and send it.
 * Rejects negative msgsz with -TARGET_EINVAL. */
4381 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4382 ssize_t msgsz, int msgflg)
4384 struct target_msgbuf *target_mb;
4385 struct msgbuf *host_mb;
4389 return -TARGET_EINVAL;
4392 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4393 return -TARGET_EFAULT;
/* +sizeof(long) covers the host mtype field preceding mtext. */
4394 host_mb = g_try_malloc(msgsz + sizeof(long));
4396 unlock_user_struct(target_mb, msgp, 0);
4397 return -TARGET_ENOMEM;
4399 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4400 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4401 ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4403 unlock_user_struct(target_mb, msgp, 0);
/* Emulate msgrcv(2): receive into a host msgbuf, then copy the mtext
 * payload and swapped mtype back to the guest msgbuf at msgp. */
4408 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4409 ssize_t msgsz, abi_long msgtyp,
4412 struct target_msgbuf *target_mb;
4414 struct msgbuf *host_mb;
4418 return -TARGET_EINVAL;
4421 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4422 return -TARGET_EFAULT;
4424 host_mb = g_try_malloc(msgsz + sizeof(long));
4426 ret = -TARGET_ENOMEM;
4429 ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
/* On success ret is the received byte count; lock the guest mtext
 * region (which follows the abi_ulong mtype) and copy it out. */
4432 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4433 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4434 if (!target_mtext) {
4435 ret = -TARGET_EFAULT;
4438 memcpy(target_mb->mtext, host_mb->mtext, ret);
4439 unlock_user(target_mtext, target_mtext_addr, ret);
4442 target_mb->mtype = tswapal(host_mb->mtype);
4446 unlock_user_struct(target_mb, msgp, 1);
/* Convert a guest shmid_ds at target_addr into a host shmid_ds. */
4451 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4452 abi_ulong target_addr)
4454 struct target_shmid_ds *target_sd;
4456 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4457 return -TARGET_EFAULT;
4458 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4459 return -TARGET_EFAULT;
4460 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4461 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4462 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4463 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4464 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4465 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4466 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4467 unlock_user_struct(target_sd, target_addr, 0);
/* Inverse of target_to_host_shmid_ds(): copy the host shmid_ds back to
 * guest memory. */
4471 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4472 struct shmid_ds *host_sd)
4474 struct target_shmid_ds *target_sd;
4476 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4477 return -TARGET_EFAULT;
4478 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4479 return -TARGET_EFAULT;
4480 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4481 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4482 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4483 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4484 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4485 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4486 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4487 unlock_user_struct(target_sd, target_addr, 1);
/* Guest layout of struct shminfo (members elided; see
 * host_to_target_shminfo() below for the field names). */
4491 struct target_shminfo {
/* Copy a host struct shminfo (shmctl IPC_INFO result) out to the guest. */
4499 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4500 struct shminfo *host_shminfo)
4502 struct target_shminfo *target_shminfo;
4503 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4504 return -TARGET_EFAULT;
4505 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4506 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4507 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4508 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4509 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4510 unlock_user_struct(target_shminfo, target_addr, 1);
/* Guest layout of struct shm_info (earlier members elided here). */
4514 struct target_shm_info {
4519 abi_ulong swap_attempts;
4520 abi_ulong swap_successes;
/* Copy a host struct shm_info (shmctl SHM_INFO result) out to the guest. */
4523 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4524 struct shm_info *host_shm_info)
4526 struct target_shm_info *target_shm_info;
4527 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4528 return -TARGET_EFAULT;
4529 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4530 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4531 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4532 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4533 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4534 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4535 unlock_user_struct(target_shm_info, target_addr, 1);
/* Emulate shmctl(2).  Case labels are elided in this listing; visible
 * branches: IPC_STAT/IPC_SET (shmid_ds round-trip), IPC_INFO (shminfo),
 * SHM_INFO (shm_info), and a no-argument fallback. */
4539 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4541 struct shmid_ds dsarg;
4542 struct shminfo shminfo;
4543 struct shm_info shm_info;
4544 abi_long ret = -TARGET_EINVAL;
4552 if (target_to_host_shmid_ds(&dsarg, buf))
4553 return -TARGET_EFAULT;
4554 ret = get_errno(shmctl(shmid, cmd, &dsarg));
4555 if (host_to_target_shmid_ds(buf, &dsarg))
4556 return -TARGET_EFAULT;
/* IPC_INFO/SHM_INFO pass differently-typed buffers through the
 * shmid_ds-typed third parameter, hence the casts. */
4559 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4560 if (host_to_target_shminfo(buf, &shminfo))
4561 return -TARGET_EFAULT;
4564 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4565 if (host_to_target_shm_info(buf, &shm_info))
4566 return -TARGET_EFAULT;
4571 ret = get_errno(shmctl(shmid, cmd, NULL));
/* Emulate shmat(2).  Queries the segment size via IPC_STAT, attaches at
 * the guest-requested address or at a hole found by mmap_find_vma(),
 * updates guest page flags, and records the mapping in shm_regions[]
 * so do_shmdt() can undo it.  Returns the guest attach address. */
4578 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
4582 struct shmid_ds shm_info;
4585 /* find out the length of the shared memory segment */
4586 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4587 if (is_error(ret)) {
4588 /* can't get length, bail out */
/* Guest supplied an address: attach exactly there (g2h translates). */
4595 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
4597 abi_ulong mmap_start;
4599 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
4601 if (mmap_start == -1) {
4603 host_raddr = (void *)-1;
/* SHM_REMAP because mmap_find_vma reserved the range already. */
4605 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
4608 if (host_raddr == (void *)-1) {
4610 return get_errno((long)host_raddr);
4612 raddr=h2g((unsigned long)host_raddr);
4614 page_set_flags(raddr, raddr + shm_info.shm_segsz,
4615 PAGE_VALID | PAGE_READ |
4616 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
/* Remember the attachment in the first free tracking slot. */
4618 for (i = 0; i < N_SHM_REGIONS; i++) {
4619 if (!shm_regions[i].in_use) {
4620 shm_regions[i].in_use = true;
4621 shm_regions[i].start = raddr;
4622 shm_regions[i].size = shm_info.shm_segsz;
/* Emulate shmdt(2): drop the matching shm_regions[] entry, clear the
 * guest page flags for the range, then detach on the host. */
4632 static inline abi_long do_shmdt(abi_ulong shmaddr)
4636 for (i = 0; i < N_SHM_REGIONS; ++i) {
4637 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4638 shm_regions[i].in_use = false;
4639 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4644 return get_errno(shmdt(g2h(shmaddr)));
4647 #ifdef TARGET_NR_ipc
4648 /* ??? This only works with linear mappings. */
4649 /* do_ipc() must return target values and target errnos. */
/* Multiplexer for the legacy ipc(2) syscall: dispatches on the low 16
 * bits of 'call' to the sem/msg/shm helpers above (case labels are
 * elided in this listing). */
4650 static abi_long do_ipc(unsigned int call, abi_long first,
4651 abi_long second, abi_long third,
4652 abi_long ptr, abi_long fifth)
/* High 16 bits of 'call' carry the interface version. */
4657 version = call >> 16;
4662 ret = do_semop(first, ptr, second);
4666 ret = get_errno(semget(first, second, third));
4669 case IPCOP_semctl: {
4670 /* The semun argument to semctl is passed by value, so dereference the
4673 get_user_ual(atptr, ptr);
4674 ret = do_semctl(first, second, third, atptr);
4679 ret = get_errno(msgget(first, second));
4683 ret = do_msgsnd(first, ptr, second, third);
4687 ret = do_msgctl(first, second, ptr);
/* Old (version 0) msgrcv passes msgp/msgtyp via an ipc_kludge struct. */
4694 struct target_ipc_kludge {
4699 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4700 ret = -TARGET_EFAULT;
4704 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4706 unlock_user_struct(tmp, ptr, 0);
4710 ret = do_msgrcv(first, ptr, second, fifth, third);
/* shmat: store the attach address back through 'third'. */
4719 raddr = do_shmat(first, ptr, second);
4720 if (is_error(raddr))
4721 return get_errno(raddr);
4722 if (put_user_ual(raddr, third))
4723 return -TARGET_EFAULT;
4727 ret = -TARGET_EINVAL;
4732 ret = do_shmdt(ptr);
4736 /* IPC_* flag values are the same on all linux platforms */
4737 ret = get_errno(shmget(first, second, third));
4740 /* IPC_* and SHM_* command values are the same on all linux platforms */
4742 ret = do_shmctl(first, second, ptr);
4745 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
4746 ret = -TARGET_ENOSYS;
4753 /* kernel structure types definitions */
/* First pass over syscall_types.h: expand each STRUCT() into an enum
 * member STRUCT_<name> (the enum braces are elided in this listing). */
4755 #define STRUCT(name, ...) STRUCT_ ## name,
4756 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4758 #include "syscall_types.h"
4762 #undef STRUCT_SPECIAL
/* Second pass: emit a thunk type-description array per struct. */
4764 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4765 #define STRUCT_SPECIAL(name)
4766 #include "syscall_types.h"
4768 #undef STRUCT_SPECIAL
/* One IOCTLEntry per supported ioctl: cmd numbers, access flags,
 * optional custom handler, and argument type description. */
4770 typedef struct IOCTLEntry IOCTLEntry;
4772 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
4773 int fd, int cmd, abi_long arg);
4777 unsigned int host_cmd;
4780 do_ioctl_fn *do_ioctl;
4781 const argtype arg_type[5];
/* Direction of data flow for the ioctl's argument buffer. */
4784 #define IOC_R 0x0001
4785 #define IOC_W 0x0002
4786 #define IOC_RW (IOC_R | IOC_W)
4788 #define MAX_STRUCT_SIZE 4096
4790 #ifdef CONFIG_FIEMAP
4791 /* So fiemap access checks don't overflow on 32 bit systems.
4792 * This is very slightly smaller than the limit imposed by
4793 * the underlying kernel.
4795 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4796 / sizeof(struct fiemap_extent))
/* Custom handler for FS_IOC_FIEMAP: the argument is a struct fiemap
 * followed by fm_extent_count fiemap_extents filled in by the kernel,
 * so the fixed thunk machinery cannot size it statically. */
4798 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4799 int fd, int cmd, abi_long arg)
4801 /* The parameter for this ioctl is a struct fiemap followed
4802 * by an array of struct fiemap_extent whose size is set
4803 * in fiemap->fm_extent_count. The array is filled in by the
4806 int target_size_in, target_size_out;
4808 const argtype *arg_type = ie->arg_type;
4809 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4812 int i, extent_size = thunk_type_size(extent_arg_type, 0);
4816 assert(arg_type[0] == TYPE_PTR);
4817 assert(ie->access == IOC_RW);
/* Convert the guest's fiemap header into buf_temp. */
4819 target_size_in = thunk_type_size(arg_type, 0);
4820 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4822 return -TARGET_EFAULT;
4824 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4825 unlock_user(argptr, arg, 0);
4826 fm = (struct fiemap *)buf_temp;
/* Bound the extent count before computing the output buffer size. */
4827 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4828 return -TARGET_EINVAL;
4831 outbufsz = sizeof (*fm) +
4832 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4834 if (outbufsz > MAX_STRUCT_SIZE) {
4835 /* We can't fit all the extents into the fixed size buffer.
4836 * Allocate one that is large enough and use it instead.
4838 fm = g_try_malloc(outbufsz);
4840 return -TARGET_ENOMEM;
4842 memcpy(fm, buf_temp, sizeof(struct fiemap));
4845 ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4846 if (!is_error(ret)) {
4847 target_size_out = target_size_in;
4848 /* An extent_count of 0 means we were only counting the extents
4849 * so there are no structs to copy
4851 if (fm->fm_extent_count != 0) {
4852 target_size_out += fm->fm_mapped_extents * extent_size;
4854 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4856 ret = -TARGET_EFAULT;
4858 /* Convert the struct fiemap */
4859 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4860 if (fm->fm_extent_count != 0) {
4861 p = argptr + target_size_in;
4862 /* ...and then all the struct fiemap_extents */
4863 for (i = 0; i < fm->fm_mapped_extents; i++) {
4864 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4869 unlock_user(argptr, arg, target_size_out);
/* Custom handler for SIOCGIFCONF: the guest passes a struct ifconf
 * whose ifc_buf points at an ifreq array, so both the struct and the
 * pointed-to array need conversion in each direction. */
4879 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4880 int fd, int cmd, abi_long arg)
4882 const argtype *arg_type = ie->arg_type;
4886 struct ifconf *host_ifconf;
4888 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4889 int target_ifreq_size;
4894 abi_long target_ifc_buf;
4898 assert(arg_type[0] == TYPE_PTR);
4899 assert(ie->access == IOC_RW);
/* Convert the guest ifconf header into buf_temp. */
4902 target_size = thunk_type_size(arg_type, 0);
4904 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4906 return -TARGET_EFAULT;
4907 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4908 unlock_user(argptr, arg, 0);
/* Remember the guest buffer pointer so it can be restored later. */
4910 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4911 target_ifc_len = host_ifconf->ifc_len;
4912 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4914 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4915 nb_ifreq = target_ifc_len / target_ifreq_size;
4916 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4918 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4919 if (outbufsz > MAX_STRUCT_SIZE) {
4920 /* We can't fit all the extents into the fixed size buffer.
4921 * Allocate one that is large enough and use it instead.
/* NOTE(review): bare malloc() here (sibling code uses g_try_malloc)
 * and no matching free() is visible in this listing — possible leak;
 * confirm against the full function body. */
4923 host_ifconf = malloc(outbufsz);
4925 return -TARGET_ENOMEM;
4927 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
/* Point the host ifconf at the ifreq array that follows it. */
4930 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
4932 host_ifconf->ifc_len = host_ifc_len;
4933 host_ifconf->ifc_buf = host_ifc_buf;
4935 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4936 if (!is_error(ret)) {
4937 /* convert host ifc_len to target ifc_len */
4939 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4940 target_ifc_len = nb_ifreq * target_ifreq_size;
4941 host_ifconf->ifc_len = target_ifc_len;
4943 /* restore target ifc_buf */
4945 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4947 /* copy struct ifconf to target user */
4949 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4951 return -TARGET_EFAULT;
4952 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4953 unlock_user(argptr, arg, target_size);
4955 /* copy ifreq[] to target user */
/* NOTE(review): this lock_user result is used without a visible NULL
 * check — verify against the full source. */
4957 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4958 for (i = 0; i < nb_ifreq ; i++) {
4959 thunk_convert(argptr + i * target_ifreq_size,
4960 host_ifc_buf + i * sizeof(struct ifreq),
4961 ifreq_arg_type, THUNK_TARGET);
4963 unlock_user(argptr, target_ifc_buf, target_ifc_len);
/* Custom handler for device-mapper ioctls (DM_*): the struct dm_ioctl
 * carries a variable-length data area at data_start whose layout
 * depends on the specific command, so each command converts its
 * payload by hand in both directions. */
4973 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4974 int cmd, abi_long arg)
4977 struct dm_ioctl *host_dm;
4978 abi_long guest_data;
4979 uint32_t guest_data_size;
4981 const argtype *arg_type = ie->arg_type;
4983 void *big_buf = NULL;
/* Convert the fixed dm_ioctl header from the guest. */
4987 target_size = thunk_type_size(arg_type, 0);
4988 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4990 ret = -TARGET_EFAULT;
4993 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4994 unlock_user(argptr, arg, 0);
4996 /* buf_temp is too small, so fetch things into a bigger buffer */
4997 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4998 memcpy(big_buf, buf_temp, target_size);
/* Locate the guest's variable data area relative to data_start. */
5002 guest_data = arg + host_dm->data_start;
5003 if ((guest_data - arg) < 0) {
5004 ret = -TARGET_EINVAL;
5007 guest_data_size = host_dm->data_size - host_dm->data_start;
5008 host_data = (char*)host_dm + host_dm->data_start;
5010 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5012 ret = -TARGET_EFAULT;
/* --- Convert the inbound payload per command --- */
5016 switch (ie->host_cmd) {
5018 case DM_LIST_DEVICES:
5021 case DM_DEV_SUSPEND:
5024 case DM_TABLE_STATUS:
5025 case DM_TABLE_CLEAR:
5027 case DM_LIST_VERSIONS:
5031 case DM_DEV_SET_GEOMETRY:
5032 /* data contains only strings */
5033 memcpy(host_data, argptr, guest_data_size);
/* First 8 bytes are a 64-bit value needing a byte swap. */
5036 memcpy(host_data, argptr, guest_data_size);
5037 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
/* Table-load style commands: convert each dm_target_spec plus its
 * trailing parameter string. */
5041 void *gspec = argptr;
5042 void *cur_data = host_data;
5043 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5044 int spec_size = thunk_type_size(arg_type, 0);
5047 for (i = 0; i < host_dm->target_count; i++) {
5048 struct dm_target_spec *spec = cur_data;
5052 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5053 slen = strlen((char*)gspec + spec_size) + 1;
5055 spec->next = sizeof(*spec) + slen;
5056 strcpy((char*)&spec[1], gspec + spec_size);
5058 cur_data += spec->next;
5063 ret = -TARGET_EINVAL;
5064 unlock_user(argptr, guest_data, 0);
5067 unlock_user(argptr, guest_data, 0);
5069 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
/* --- Convert the outbound payload per command --- */
5070 if (!is_error(ret)) {
5071 guest_data = arg + host_dm->data_start;
5072 guest_data_size = host_dm->data_size - host_dm->data_start;
5073 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5074 switch (ie->host_cmd) {
5079 case DM_DEV_SUSPEND:
5082 case DM_TABLE_CLEAR:
5084 case DM_DEV_SET_GEOMETRY:
5085 /* no return data */
5087 case DM_LIST_DEVICES:
/* Walk the dm_name_list chain, converting each entry and recomputing
 * 'next' offsets for the target's layout. */
5089 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5090 uint32_t remaining_data = guest_data_size;
5091 void *cur_data = argptr;
5092 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5093 int nl_size = 12; /* can't use thunk_size due to alignment */
5096 uint32_t next = nl->next;
5098 nl->next = nl_size + (strlen(nl->name) + 1);
5100 if (remaining_data < nl->next) {
5101 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5104 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5105 strcpy(cur_data + nl_size, nl->name);
5106 cur_data += nl->next;
5107 remaining_data -= nl->next;
5111 nl = (void*)nl + next;
5116 case DM_TABLE_STATUS:
/* Walk the dm_target_spec array plus trailing status strings. */
5118 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5119 void *cur_data = argptr;
5120 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5121 int spec_size = thunk_type_size(arg_type, 0);
5124 for (i = 0; i < host_dm->target_count; i++) {
5125 uint32_t next = spec->next;
5126 int slen = strlen((char*)&spec[1]) + 1;
5127 spec->next = (cur_data - argptr) + spec_size + slen;
5128 if (guest_data_size < spec->next) {
5129 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5132 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5133 strcpy(cur_data + spec_size, (char*)&spec[1]);
5134 cur_data = argptr + spec->next;
5135 spec = (void*)host_dm + host_dm->data_start + next;
/* DM_DEV_WAIT/DM_TABLE_DEPS style: count followed by u64 devs. */
5141 void *hdata = (void*)host_dm + host_dm->data_start;
5142 int count = *(uint32_t*)hdata;
5143 uint64_t *hdev = hdata + 8;
5144 uint64_t *gdev = argptr + 8;
5147 *(uint32_t*)argptr = tswap32(count);
5148 for (i = 0; i < count; i++) {
5149 *gdev = tswap64(*hdev);
5155 case DM_LIST_VERSIONS:
/* Walk the dm_target_versions chain, like DM_LIST_DEVICES. */
5157 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5158 uint32_t remaining_data = guest_data_size;
5159 void *cur_data = argptr;
5160 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5161 int vers_size = thunk_type_size(arg_type, 0);
5164 uint32_t next = vers->next;
5166 vers->next = vers_size + (strlen(vers->name) + 1);
5168 if (remaining_data < vers->next) {
5169 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5172 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5173 strcpy(cur_data + vers_size, vers->name);
5174 cur_data += vers->next;
5175 remaining_data -= vers->next;
5179 vers = (void*)vers + next;
5184 unlock_user(argptr, guest_data, 0);
5185 ret = -TARGET_EINVAL;
5188 unlock_user(argptr, guest_data, guest_data_size);
/* Finally copy the (possibly updated) dm_ioctl header back. */
5190 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5192 ret = -TARGET_EFAULT;
5195 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5196 unlock_user(argptr, arg, target_size);
/* Custom handler for BLKPG: struct blkpg_ioctl_arg contains a 'data'
 * pointer to a struct blkpg_partition, which must be converted
 * separately and re-pointed at a host-local copy. */
5203 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5204 int cmd, abi_long arg)
5208 const argtype *arg_type = ie->arg_type;
5209 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5212 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5213 struct blkpg_partition host_part;
5215 /* Read and convert blkpg */
5217 target_size = thunk_type_size(arg_type, 0);
5218 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5220 ret = -TARGET_EFAULT;
5223 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5224 unlock_user(argptr, arg, 0);
5226 switch (host_blkpg->op) {
5227 case BLKPG_ADD_PARTITION:
5228 case BLKPG_DEL_PARTITION:
5229 /* payload is struct blkpg_partition */
5232 /* Unknown opcode */
5233 ret = -TARGET_EINVAL;
5237 /* Read and convert blkpg->data */
5238 arg = (abi_long)(uintptr_t)host_blkpg->data;
5239 target_size = thunk_type_size(part_arg_type, 0);
5240 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5242 ret = -TARGET_EFAULT;
5245 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5246 unlock_user(argptr, arg, 0);
5248 /* Swizzle the data pointer to our local copy and call! */
5249 host_blkpg->data = &host_part;
5250 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
/* Custom handler for routing-table ioctls on struct rtentry: the
 * rt_dev member is a pointer to a device-name string in guest memory,
 * so it is looked up by field offset during the struct conversion and
 * replaced with a locked host string. */
5256 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5257 int fd, int cmd, abi_long arg)
5259 const argtype *arg_type = ie->arg_type;
5260 const StructEntry *se;
5261 const argtype *field_types;
5262 const int *dst_offsets, *src_offsets;
5265 abi_ulong *target_rt_dev_ptr;
5266 unsigned long *host_rt_dev_ptr;
5270 assert(ie->access == IOC_W);
5271 assert(*arg_type == TYPE_PTR);
5273 assert(*arg_type == TYPE_STRUCT);
5274 target_size = thunk_type_size(arg_type, 0);
5275 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5277 return -TARGET_EFAULT;
5280 assert(*arg_type == (int)STRUCT_rtentry);
5281 se = struct_entries + *arg_type++;
5282 assert(se->convert[0] == NULL);
5283 /* convert struct here to be able to catch rt_dev string */
5284 field_types = se->field_types;
5285 dst_offsets = se->field_offsets[THUNK_HOST];
5286 src_offsets = se->field_offsets[THUNK_TARGET];
5287 for (i = 0; i < se->nb_fields; i++) {
5288 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5289 assert(*field_types == TYPE_PTRVOID);
5290 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5291 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5292 if (*target_rt_dev_ptr != 0) {
5293 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5294 tswapal(*target_rt_dev_ptr));
5295 if (!*host_rt_dev_ptr) {
5296 unlock_user(argptr, arg, 0);
5297 return -TARGET_EFAULT;
5300 *host_rt_dev_ptr = 0;
/* All other fields go through the generic thunk conversion. */
5305 field_types = thunk_convert(buf_temp + dst_offsets[i],
5306 argptr + src_offsets[i],
5307 field_types, THUNK_HOST);
5309 unlock_user(argptr, arg, 0);
5311 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
/* Release the locked device-name string, if any. */
5312 if (*host_rt_dev_ptr != 0) {
5313 unlock_user((void *)*host_rt_dev_ptr,
5314 *target_rt_dev_ptr, 0);
/*
 * Special handler for KDSIGACCEPT: the ioctl argument is a signal
 * number, which must be translated from the target's numbering to
 * the host's before the host ioctl is issued.
 */
5319 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5320 int fd, int cmd, abi_long arg)
5322 int sig = target_to_host_signal(arg);
5323 return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
/*
 * Table of supported ioctls.  IOCTL() registers a command handled by
 * the generic thunk-based conversion in do_ioctl(); IOCTL_SPECIAL()
 * additionally supplies a custom do_ioctl_* handler (the 'dofn' slot,
 * 0 for plain entries).  The entries themselves are generated from an
 * included list (elided here).
 */
5326 static IOCTLEntry ioctl_entries[] = {
5327 #define IOCTL(cmd, access, ...) \
5328 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5329 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5330 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5335 /* ??? Implement proper locking for ioctls. */
5336 /* do_ioctl() Must return target values and target errnos. */
/*
 * Central ioctl dispatcher: look up 'cmd' in ioctl_entries[], then
 * either delegate to the entry's special handler or perform the
 * generic argument conversion based on the entry's argtype and
 * access mode (IOC_R: convert result back to target; IOC_W: convert
 * argument to host; IOC_RW: both).
 */
5337 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5339 const IOCTLEntry *ie;
5340 const argtype *arg_type;
/* Scratch buffer large enough for any struct in the ioctl table. */
5342 uint8_t buf_temp[MAX_STRUCT_SIZE];
/* Linear search; a zero target_cmd terminates the table. */
5348 if (ie->target_cmd == 0) {
5349 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5350 return -TARGET_ENOSYS;
5352 if (ie->target_cmd == cmd)
5356 arg_type = ie->arg_type;
5358 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
/* Entries registered with IOCTL_SPECIAL bypass the generic path. */
5361 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5364 switch(arg_type[0]) {
/* TYPE_NULL: no argument at all. */
5367 ret = get_errno(safe_ioctl(fd, ie->host_cmd));
/* Scalar argument: passed through unchanged. */
5371 ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
/* Pointer-to-struct argument: thunk-convert per access mode. */
5375 target_size = thunk_type_size(arg_type, 0);
5376 switch(ie->access) {
/* IOC_R: kernel writes the struct; convert host -> target after. */
5378 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5379 if (!is_error(ret)) {
5380 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5382 return -TARGET_EFAULT;
5383 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5384 unlock_user(argptr, arg, target_size);
/* IOC_W: kernel reads the struct; convert target -> host before. */
5388 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5390 return -TARGET_EFAULT;
5391 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5392 unlock_user(argptr, arg, 0);
5393 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
/* IOC_RW: convert in before the call and back out after. */
5397 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5399 return -TARGET_EFAULT;
5400 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5401 unlock_user(argptr, arg, 0);
5402 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5403 if (!is_error(ret)) {
5404 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5406 return -TARGET_EFAULT;
5407 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5408 unlock_user(argptr, arg, target_size);
5414 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5415 (long)cmd, arg_type[0]);
5416 ret = -TARGET_ENOSYS;
/*
 * termios c_iflag translation table: maps target input-mode bits to
 * their host equivalents (used by target_to_host_bitmask and
 * host_to_target_bitmask).  Entry layout: target mask, target bits,
 * host mask, host bits.
 */
5422 static const bitmask_transtbl iflag_tbl[] = {
5423 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5424 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5425 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5426 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5427 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5428 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5429 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5430 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5431 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5432 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5433 { TARGET_IXON, TARGET_IXON, IXON, IXON },
5434 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5435 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5436 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
/*
 * termios c_oflag translation table.  Multi-bit delay fields (NLDLY,
 * CRDLY, TABDLY, ...) get one entry per possible value so the whole
 * field is translated, not just individual bits.
 */
5440 static const bitmask_transtbl oflag_tbl[] = {
5441 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5442 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5443 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5444 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5445 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5446 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5447 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5448 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5449 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5450 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5451 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5452 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5453 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5454 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5455 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5456 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5457 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5458 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5459 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5460 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5461 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5462 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5463 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5464 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
/*
 * termios c_cflag translation table.  The CBAUD field is a multi-bit
 * baud-rate code, so each supported rate gets its own entry; likewise
 * CSIZE for the character-size codes CS5..CS8.
 */
5468 static const bitmask_transtbl cflag_tbl[] = {
5469 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5470 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5471 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5472 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5473 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5474 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5475 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5476 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5477 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5478 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5479 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5480 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5481 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5482 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5483 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5484 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5485 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5486 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5487 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5488 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5489 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5490 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5491 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5492 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5493 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5494 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5495 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5496 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5497 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5498 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5499 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
/* termios c_lflag (local mode) translation table: single-bit flags
 * mapped one-to-one between target and host. */
5503 static const bitmask_transtbl lflag_tbl[] = {
5504 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5505 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5506 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5507 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5508 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5509 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5510 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5511 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5512 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5513 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5514 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5515 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5516 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5517 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5518 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
/*
 * Convert a target struct termios to the host layout: byte-swap and
 * translate each flag word through its bitmask table, copy c_line,
 * and remap the control-character array index by index (target and
 * host V* indices may differ).  Registered as the target->host
 * converter in struct_termios_def.
 */
5522 static void target_to_host_termios (void *dst, const void *src)
5524 struct host_termios *host = dst;
5525 const struct target_termios *target = src;
/* Flag words: swap from target byte order, then translate bits. */
5528 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5530 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5532 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5534 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5535 host->c_line = target->c_line;
/* Zero first so host-only control slots are well defined. */
5537 memset(host->c_cc, 0, sizeof(host->c_cc));
5538 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5539 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5540 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5541 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5542 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5543 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5544 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5545 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5546 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5547 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5548 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5549 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5550 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5551 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5552 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5553 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5554 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
/*
 * Inverse of target_to_host_termios: translate each host flag word
 * back through the bitmask tables, byte-swap into target order, and
 * remap the control-character array to target V* indices.
 */
5557 static void host_to_target_termios (void *dst, const void *src)
5559 struct target_termios *target = dst;
5560 const struct host_termios *host = src;
/* Flag words: translate bits to target values, then swap byte order. */
5563 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5565 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5567 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5569 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5570 target->c_line = host->c_line;
/* Zero first so target-only control slots are well defined. */
5572 memset(target->c_cc, 0, sizeof(target->c_cc));
5573 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5574 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5575 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5576 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5577 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5578 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5579 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5580 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5581 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5582 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5583 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5584 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5585 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5586 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5587 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5588 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5589 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
/* Thunk registration for struct termios: custom converters replace
 * the generic field-by-field conversion (convert[0] = host->target,
 * convert[1] = target->host), with per-ABI size and alignment. */
5592 static const StructEntry struct_termios_def = {
5593 .convert = { host_to_target_termios, target_to_host_termios },
5594 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5595 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
/* mmap() flags translation table: maps TARGET_MAP_* bits to the host
 * MAP_* equivalents for the mmap/mmap2 syscall emulation. */
5598 static bitmask_transtbl mmap_flags_tbl[] = {
5599 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5600 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5601 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5602 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
5603 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
5604 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
5605 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
5606 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5607 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE, MAP_NORESERVE,
5612 #if defined(TARGET_I386)
5614 /* NOTE: there is really one LDT for all the threads */
/* Backing store for the emulated LDT (see write_ldt for allocation). */
5615 static uint8_t *ldt_table;
/*
 * modify_ldt(func=0): copy the emulated LDT contents out to guest
 * memory at 'ptr', clamped to min(full LDT size, bytecount).
 * Returns target EFAULT if the guest buffer is unwritable.
 */
5617 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5624 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5625 if (size > bytecount)
5627 p = lock_user(VERIFY_WRITE, ptr, size, 0);
5629 return -TARGET_EFAULT;
5630 /* ??? Should this by byteswapped? */
5631 memcpy(p, ldt_table, size);
5632 unlock_user(p, ptr, size);
5636 /* XXX: add locking support */
/*
 * modify_ldt(func=1/0x11): install one LDT entry from the guest's
 * struct modify_ldt_ldt_s.  Mirrors the Linux kernel's write_ldt():
 * unpacks the flags bitfield, validates the entry, lazily allocates
 * the LDT backing pages on first use, encodes the two 32-bit
 * descriptor words, and stores them in ldt_table.  'oldmode' selects
 * the legacy (func=1) semantics.
 */
5637 static abi_long write_ldt(CPUX86State *env,
5638 abi_ulong ptr, unsigned long bytecount, int oldmode)
5640 struct target_modify_ldt_ldt_s ldt_info;
5641 struct target_modify_ldt_ldt_s *target_ldt_info;
5642 int seg_32bit, contents, read_exec_only, limit_in_pages;
5643 int seg_not_present, useable, lm;
5644 uint32_t *lp, entry_1, entry_2;
5646 if (bytecount != sizeof(ldt_info))
5647 return -TARGET_EINVAL;
5648 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5649 return -TARGET_EFAULT;
/* Copy the descriptor request out of guest memory, fixing byte order. */
5650 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5651 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5652 ldt_info.limit = tswap32(target_ldt_info->limit);
5653 ldt_info.flags = tswap32(target_ldt_info->flags);
5654 unlock_user_struct(target_ldt_info, ptr, 0);
5656 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5657 return -TARGET_EINVAL;
/* Unpack the packed flags word (same layout as the kernel's). */
5658 seg_32bit = ldt_info.flags & 1;
5659 contents = (ldt_info.flags >> 1) & 3;
5660 read_exec_only = (ldt_info.flags >> 3) & 1;
5661 limit_in_pages = (ldt_info.flags >> 4) & 1;
5662 seg_not_present = (ldt_info.flags >> 5) & 1;
5663 useable = (ldt_info.flags >> 6) & 1;
5667 lm = (ldt_info.flags >> 7) & 1;
5669 if (contents == 3) {
5671 return -TARGET_EINVAL;
5672 if (seg_not_present == 0)
5673 return -TARGET_EINVAL;
5675 /* allocate the LDT */
/* First use: map anonymous guest memory to back the whole LDT. */
5677 env->ldt.base = target_mmap(0,
5678 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
5679 PROT_READ|PROT_WRITE,
5680 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
5681 if (env->ldt.base == -1)
5682 return -TARGET_ENOMEM;
5683 memset(g2h(env->ldt.base), 0,
5684 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
5685 env->ldt.limit = 0xffff;
5686 ldt_table = g2h(env->ldt.base);
5689 /* NOTE: same code as Linux kernel */
5690 /* Allow LDTs to be cleared by the user. */
5691 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5694 read_exec_only == 1 &&
5696 limit_in_pages == 0 &&
5697 seg_not_present == 1 &&
/* Encode the x86 descriptor: low word = base[15:0] | limit[15:0],
 * high word = base[31:24]/[23:16], limit[19:16] and attribute bits. */
5705 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5706 (ldt_info.limit & 0x0ffff);
5707 entry_2 = (ldt_info.base_addr & 0xff000000) |
5708 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5709 (ldt_info.limit & 0xf0000) |
5710 ((read_exec_only ^ 1) << 9) |
5712 ((seg_not_present ^ 1) << 15) |
5714 (limit_in_pages << 23) |
/* The AVL ("useable") bit only exists in the new-mode interface. */
5718 entry_2 |= (useable << 20);
5720 /* Install the new entry ... */
5722 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
5723 lp[0] = tswap32(entry_1);
5724 lp[1] = tswap32(entry_2);
5728 /* specific and weird i386 syscalls */
/*
 * modify_ldt(2) dispatcher: func 0 reads the LDT, func 1 writes an
 * entry in old mode, func 0x11 writes in new mode; anything else is
 * ENOSYS.
 */
5729 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5730 unsigned long bytecount)
5736 ret = read_ldt(ptr, bytecount);
5739 ret = write_ldt(env, ptr, bytecount, 1);
5742 ret = write_ldt(env, ptr, bytecount, 0);
5745 ret = -TARGET_ENOSYS;
5751 #if defined(TARGET_I386) && defined(TARGET_ABI32)
/*
 * set_thread_area(2) for 32-bit x86 guests: install a TLS descriptor
 * in the emulated GDT.  If the guest asks for entry -1, pick the
 * first free TLS slot and write the chosen index back to the guest
 * struct.  Descriptor encoding matches write_ldt() above.
 */
5752 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
5754 uint64_t *gdt_table = g2h(env->gdt.base);
5755 struct target_modify_ldt_ldt_s ldt_info;
5756 struct target_modify_ldt_ldt_s *target_ldt_info;
5757 int seg_32bit, contents, read_exec_only, limit_in_pages;
5758 int seg_not_present, useable, lm;
5759 uint32_t *lp, entry_1, entry_2;
/* VERIFY_WRITE: we may write the allocated entry_number back. */
5762 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5763 if (!target_ldt_info)
5764 return -TARGET_EFAULT;
5765 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5766 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5767 ldt_info.limit = tswap32(target_ldt_info->limit);
5768 ldt_info.flags = tswap32(target_ldt_info->flags);
/* entry_number == -1: allocate the first unused TLS GDT slot. */
5769 if (ldt_info.entry_number == -1) {
5770 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
5771 if (gdt_table[i] == 0) {
5772 ldt_info.entry_number = i;
5773 target_ldt_info->entry_number = tswap32(i);
5778 unlock_user_struct(target_ldt_info, ptr, 1);
5780 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
5781 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
5782 return -TARGET_EINVAL;
/* Unpack the packed flags word (same layout as modify_ldt). */
5783 seg_32bit = ldt_info.flags & 1;
5784 contents = (ldt_info.flags >> 1) & 3;
5785 read_exec_only = (ldt_info.flags >> 3) & 1;
5786 limit_in_pages = (ldt_info.flags >> 4) & 1;
5787 seg_not_present = (ldt_info.flags >> 5) & 1;
5788 useable = (ldt_info.flags >> 6) & 1;
5792 lm = (ldt_info.flags >> 7) & 1;
5795 if (contents == 3) {
5796 if (seg_not_present == 0)
5797 return -TARGET_EINVAL;
5800 /* NOTE: same code as Linux kernel */
5801 /* Allow LDTs to be cleared by the user. */
5802 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5803 if ((contents == 0 &&
5804 read_exec_only == 1 &&
5806 limit_in_pages == 0 &&
5807 seg_not_present == 1 &&
/* Encode the two 32-bit halves of the x86 segment descriptor. */
5815 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5816 (ldt_info.limit & 0x0ffff);
5817 entry_2 = (ldt_info.base_addr & 0xff000000) |
5818 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5819 (ldt_info.limit & 0xf0000) |
5820 ((read_exec_only ^ 1) << 9) |
5822 ((seg_not_present ^ 1) << 15) |
5824 (limit_in_pages << 23) |
5829 /* Install the new entry ... */
5831 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
5832 lp[0] = tswap32(entry_1);
5833 lp[1] = tswap32(entry_2);
/*
 * get_thread_area(2): decode the GDT TLS descriptor at the index the
 * guest supplies and write base/limit/flags back into the guest's
 * struct modify_ldt_ldt_s (the inverse of do_set_thread_area's
 * descriptor encoding).
 */
5837 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
5839 struct target_modify_ldt_ldt_s *target_ldt_info;
5840 uint64_t *gdt_table = g2h(env->gdt.base);
5841 uint32_t base_addr, limit, flags;
5842 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
5843 int seg_not_present, useable, lm;
5844 uint32_t *lp, entry_1, entry_2;
5846 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5847 if (!target_ldt_info)
5848 return -TARGET_EFAULT;
5849 idx = tswap32(target_ldt_info->entry_number);
/* Only the TLS range of the GDT is readable through this syscall. */
5850 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
5851 idx > TARGET_GDT_ENTRY_TLS_MAX) {
5852 unlock_user_struct(target_ldt_info, ptr, 1);
5853 return -TARGET_EINVAL;
5855 lp = (uint32_t *)(gdt_table + idx);
5856 entry_1 = tswap32(lp[0]);
5857 entry_2 = tswap32(lp[1]);
/* Extract the attribute bits (inverted sense for RX-only and
 * present, matching the encoding in do_set_thread_area). */
5859 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
5860 contents = (entry_2 >> 10) & 3;
5861 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
5862 seg_32bit = (entry_2 >> 22) & 1;
5863 limit_in_pages = (entry_2 >> 23) & 1;
5864 useable = (entry_2 >> 20) & 1;
5868 lm = (entry_2 >> 21) & 1;
/* Repack into the modify_ldt flags bitfield layout. */
5870 flags = (seg_32bit << 0) | (contents << 1) |
5871 (read_exec_only << 3) | (limit_in_pages << 4) |
5872 (seg_not_present << 5) | (useable << 6) | (lm << 7);
5873 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
5874 base_addr = (entry_1 >> 16) |
5875 (entry_2 & 0xff000000) |
5876 ((entry_2 & 0xff) << 16);
5877 target_ldt_info->base_addr = tswapal(base_addr);
5878 target_ldt_info->limit = tswap32(limit);
5879 target_ldt_info->flags = tswap32(flags);
5880 unlock_user_struct(target_ldt_info, ptr, 1);
5883 #endif /* TARGET_I386 && TARGET_ABI32 */
5885 #ifndef TARGET_ABI32
/*
 * arch_prctl(2) for 64-bit x86 guests: set or get the FS/GS segment
 * base.  SET loads a null selector then writes the base directly;
 * GET copies the base out to the guest address with put_user.
 */
5886 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5893 case TARGET_ARCH_SET_GS:
5894 case TARGET_ARCH_SET_FS:
5895 if (code == TARGET_ARCH_SET_GS)
5899 cpu_x86_load_seg(env, idx, 0);
5900 env->segs[idx].base = addr;
5902 case TARGET_ARCH_GET_GS:
5903 case TARGET_ARCH_GET_FS:
5904 if (code == TARGET_ARCH_GET_GS)
5908 val = env->segs[idx].base;
5909 if (put_user(val, addr, abi_ulong))
5910 ret = -TARGET_EFAULT;
5913 ret = -TARGET_EINVAL;
/* Host stack size for threads created on behalf of guest clone(). */
5922 #define NEW_STACK_SIZE 0x40000
/* Serializes thread creation so parent-side TLS setup appears atomic
 * to the child (see clone_func / do_fork). */
5925 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
/* Parent/child handshake state for do_fork()'s CLONE_VM path. */
5928 pthread_mutex_t mutex;
5929 pthread_cond_t cond;
5932 abi_ulong child_tidptr;
5933 abi_ulong parent_tidptr;
/*
 * Entry point of a host thread backing a guest CLONE_VM clone: record
 * the child tid (honoring CLONE_CHILD_SETTID / CLONE_PARENT_SETTID),
 * restore the signal mask, signal readiness to the parent, then wait
 * for the parent to release clone_lock before entering the CPU loop.
 */
5937 static void *clone_func(void *arg)
5939 new_thread_info *info = arg;
5944 rcu_register_thread();
5946 cpu = ENV_GET_CPU(env);
5948 ts = (TaskState *)cpu->opaque;
5949 info->tid = gettid();
5950 cpu->host_tid = info->tid;
/* Publish the tid wherever the guest asked for it. */
5952 if (info->child_tidptr)
5953 put_user_u32(info->tid, info->child_tidptr);
5954 if (info->parent_tidptr)
5955 put_user_u32(info->tid, info->parent_tidptr);
5956 /* Enable signals. */
5957 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
5958 /* Signal to the parent that we're ready. */
5959 pthread_mutex_lock(&info->mutex);
5960 pthread_cond_broadcast(&info->cond);
5961 pthread_mutex_unlock(&info->mutex);
5962 /* Wait until the parent has finshed initializing the tls state. */
5963 pthread_mutex_lock(&clone_lock);
5964 pthread_mutex_unlock(&clone_lock);
5970 /* do_fork() Must return host values and target errnos (unlike most
5971 do_*() functions). */
/*
 * Emulate clone(2)/fork(2)/vfork(2).  CLONE_VM requests become a host
 * pthread sharing this process's address space (with a handshake so
 * the child doesn't run before TLS setup completes); everything else
 * falls through to a plain fork(), with vfork demoted to fork.
 */
5972 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
5973 abi_ulong parent_tidptr, target_ulong newtls,
5974 abi_ulong child_tidptr)
5976 CPUState *cpu = ENV_GET_CPU(env);
5980 CPUArchState *new_env;
5981 unsigned int nptl_flags;
5984 /* Emulate vfork() with fork() */
5985 if (flags & CLONE_VFORK)
5986 flags &= ~(CLONE_VFORK | CLONE_VM);
5988 if (flags & CLONE_VM) {
5989 TaskState *parent_ts = (TaskState *)cpu->opaque;
5990 new_thread_info info;
5991 pthread_attr_t attr;
/* New per-thread TaskState plus a copied CPU for the child. */
5993 ts = g_new0(TaskState, 1);
5994 init_task_state(ts);
5995 /* we create a new CPU instance. */
5996 new_env = cpu_copy(env);
5997 /* Init regs that differ from the parent. */
5998 cpu_clone_regs(new_env, newsp);
5999 new_cpu = ENV_GET_CPU(new_env);
6000 new_cpu->opaque = ts;
6001 ts->bprm = parent_ts->bprm;
6002 ts->info = parent_ts->info;
6003 ts->signal_mask = parent_ts->signal_mask;
/* Strip the NPTL flags; they are handled here, not by the host. */
6005 flags &= ~CLONE_NPTL_FLAGS2;
6007 if (nptl_flags & CLONE_CHILD_CLEARTID) {
6008 ts->child_tidptr = child_tidptr;
6011 if (nptl_flags & CLONE_SETTLS)
6012 cpu_set_tls (new_env, newtls);
6014 /* Grab a mutex so that thread setup appears atomic. */
6015 pthread_mutex_lock(&clone_lock);
6017 memset(&info, 0, sizeof(info));
6018 pthread_mutex_init(&info.mutex, NULL);
6019 pthread_mutex_lock(&info.mutex);
6020 pthread_cond_init(&info.cond, NULL);
6022 if (nptl_flags & CLONE_CHILD_SETTID)
6023 info.child_tidptr = child_tidptr;
6024 if (nptl_flags & CLONE_PARENT_SETTID)
6025 info.parent_tidptr = parent_tidptr;
6027 ret = pthread_attr_init(&attr);
6028 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6029 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6030 /* It is not safe to deliver signals until the child has finished
6031 initializing, so temporarily block all signals. */
6032 sigfillset(&sigmask);
6033 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6035 ret = pthread_create(&info.thread, &attr, clone_func, &info);
6036 /* TODO: Free new CPU state if thread creation failed. */
/* Restore this thread's signal mask; the child keeps its copy. */
6038 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6039 pthread_attr_destroy(&attr);
6041 /* Wait for the child to initialize. */
6042 pthread_cond_wait(&info.cond, &info.mutex);
6044 if (flags & CLONE_PARENT_SETTID)
6045 put_user_u32(ret, parent_tidptr);
6049 pthread_mutex_unlock(&info.mutex);
6050 pthread_cond_destroy(&info.cond);
6051 pthread_mutex_destroy(&info.mutex);
6052 pthread_mutex_unlock(&clone_lock);
6054 /* if no CLONE_VM, we consider it is a fork */
/* Only the exit-signal and NPTL bits are supported on the fork path. */
6055 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0) {
6056 return -TARGET_EINVAL;
6059 if (block_signals()) {
6060 return -TARGET_ERESTARTSYS;
6066 /* Child Process. */
6068 cpu_clone_regs(env, newsp);
6070 /* There is a race condition here. The parent process could
6071 theoretically read the TID in the child process before the child
6072 tid is set. This would require using either ptrace
6073 (not implemented) or having *_tidptr to point at a shared memory
6074 mapping. We can't repeat the spinlock hack used above because
6075 the child process gets its own copy of the lock. */
6076 if (flags & CLONE_CHILD_SETTID)
6077 put_user_u32(gettid(), child_tidptr);
6078 if (flags & CLONE_PARENT_SETTID)
6079 put_user_u32(gettid(), parent_tidptr);
6080 ts = (TaskState *)cpu->opaque;
6081 if (flags & CLONE_SETTLS)
6082 cpu_set_tls (env, newtls);
6083 if (flags & CLONE_CHILD_CLEARTID)
6084 ts->child_tidptr = child_tidptr;
6092 /* warning : doesn't handle linux specific flags... */
/*
 * Map a target fcntl command to the host command number.  On 32-bit
 * ABIs the *LK64 commands map to the host large-file variants; some
 * commands are only translated when the host defines them.  Returns
 * -TARGET_EINVAL for commands we do not support.
 */
6093 static int target_to_host_fcntl_cmd(int cmd)
6096 case TARGET_F_DUPFD:
6097 case TARGET_F_GETFD:
6098 case TARGET_F_SETFD:
6099 case TARGET_F_GETFL:
6100 case TARGET_F_SETFL:
6102 case TARGET_F_GETLK:
6104 case TARGET_F_SETLK:
6106 case TARGET_F_SETLKW:
6108 case TARGET_F_GETOWN:
6110 case TARGET_F_SETOWN:
6112 case TARGET_F_GETSIG:
6114 case TARGET_F_SETSIG:
/* 32-bit ABI: the 64-bit lock commands need explicit mapping. */
6116 #if TARGET_ABI_BITS == 32
6117 case TARGET_F_GETLK64:
6119 case TARGET_F_SETLK64:
6121 case TARGET_F_SETLKW64:
6124 case TARGET_F_SETLEASE:
6126 case TARGET_F_GETLEASE:
6128 #ifdef F_DUPFD_CLOEXEC
6129 case TARGET_F_DUPFD_CLOEXEC:
6130 return F_DUPFD_CLOEXEC;
6132 case TARGET_F_NOTIFY:
6135 case TARGET_F_GETOWN_EX:
6139 case TARGET_F_SETOWN_EX:
6143 case TARGET_F_SETPIPE_SZ:
6144 return F_SETPIPE_SZ;
6145 case TARGET_F_GETPIPE_SZ:
6146 return F_GETPIPE_SZ;
6149 return -TARGET_EINVAL;
6151 return -TARGET_EINVAL;
/* flock l_type translation table.  The -1 masks mean "match the whole
 * value" rather than individual bits, since l_type is an enum-style
 * code (F_RDLCK/F_WRLCK/...), not a bitmask. */
6154 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
6155 static const bitmask_transtbl flock_tbl[] = {
6156 TRANSTBL_CONVERT(F_RDLCK),
6157 TRANSTBL_CONVERT(F_WRLCK),
6158 TRANSTBL_CONVERT(F_UNLCK),
6159 TRANSTBL_CONVERT(F_EXLCK),
6160 TRANSTBL_CONVERT(F_SHLCK),
/*
 * Read a target struct flock from guest memory into a host
 * struct flock64, translating l_type through flock_tbl.  Returns 0
 * on success, -TARGET_EFAULT if the guest pointer is unreadable.
 */
6164 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6165 abi_ulong target_flock_addr)
6167 struct target_flock *target_fl;
6170 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6171 return -TARGET_EFAULT;
6174 __get_user(l_type, &target_fl->l_type);
6175 fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
6176 __get_user(fl->l_whence, &target_fl->l_whence);
6177 __get_user(fl->l_start, &target_fl->l_start);
6178 __get_user(fl->l_len, &target_fl->l_len);
6179 __get_user(fl->l_pid, &target_fl->l_pid);
6180 unlock_user_struct(target_fl, target_flock_addr, 0);
/*
 * Write a host struct flock64 back to a target struct flock in guest
 * memory (inverse of copy_from_user_flock).  Returns 0 on success,
 * -TARGET_EFAULT if the guest pointer is unwritable.
 */
6184 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6185 const struct flock64 *fl)
6187 struct target_flock *target_fl;
6190 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6191 return -TARGET_EFAULT;
6194 l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
6195 __put_user(l_type, &target_fl->l_type);
6196 __put_user(fl->l_whence, &target_fl->l_whence);
6197 __put_user(fl->l_start, &target_fl->l_start);
6198 __put_user(fl->l_len, &target_fl->l_len);
6199 __put_user(fl->l_pid, &target_fl->l_pid);
6200 unlock_user_struct(target_fl, target_flock_addr, 1);
/* Function-pointer types so callers can select the right flock64
 * copier for the target ABI at runtime. */
6204 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6205 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6207 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
/*
 * ARM EABI variant: struct target_eabi_flock64 has different padding
 * from the generic target_flock64, so 32-bit ARM needs its own copier.
 */
6208 static inline abi_long copy_from_user_eabi_flock64(struct flock64 *fl,
6209 abi_ulong target_flock_addr)
6211 struct target_eabi_flock64 *target_fl;
6214 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6215 return -TARGET_EFAULT;
6218 __get_user(l_type, &target_fl->l_type);
6219 fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
6220 __get_user(fl->l_whence, &target_fl->l_whence);
6221 __get_user(fl->l_start, &target_fl->l_start);
6222 __get_user(fl->l_len, &target_fl->l_len);
6223 __get_user(fl->l_pid, &target_fl->l_pid);
6224 unlock_user_struct(target_fl, target_flock_addr, 0);
/* ARM EABI variant of copy_to_user_flock64 (see the matching reader
 * above for why 32-bit ARM needs a separate layout). */
6228 static inline abi_long copy_to_user_eabi_flock64(abi_ulong target_flock_addr,
6229 const struct flock64 *fl)
6231 struct target_eabi_flock64 *target_fl;
6234 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6235 return -TARGET_EFAULT;
6238 l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
6239 __put_user(l_type, &target_fl->l_type);
6240 __put_user(fl->l_whence, &target_fl->l_whence);
6241 __put_user(fl->l_start, &target_fl->l_start);
6242 __put_user(fl->l_len, &target_fl->l_len);
6243 __put_user(fl->l_pid, &target_fl->l_pid);
6244 unlock_user_struct(target_fl, target_flock_addr, 1);
/*
 * Read a target struct flock64 from guest memory into a host
 * struct flock64, translating l_type through flock_tbl.  Returns 0
 * on success, -TARGET_EFAULT on a bad guest pointer.
 */
6249 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6250 abi_ulong target_flock_addr)
6252 struct target_flock64 *target_fl;
6255 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6256 return -TARGET_EFAULT;
6259 __get_user(l_type, &target_fl->l_type);
6260 fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
6261 __get_user(fl->l_whence, &target_fl->l_whence);
6262 __get_user(fl->l_start, &target_fl->l_start);
6263 __get_user(fl->l_len, &target_fl->l_len);
6264 __get_user(fl->l_pid, &target_fl->l_pid);
6265 unlock_user_struct(target_fl, target_flock_addr, 0);
/*
 * Write a host struct flock64 back to a target struct flock64 in
 * guest memory (inverse of copy_from_user_flock64).  Returns 0 on
 * success, -TARGET_EFAULT on a bad guest pointer.
 */
6269 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6270 const struct flock64 *fl)
6272 struct target_flock64 *target_fl;
6275 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6276 return -TARGET_EFAULT;
6279 l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
6280 __put_user(l_type, &target_fl->l_type);
6281 __put_user(fl->l_whence, &target_fl->l_whence);
6282 __put_user(fl->l_start, &target_fl->l_start);
6283 __put_user(fl->l_len, &target_fl->l_len);
6284 __put_user(fl->l_pid, &target_fl->l_pid);
6285 unlock_user_struct(target_fl, target_flock_addr, 1);
/*
 * Emulate fcntl(2): translate the command with
 * target_to_host_fcntl_cmd(), then convert the third argument per
 * command class — flock structs through the copy helpers, GETFL/SETFL
 * flags through fcntl_flags_tbl, *OWN_EX through struct f_owner_ex,
 * and plain integers passed through unchanged.
 */
6289 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6291 struct flock64 fl64;
6293 struct f_owner_ex fox;
6294 struct target_f_owner_ex *target_fox;
6297 int host_cmd = target_to_host_fcntl_cmd(cmd);
6299 if (host_cmd == -TARGET_EINVAL)
/* Lock queries: copy the flock in, call, and copy the result back. */
6303 case TARGET_F_GETLK:
6304 ret = copy_from_user_flock(&fl64, arg);
6308 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6310 ret = copy_to_user_flock(arg, &fl64);
/* Lock set/wait: input-only, nothing to copy back. */
6314 case TARGET_F_SETLK:
6315 case TARGET_F_SETLKW:
6316 ret = copy_from_user_flock(&fl64, arg);
6320 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6323 case TARGET_F_GETLK64:
6324 ret = copy_from_user_flock64(&fl64, arg);
6328 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6330 ret = copy_to_user_flock64(arg, &fl64);
6333 case TARGET_F_SETLK64:
6334 case TARGET_F_SETLKW64:
6335 ret = copy_from_user_flock64(&fl64, arg);
6339 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
/* File status flags differ between ABIs: translate both ways. */
6342 case TARGET_F_GETFL:
6343 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6345 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6349 case TARGET_F_SETFL:
6350 ret = get_errno(safe_fcntl(fd, host_cmd,
6351 target_to_host_bitmask(arg,
6356 case TARGET_F_GETOWN_EX:
6357 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6359 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6360 return -TARGET_EFAULT;
6361 target_fox->type = tswap32(fox.type);
6362 target_fox->pid = tswap32(fox.pid);
6363 unlock_user_struct(target_fox, arg, 1);
6369 case TARGET_F_SETOWN_EX:
6370 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6371 return -TARGET_EFAULT;
6372 fox.type = tswap32(target_fox->type);
6373 fox.pid = tswap32(target_fox->pid);
6374 unlock_user_struct(target_fox, arg, 0);
6375 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
/* Integer-argument commands: pass arg straight through. */
6379 case TARGET_F_SETOWN:
6380 case TARGET_F_GETOWN:
6381 case TARGET_F_SETSIG:
6382 case TARGET_F_GETSIG:
6383 case TARGET_F_SETLEASE:
6384 case TARGET_F_GETLEASE:
6385 case TARGET_F_SETPIPE_SZ:
6386 case TARGET_F_GETPIPE_SZ:
6387 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
/* Unknown command: try it on the host verbatim. */
6391 ret = get_errno(safe_fcntl(fd, cmd, arg));
/*
 * UID/GID width conversion helpers.  When the target uses 16-bit
 * uids (USE_UID16) these narrow/widen between 32-bit host ids and
 * 16-bit target ids, preserving the -1 "no change" sentinel; without
 * USE_UID16 they are identity functions.  tswapid/put_user_id pick
 * the matching 16- or 32-bit accessors.
 */
6399 static inline int high2lowuid(int uid)
6407 static inline int high2lowgid(int gid)
6415 static inline int low2highuid(int uid)
/* -1 means "leave unchanged" and must survive the width change. */
6417 if ((int16_t)uid == -1)
6423 static inline int low2highgid(int gid)
6425 if ((int16_t)gid == -1)
6430 static inline int tswapid(int id)
6435 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6437 #else /* !USE_UID16 */
6438 static inline int high2lowuid(int uid)
6442 static inline int high2lowgid(int gid)
6446 static inline int low2highuid(int uid)
6450 static inline int low2highgid(int gid)
6454 static inline int tswapid(int id)
6459 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6461 #endif /* USE_UID16 */
6463 /* We must do direct syscalls for setting UID/GID, because we want to
6464 * implement the Linux system call semantics of "change only for this thread",
6465 * not the libc/POSIX semantics of "change for all threads in process".
6466 * (See http://ewontfix.com/17/ for more details.)
6467 * We use the 32-bit version of the syscalls if present; if it is not
6468 * then either the host architecture supports 32-bit UIDs natively with
6469 * the standard syscall, or the 16-bit UID is the best we can do.
/* Pick the 32-bit syscall number when the host provides one. */
6471 #ifdef __NR_setuid32
6472 #define __NR_sys_setuid __NR_setuid32
6474 #define __NR_sys_setuid __NR_setuid
6476 #ifdef __NR_setgid32
6477 #define __NR_sys_setgid __NR_setgid32
6479 #define __NR_sys_setgid __NR_setgid
6481 #ifdef __NR_setresuid32
6482 #define __NR_sys_setresuid __NR_setresuid32
6484 #define __NR_sys_setresuid __NR_setresuid
6486 #ifdef __NR_setresgid32
6487 #define __NR_sys_setresgid __NR_setresgid32
6489 #define __NR_sys_setresgid __NR_setresgid
/* Raw per-thread syscall wrappers (bypass glibc's process-wide setxid). */
6492 _syscall1(int, sys_setuid, uid_t, uid)
6493 _syscall1(int, sys_setgid, gid_t, gid)
6494 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6495 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
6497 void syscall_init(void)
6500 const argtype *arg_type;
6504 thunk_init(STRUCT_MAX);
6506 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6507 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6508 #include "syscall_types.h"
6510 #undef STRUCT_SPECIAL
6512 /* Build target_to_host_errno_table[] table from
6513 * host_to_target_errno_table[]. */
6514 for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6515 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6518 /* we patch the ioctl size if necessary. We rely on the fact that
6519 no ioctl has all the bits at '1' in the size field */
6521 while (ie->target_cmd != 0) {
6522 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6523 TARGET_IOC_SIZEMASK) {
6524 arg_type = ie->arg_type;
6525 if (arg_type[0] != TYPE_PTR) {
6526 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6531 size = thunk_type_size(arg_type, 0);
6532 ie->target_cmd = (ie->target_cmd &
6533 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6534 (size << TARGET_IOC_SIZESHIFT);
6537 /* automatic consistency check if same arch */
6538 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6539 (defined(__x86_64__) && defined(TARGET_X86_64))
6540 if (unlikely(ie->target_cmd != ie->host_cmd)) {
6541 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6542 ie->name, ie->target_cmd, ie->host_cmd);
#if TARGET_ABI_BITS == 32
/* Reassemble a 64-bit file offset from the two 32-bit ABI registers it
 * was split across; which register carries the high half depends on the
 * guest's endianness. */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABI: the offset already fits in one register; word1 is unused. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
#ifdef TARGET_NR_truncate64
/* truncate64: the 64-bit length arrives split across two ABI registers;
 * on ABIs that align register pairs the pair starts one register later,
 * so shift the arguments down first. */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#ifdef TARGET_NR_ftruncate64
/* ftruncate64: same register-pair alignment dance as target_truncate64,
 * with arg1 being the file descriptor. */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
6593 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
6594 abi_ulong target_addr)
6596 struct target_timespec *target_ts;
6598 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
6599 return -TARGET_EFAULT;
6600 __get_user(host_ts->tv_sec, &target_ts->tv_sec);
6601 __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6602 unlock_user_struct(target_ts, target_addr, 0);
6606 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
6607 struct timespec *host_ts)
6609 struct target_timespec *target_ts;
6611 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
6612 return -TARGET_EFAULT;
6613 __put_user(host_ts->tv_sec, &target_ts->tv_sec);
6614 __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6615 unlock_user_struct(target_ts, target_addr, 1);
6619 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6620 abi_ulong target_addr)
6622 struct target_itimerspec *target_itspec;
6624 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6625 return -TARGET_EFAULT;
6628 host_itspec->it_interval.tv_sec =
6629 tswapal(target_itspec->it_interval.tv_sec);
6630 host_itspec->it_interval.tv_nsec =
6631 tswapal(target_itspec->it_interval.tv_nsec);
6632 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6633 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6635 unlock_user_struct(target_itspec, target_addr, 1);
6639 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6640 struct itimerspec *host_its)
6642 struct target_itimerspec *target_itspec;
6644 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6645 return -TARGET_EFAULT;
6648 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6649 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6651 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6652 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6654 unlock_user_struct(target_itspec, target_addr, 0);
6658 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6659 abi_ulong target_addr)
6661 struct target_sigevent *target_sevp;
6663 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6664 return -TARGET_EFAULT;
6667 /* This union is awkward on 64 bit systems because it has a 32 bit
6668 * integer and a pointer in it; we follow the conversion approach
6669 * used for handling sigval types in signal.c so the guest should get
6670 * the correct value back even if we did a 64 bit byteswap and it's
6671 * using the 32 bit integer.
6673 host_sevp->sigev_value.sival_ptr =
6674 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6675 host_sevp->sigev_signo =
6676 target_to_host_signal(tswap32(target_sevp->sigev_signo));
6677 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6678 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6680 unlock_user_struct(target_sevp, target_addr, 1);
#if defined(TARGET_NR_mlockall)
/* Translate the guest's mlockall(2) MCL_* flag bits to host values.
 * Bits the guest sets that we do not know about are silently dropped. */
static inline int target_to_host_mlockall_arg(int arg)
{
    int result = 0;

    if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
        result |= MCL_CURRENT;
    }
    if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
        result |= MCL_FUTURE;
    }

    return result;
}
#endif
6699 static inline abi_long host_to_target_stat64(void *cpu_env,
6700 abi_ulong target_addr,
6701 struct stat *host_st)
6703 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6704 if (((CPUARMState *)cpu_env)->eabi) {
6705 struct target_eabi_stat64 *target_st;
6707 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6708 return -TARGET_EFAULT;
6709 memset(target_st, 0, sizeof(struct target_eabi_stat64));
6710 __put_user(host_st->st_dev, &target_st->st_dev);
6711 __put_user(host_st->st_ino, &target_st->st_ino);
6712 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6713 __put_user(host_st->st_ino, &target_st->__st_ino);
6715 __put_user(host_st->st_mode, &target_st->st_mode);
6716 __put_user(host_st->st_nlink, &target_st->st_nlink);
6717 __put_user(host_st->st_uid, &target_st->st_uid);
6718 __put_user(host_st->st_gid, &target_st->st_gid);
6719 __put_user(host_st->st_rdev, &target_st->st_rdev);
6720 __put_user(host_st->st_size, &target_st->st_size);
6721 __put_user(host_st->st_blksize, &target_st->st_blksize);
6722 __put_user(host_st->st_blocks, &target_st->st_blocks);
6723 __put_user(host_st->st_atime, &target_st->target_st_atime);
6724 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6725 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6726 unlock_user_struct(target_st, target_addr, 1);
6730 #if defined(TARGET_HAS_STRUCT_STAT64)
6731 struct target_stat64 *target_st;
6733 struct target_stat *target_st;
6736 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6737 return -TARGET_EFAULT;
6738 memset(target_st, 0, sizeof(*target_st));
6739 __put_user(host_st->st_dev, &target_st->st_dev);
6740 __put_user(host_st->st_ino, &target_st->st_ino);
6741 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6742 __put_user(host_st->st_ino, &target_st->__st_ino);
6744 __put_user(host_st->st_mode, &target_st->st_mode);
6745 __put_user(host_st->st_nlink, &target_st->st_nlink);
6746 __put_user(host_st->st_uid, &target_st->st_uid);
6747 __put_user(host_st->st_gid, &target_st->st_gid);
6748 __put_user(host_st->st_rdev, &target_st->st_rdev);
6749 /* XXX: better use of kernel struct */
6750 __put_user(host_st->st_size, &target_st->st_size);
6751 __put_user(host_st->st_blksize, &target_st->st_blksize);
6752 __put_user(host_st->st_blocks, &target_st->st_blocks);
6753 __put_user(host_st->st_atime, &target_st->target_st_atime);
6754 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6755 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6756 unlock_user_struct(target_st, target_addr, 1);
6762 /* ??? Using host futex calls even when target atomic operations
6763 are not really atomic probably breaks things. However implementing
6764 futexes locally would make futexes shared between multiple processes
6765 tricky. However they're probably useless because guest atomic
6766 operations won't work either. */
6767 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
6768 target_ulong uaddr2, int val3)
6770 struct timespec ts, *pts;
6773 /* ??? We assume FUTEX_* constants are the same on both host
6775 #ifdef FUTEX_CMD_MASK
6776 base_op = op & FUTEX_CMD_MASK;
6782 case FUTEX_WAIT_BITSET:
6785 target_to_host_timespec(pts, timeout);
6789 return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
6792 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6794 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6796 case FUTEX_CMP_REQUEUE:
6798 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6799 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6800 But the prototype takes a `struct timespec *'; insert casts
6801 to satisfy the compiler. We do not need to tswap TIMEOUT
6802 since it's not compared to guest memory. */
6803 pts = (struct timespec *)(uintptr_t) timeout;
6804 return get_errno(safe_futex(g2h(uaddr), op, val, pts,
6806 (base_op == FUTEX_CMP_REQUEUE
6810 return -TARGET_ENOSYS;
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Implement name_to_handle_at(2): build a host file_handle, then copy it
 * back to the guest buffer with the header fields byteswapped.
 * NOTE(review): `size` comes straight from the guest; the kernel rejects
 * oversized handle_bytes itself, so we pass it through unvalidated. */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Implement open_by_handle_at(2): rebuild a host file_handle from the
 * guest's copy (header fields are byteswapped) and open it. */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
6901 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
/* signalfd siginfo conversion */

/* Convert one signalfd_siginfo record from host to target format.
 * tinfo and info may alias (the caller converts in place), so all source
 * reads go through `info` and happen before the destination field is
 * overwritten.  The previous code read some source fields via `tinfo`,
 * which only worked because of that aliasing. */
static void
host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
                                const struct signalfd_siginfo *info)
{
    int sig = host_to_target_signal(info->ssi_signo);

    /* linux/signalfd.h defines a ssi_addr_lsb
     * not defined in sys/signalfd.h but used by some kernels
     */

#ifdef BUS_MCEERR_AO
    if (info->ssi_signo == SIGBUS &&
        (info->ssi_code == BUS_MCEERR_AR ||
         info->ssi_code == BUS_MCEERR_AO)) {
        uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
        uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
        *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
    }
#endif

    tinfo->ssi_signo = tswap32(sig);
    tinfo->ssi_errno = tswap32(info->ssi_errno);
    tinfo->ssi_code = tswap32(info->ssi_code);
    tinfo->ssi_pid = tswap32(info->ssi_pid);
    tinfo->ssi_uid = tswap32(info->ssi_uid);
    tinfo->ssi_fd = tswap32(info->ssi_fd);
    tinfo->ssi_tid = tswap32(info->ssi_tid);
    tinfo->ssi_band = tswap32(info->ssi_band);
    tinfo->ssi_overrun = tswap32(info->ssi_overrun);
    tinfo->ssi_trapno = tswap32(info->ssi_trapno);
    tinfo->ssi_status = tswap32(info->ssi_status);
    tinfo->ssi_int = tswap32(info->ssi_int);
    tinfo->ssi_ptr = tswap64(info->ssi_ptr);
    tinfo->ssi_utime = tswap64(info->ssi_utime);
    tinfo->ssi_stime = tswap64(info->ssi_stime);
    tinfo->ssi_addr = tswap64(info->ssi_addr);
}
6943 static abi_long host_to_target_data_signalfd(void *buf, size_t len)
6947 for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) {
6948 host_to_target_signalfd_siginfo(buf + i, buf + i);
6954 static TargetFdTrans target_signalfd_trans = {
6955 .host_to_target_data = host_to_target_data_signalfd,
6958 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
6961 target_sigset_t *target_mask;
6965 if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
6966 return -TARGET_EINVAL;
6968 if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
6969 return -TARGET_EFAULT;
6972 target_to_host_sigset(&host_mask, target_mask);
6974 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
6976 ret = get_errno(signalfd(fd, &host_mask, host_flags));
6978 fd_trans_register(ret, &target_signalfd_trans);
6981 unlock_user_struct(target_mask, mask, 0);
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* low 7 bits hold the terminating signal; remap just those */
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* stopped: signal number lives in bits 8-15 */
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
/* Back a fake /proc/self/cmdline: stream the real one into fd, dropping
 * the first NUL-terminated string (the qemu binary path) so the guest
 * sees its own argv[0] first. */
static int open_self_cmdline(void *cpu_env, int fd)
{
    int fd_orig = -1;
    bool word_skipped = false;

    fd_orig = open("/proc/self/cmdline", O_RDONLY);
    if (fd_orig < 0) {
        return fd_orig;
    }

    while (true) {
        ssize_t nb_read;
        char buf[128];
        char *cp_buf = buf;

        nb_read = read(fd_orig, buf, sizeof(buf));
        if (nb_read < 0) {
            fd_orig = close(fd_orig);
            return -1;
        } else if (nb_read == 0) {
            break;
        }

        if (!word_skipped) {
            /* Skip the first string, which is the path to qemu-*-static
               instead of the actual command. */
            cp_buf = memchr(buf, 0, nb_read);
            if (cp_buf) {
                /* Null byte found, skip one string */
                cp_buf++;
                nb_read -= cp_buf - buf;
                word_skipped = true;
            }
        }

        if (word_skipped) {
            if (write(fd, cp_buf, nb_read) != nb_read) {
                close(fd_orig);
                return -1;
            }
        }
    }

    return close(fd_orig);
}
7051 static int open_self_maps(void *cpu_env, int fd)
7053 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7054 TaskState *ts = cpu->opaque;
7060 fp = fopen("/proc/self/maps", "r");
7065 while ((read = getline(&line, &len, fp)) != -1) {
7066 int fields, dev_maj, dev_min, inode;
7067 uint64_t min, max, offset;
7068 char flag_r, flag_w, flag_x, flag_p;
7069 char path[512] = "";
7070 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
7071 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
7072 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
7074 if ((fields < 10) || (fields > 11)) {
7077 if (h2g_valid(min)) {
7078 int flags = page_get_flags(h2g(min));
7079 max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX);
7080 if (page_check_range(h2g(min), max - min, flags) == -1) {
7083 if (h2g(min) == ts->info->stack_limit) {
7084 pstrcpy(path, sizeof(path), " [stack]");
7086 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
7087 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
7088 h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
7089 flag_x, flag_p, offset, dev_maj, dev_min, inode,
7090 path[0] ? " " : "", path);
7100 static int open_self_stat(void *cpu_env, int fd)
7102 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7103 TaskState *ts = cpu->opaque;
7104 abi_ulong start_stack = ts->info->start_stack;
7107 for (i = 0; i < 44; i++) {
7115 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7116 } else if (i == 1) {
7118 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
7119 } else if (i == 27) {
7122 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7124 /* for the rest, there is MasterCard */
7125 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
7129 if (write(fd, buf, len) != len) {
7137 static int open_self_auxv(void *cpu_env, int fd)
7139 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7140 TaskState *ts = cpu->opaque;
7141 abi_ulong auxv = ts->info->saved_auxv;
7142 abi_ulong len = ts->info->auxv_len;
7146 * Auxiliary vector is stored in target process stack.
7147 * read in whole auxv vector and copy it to file
7149 ptr = lock_user(VERIFY_READ, auxv, len, 0);
7153 r = write(fd, ptr, len);
7160 lseek(fd, 0, SEEK_SET);
7161 unlock_user(ptr, auxv, len);
/* Return 1 if filename names this process's own /proc entry `entry`
 * ("/proc/self/<entry>" or "/proc/<our pid>/<entry>"), else 0. */
static int is_proc_myself(const char *filename, const char *entry)
{
    if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
        filename += strlen("/proc/");
        if (!strncmp(filename, "self/", strlen("self/"))) {
            filename += strlen("self/");
        } else if (*filename >= '1' && *filename <= '9') {
            char myself[80];
            snprintf(myself, sizeof(myself), "%d/", getpid());
            if (!strncmp(filename, myself, strlen(myself))) {
                filename += strlen(myself);
            } else {
                return 0;
            }
        } else {
            return 0;
        }
        if (!strcmp(filename, entry)) {
            return 1;
        }
    }
    return 0;
}
7191 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact path match — used for /proc entries given as full paths. */
static int is_proc(const char *filename, const char *entry)
{
    return strcmp(filename, entry) == 0;
}
7197 static int open_net_route(void *cpu_env, int fd)
7204 fp = fopen("/proc/net/route", "r");
7211 read = getline(&line, &len, fp);
7212 dprintf(fd, "%s", line);
7216 while ((read = getline(&line, &len, fp)) != -1) {
7218 uint32_t dest, gw, mask;
7219 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7220 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7221 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7222 &mask, &mtu, &window, &irtt);
7223 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7224 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7225 metric, tswap32(mask), mtu, window, irtt);
7235 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
7238 const char *filename;
7239 int (*fill)(void *cpu_env, int fd);
7240 int (*cmp)(const char *s1, const char *s2);
7242 const struct fake_open *fake_open;
7243 static const struct fake_open fakes[] = {
7244 { "maps", open_self_maps, is_proc_myself },
7245 { "stat", open_self_stat, is_proc_myself },
7246 { "auxv", open_self_auxv, is_proc_myself },
7247 { "cmdline", open_self_cmdline, is_proc_myself },
7248 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7249 { "/proc/net/route", open_net_route, is_proc },
7251 { NULL, NULL, NULL }
7254 if (is_proc_myself(pathname, "exe")) {
7255 int execfd = qemu_getauxval(AT_EXECFD);
7256 return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
7259 for (fake_open = fakes; fake_open->filename; fake_open++) {
7260 if (fake_open->cmp(pathname, fake_open->filename)) {
7265 if (fake_open->filename) {
7267 char filename[PATH_MAX];
7270 /* create temporary file to map stat to */
7271 tmpdir = getenv("TMPDIR");
7274 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
7275 fd = mkstemp(filename);
7281 if ((r = fake_open->fill(cpu_env, fd))) {
7287 lseek(fd, 0, SEEK_SET);
7292 return safe_openat(dirfd, path(pathname), flags, mode);
7295 #define TIMER_MAGIC 0x0caf0000
7296 #define TIMER_MAGIC_MASK 0xffff0000
7298 /* Convert QEMU provided timer ID back to internal 16bit index format */
7299 static target_timer_t get_timer_id(abi_long arg)
7301 target_timer_t timerid = arg;
7303 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7304 return -TARGET_EINVAL;
7309 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7310 return -TARGET_EINVAL;
7316 /* do_syscall() should always have a single exit point at the end so
7317 that actions, such as logging of syscall results, can be performed.
7318 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
7319 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
7320 abi_long arg2, abi_long arg3, abi_long arg4,
7321 abi_long arg5, abi_long arg6, abi_long arg7,
7324 CPUState *cpu = ENV_GET_CPU(cpu_env);
7330 #if defined(DEBUG_ERESTARTSYS)
7331 /* Debug-only code for exercising the syscall-restart code paths
7332 * in the per-architecture cpu main loops: restart every syscall
7333 * the guest makes once before letting it through.
7340 return -TARGET_ERESTARTSYS;
7346 gemu_log("syscall %d", num);
7348 trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
7350 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
7353 case TARGET_NR_exit:
7354 /* In old applications this may be used to implement _exit(2).
7355 However in threaded applictions it is used for thread termination,
7356 and _exit_group is used for application termination.
7357 Do thread termination if we have more then one thread. */
7359 if (block_signals()) {
7360 ret = -TARGET_ERESTARTSYS;
7364 if (CPU_NEXT(first_cpu)) {
7368 /* Remove the CPU from the list. */
7369 QTAILQ_REMOVE(&cpus, cpu, node);
7372 if (ts->child_tidptr) {
7373 put_user_u32(0, ts->child_tidptr);
7374 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7378 object_unref(OBJECT(cpu));
7380 rcu_unregister_thread();
7386 gdb_exit(cpu_env, arg1);
7388 ret = 0; /* avoid warning */
7390 case TARGET_NR_read:
7394 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7396 ret = get_errno(safe_read(arg1, p, arg3));
7398 fd_trans_host_to_target_data(arg1)) {
7399 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7401 unlock_user(p, arg2, ret);
7404 case TARGET_NR_write:
7405 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7407 ret = get_errno(safe_write(arg1, p, arg3));
7408 unlock_user(p, arg2, 0);
7410 #ifdef TARGET_NR_open
7411 case TARGET_NR_open:
7412 if (!(p = lock_user_string(arg1)))
7414 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7415 target_to_host_bitmask(arg2, fcntl_flags_tbl),
7417 fd_trans_unregister(ret);
7418 unlock_user(p, arg1, 0);
7421 case TARGET_NR_openat:
7422 if (!(p = lock_user_string(arg2)))
7424 ret = get_errno(do_openat(cpu_env, arg1, p,
7425 target_to_host_bitmask(arg3, fcntl_flags_tbl),
7427 fd_trans_unregister(ret);
7428 unlock_user(p, arg2, 0);
7430 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7431 case TARGET_NR_name_to_handle_at:
7432 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7435 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7436 case TARGET_NR_open_by_handle_at:
7437 ret = do_open_by_handle_at(arg1, arg2, arg3);
7438 fd_trans_unregister(ret);
7441 case TARGET_NR_close:
7442 fd_trans_unregister(arg1);
7443 ret = get_errno(close(arg1));
7448 #ifdef TARGET_NR_fork
7449 case TARGET_NR_fork:
7450 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
7453 #ifdef TARGET_NR_waitpid
7454 case TARGET_NR_waitpid:
7457 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7458 if (!is_error(ret) && arg2 && ret
7459 && put_user_s32(host_to_target_waitstatus(status), arg2))
7464 #ifdef TARGET_NR_waitid
7465 case TARGET_NR_waitid:
7469 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7470 if (!is_error(ret) && arg3 && info.si_pid != 0) {
7471 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7473 host_to_target_siginfo(p, &info);
7474 unlock_user(p, arg3, sizeof(target_siginfo_t));
7479 #ifdef TARGET_NR_creat /* not on alpha */
7480 case TARGET_NR_creat:
7481 if (!(p = lock_user_string(arg1)))
7483 ret = get_errno(creat(p, arg2));
7484 fd_trans_unregister(ret);
7485 unlock_user(p, arg1, 0);
7488 #ifdef TARGET_NR_link
7489 case TARGET_NR_link:
7492 p = lock_user_string(arg1);
7493 p2 = lock_user_string(arg2);
7495 ret = -TARGET_EFAULT;
7497 ret = get_errno(link(p, p2));
7498 unlock_user(p2, arg2, 0);
7499 unlock_user(p, arg1, 0);
7503 #if defined(TARGET_NR_linkat)
7504 case TARGET_NR_linkat:
7509 p = lock_user_string(arg2);
7510 p2 = lock_user_string(arg4);
7512 ret = -TARGET_EFAULT;
7514 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7515 unlock_user(p, arg2, 0);
7516 unlock_user(p2, arg4, 0);
7520 #ifdef TARGET_NR_unlink
7521 case TARGET_NR_unlink:
7522 if (!(p = lock_user_string(arg1)))
7524 ret = get_errno(unlink(p));
7525 unlock_user(p, arg1, 0);
7528 #if defined(TARGET_NR_unlinkat)
7529 case TARGET_NR_unlinkat:
7530 if (!(p = lock_user_string(arg2)))
7532 ret = get_errno(unlinkat(arg1, p, arg3));
7533 unlock_user(p, arg2, 0);
7536 case TARGET_NR_execve:
7538 char **argp, **envp;
7541 abi_ulong guest_argp;
7542 abi_ulong guest_envp;
7549 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7550 if (get_user_ual(addr, gp))
7558 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7559 if (get_user_ual(addr, gp))
7566 argp = alloca((argc + 1) * sizeof(void *));
7567 envp = alloca((envc + 1) * sizeof(void *));
7569 for (gp = guest_argp, q = argp; gp;
7570 gp += sizeof(abi_ulong), q++) {
7571 if (get_user_ual(addr, gp))
7575 if (!(*q = lock_user_string(addr)))
7577 total_size += strlen(*q) + 1;
7581 for (gp = guest_envp, q = envp; gp;
7582 gp += sizeof(abi_ulong), q++) {
7583 if (get_user_ual(addr, gp))
7587 if (!(*q = lock_user_string(addr)))
7589 total_size += strlen(*q) + 1;
7593 if (!(p = lock_user_string(arg1)))
7595 /* Although execve() is not an interruptible syscall it is
7596 * a special case where we must use the safe_syscall wrapper:
7597 * if we allow a signal to happen before we make the host
7598 * syscall then we will 'lose' it, because at the point of
7599 * execve the process leaves QEMU's control. So we use the
7600 * safe syscall wrapper to ensure that we either take the
7601 * signal as a guest signal, or else it does not happen
7602 * before the execve completes and makes it the other
7603 * program's problem.
7605 ret = get_errno(safe_execve(p, argp, envp));
7606 unlock_user(p, arg1, 0);
7611 ret = -TARGET_EFAULT;
7614 for (gp = guest_argp, q = argp; *q;
7615 gp += sizeof(abi_ulong), q++) {
7616 if (get_user_ual(addr, gp)
7619 unlock_user(*q, addr, 0);
7621 for (gp = guest_envp, q = envp; *q;
7622 gp += sizeof(abi_ulong), q++) {
7623 if (get_user_ual(addr, gp)
7626 unlock_user(*q, addr, 0);
7630 case TARGET_NR_chdir:
7631 if (!(p = lock_user_string(arg1)))
7633 ret = get_errno(chdir(p));
7634 unlock_user(p, arg1, 0);
7636 #ifdef TARGET_NR_time
7637 case TARGET_NR_time:
7640 ret = get_errno(time(&host_time));
7643 && put_user_sal(host_time, arg1))
7648 #ifdef TARGET_NR_mknod
7649 case TARGET_NR_mknod:
7650 if (!(p = lock_user_string(arg1)))
7652 ret = get_errno(mknod(p, arg2, arg3));
7653 unlock_user(p, arg1, 0);
7656 #if defined(TARGET_NR_mknodat)
7657 case TARGET_NR_mknodat:
7658 if (!(p = lock_user_string(arg2)))
7660 ret = get_errno(mknodat(arg1, p, arg3, arg4));
7661 unlock_user(p, arg2, 0);
7664 #ifdef TARGET_NR_chmod
7665 case TARGET_NR_chmod:
7666 if (!(p = lock_user_string(arg1)))
7668 ret = get_errno(chmod(p, arg2));
7669 unlock_user(p, arg1, 0);
7672 #ifdef TARGET_NR_break
7673 case TARGET_NR_break:
7676 #ifdef TARGET_NR_oldstat
7677 case TARGET_NR_oldstat:
7680 case TARGET_NR_lseek:
7681 ret = get_errno(lseek(arg1, arg2, arg3));
7683 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7684 /* Alpha specific */
7685 case TARGET_NR_getxpid:
7686 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7687 ret = get_errno(getpid());
7690 #ifdef TARGET_NR_getpid
7691 case TARGET_NR_getpid:
7692 ret = get_errno(getpid());
7695 case TARGET_NR_mount:
7697 /* need to look at the data field */
7701 p = lock_user_string(arg1);
7709 p2 = lock_user_string(arg2);
7712 unlock_user(p, arg1, 0);
7718 p3 = lock_user_string(arg3);
7721 unlock_user(p, arg1, 0);
7723 unlock_user(p2, arg2, 0);
7730 /* FIXME - arg5 should be locked, but it isn't clear how to
7731 * do that since it's not guaranteed to be a NULL-terminated
7735 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7737 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7739 ret = get_errno(ret);
7742 unlock_user(p, arg1, 0);
7744 unlock_user(p2, arg2, 0);
7746 unlock_user(p3, arg3, 0);
7750 #ifdef TARGET_NR_umount
7751 case TARGET_NR_umount:
7752 if (!(p = lock_user_string(arg1)))
7754 ret = get_errno(umount(p));
7755 unlock_user(p, arg1, 0);
7758 #ifdef TARGET_NR_stime /* not on alpha */
7759 case TARGET_NR_stime:
7762 if (get_user_sal(host_time, arg1))
7764 ret = get_errno(stime(&host_time));
7768 case TARGET_NR_ptrace:
7770 #ifdef TARGET_NR_alarm /* not on alpha */
7771 case TARGET_NR_alarm:
7775 #ifdef TARGET_NR_oldfstat
7776 case TARGET_NR_oldfstat:
7779 #ifdef TARGET_NR_pause /* not on alpha */
7780 case TARGET_NR_pause:
7781 if (!block_signals()) {
7782 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7784 ret = -TARGET_EINTR;
7787 #ifdef TARGET_NR_utime
7788 case TARGET_NR_utime:
7790 struct utimbuf tbuf, *host_tbuf;
7791 struct target_utimbuf *target_tbuf;
7793 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
7795 tbuf.actime = tswapal(target_tbuf->actime);
7796 tbuf.modtime = tswapal(target_tbuf->modtime);
7797 unlock_user_struct(target_tbuf, arg2, 0);
7802 if (!(p = lock_user_string(arg1)))
7804 ret = get_errno(utime(p, host_tbuf));
7805 unlock_user(p, arg1, 0);
7809 #ifdef TARGET_NR_utimes
7810 case TARGET_NR_utimes:
7812 struct timeval *tvp, tv[2];
7814 if (copy_from_user_timeval(&tv[0], arg2)
7815 || copy_from_user_timeval(&tv[1],
7816 arg2 + sizeof(struct target_timeval)))
7822 if (!(p = lock_user_string(arg1)))
7824 ret = get_errno(utimes(p, tvp));
7825 unlock_user(p, arg1, 0);
7829 #if defined(TARGET_NR_futimesat)
7830 case TARGET_NR_futimesat:
7832 struct timeval *tvp, tv[2];
7834 if (copy_from_user_timeval(&tv[0], arg3)
7835 || copy_from_user_timeval(&tv[1],
7836 arg3 + sizeof(struct target_timeval)))
7842 if (!(p = lock_user_string(arg2)))
7844 ret = get_errno(futimesat(arg1, path(p), tvp));
7845 unlock_user(p, arg2, 0);
7849 #ifdef TARGET_NR_stty
7850 case TARGET_NR_stty:
7853 #ifdef TARGET_NR_gtty
7854 case TARGET_NR_gtty:
7857 #ifdef TARGET_NR_access
7858 case TARGET_NR_access:
7859 if (!(p = lock_user_string(arg1)))
7861 ret = get_errno(access(path(p), arg2));
7862 unlock_user(p, arg1, 0);
7865 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7866 case TARGET_NR_faccessat:
7867 if (!(p = lock_user_string(arg2)))
7869 ret = get_errno(faccessat(arg1, p, arg3, 0));
7870 unlock_user(p, arg2, 0);
7873 #ifdef TARGET_NR_nice /* not on alpha */
7874 case TARGET_NR_nice:
7875 ret = get_errno(nice(arg1));
7878 #ifdef TARGET_NR_ftime
7879 case TARGET_NR_ftime:
7882 case TARGET_NR_sync:
7886 case TARGET_NR_kill:
7887 ret = get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
7889 #ifdef TARGET_NR_rename
7890 case TARGET_NR_rename:
7893 p = lock_user_string(arg1);
7894 p2 = lock_user_string(arg2);
7896 ret = -TARGET_EFAULT;
7898 ret = get_errno(rename(p, p2));
7899 unlock_user(p2, arg2, 0);
7900 unlock_user(p, arg1, 0);
7904 #if defined(TARGET_NR_renameat)
7905 case TARGET_NR_renameat:
7908 p = lock_user_string(arg2);
7909 p2 = lock_user_string(arg4);
7911 ret = -TARGET_EFAULT;
7913 ret = get_errno(renameat(arg1, p, arg3, p2));
7914 unlock_user(p2, arg4, 0);
7915 unlock_user(p, arg2, 0);
7919 #ifdef TARGET_NR_mkdir
7920 case TARGET_NR_mkdir:
7921 if (!(p = lock_user_string(arg1)))
7923 ret = get_errno(mkdir(p, arg2));
7924 unlock_user(p, arg1, 0);
7927 #if defined(TARGET_NR_mkdirat)
7928 case TARGET_NR_mkdirat:
7929 if (!(p = lock_user_string(arg2)))
7931 ret = get_errno(mkdirat(arg1, p, arg3));
7932 unlock_user(p, arg2, 0);
7935 #ifdef TARGET_NR_rmdir
7936 case TARGET_NR_rmdir:
7937 if (!(p = lock_user_string(arg1)))
7939 ret = get_errno(rmdir(p));
7940 unlock_user(p, arg1, 0);
7944 ret = get_errno(dup(arg1));
7946 fd_trans_dup(arg1, ret);
7949 #ifdef TARGET_NR_pipe
7950 case TARGET_NR_pipe:
7951 ret = do_pipe(cpu_env, arg1, 0, 0);
7954 #ifdef TARGET_NR_pipe2
7955 case TARGET_NR_pipe2:
7956 ret = do_pipe(cpu_env, arg1,
7957 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
7960 case TARGET_NR_times:
7962 struct target_tms *tmsp;
7964 ret = get_errno(times(&tms));
7966 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
7969 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
7970 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
7971 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
7972 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
7975 ret = host_to_target_clock_t(ret);
7978 #ifdef TARGET_NR_prof
7979 case TARGET_NR_prof:
7982 #ifdef TARGET_NR_signal
7983 case TARGET_NR_signal:
7986 case TARGET_NR_acct:
7988 ret = get_errno(acct(NULL));
7990 if (!(p = lock_user_string(arg1)))
7992 ret = get_errno(acct(path(p)));
7993 unlock_user(p, arg1, 0);
7996 #ifdef TARGET_NR_umount2
7997 case TARGET_NR_umount2:
7998 if (!(p = lock_user_string(arg1)))
8000 ret = get_errno(umount2(p, arg2));
8001 unlock_user(p, arg1, 0);
8004 #ifdef TARGET_NR_lock
8005 case TARGET_NR_lock:
8008 case TARGET_NR_ioctl:
8009 ret = do_ioctl(arg1, arg2, arg3);
8011 case TARGET_NR_fcntl:
8012 ret = do_fcntl(arg1, arg2, arg3);
8014 #ifdef TARGET_NR_mpx
8018 case TARGET_NR_setpgid:
8019 ret = get_errno(setpgid(arg1, arg2));
8021 #ifdef TARGET_NR_ulimit
8022 case TARGET_NR_ulimit:
8025 #ifdef TARGET_NR_oldolduname
8026 case TARGET_NR_oldolduname:
8029 case TARGET_NR_umask:
8030 ret = get_errno(umask(arg1));
8032 case TARGET_NR_chroot:
8033 if (!(p = lock_user_string(arg1)))
8035 ret = get_errno(chroot(p));
8036 unlock_user(p, arg1, 0);
8038 #ifdef TARGET_NR_ustat
8039 case TARGET_NR_ustat:
8042 #ifdef TARGET_NR_dup2
8043 case TARGET_NR_dup2:
8044 ret = get_errno(dup2(arg1, arg2));
8046 fd_trans_dup(arg1, arg2);
8050 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8051 case TARGET_NR_dup3:
8052 ret = get_errno(dup3(arg1, arg2, arg3));
8054 fd_trans_dup(arg1, arg2);
8058 #ifdef TARGET_NR_getppid /* not on alpha */
8059 case TARGET_NR_getppid:
8060 ret = get_errno(getppid());
8063 #ifdef TARGET_NR_getpgrp
8064 case TARGET_NR_getpgrp:
8065 ret = get_errno(getpgrp());
8068 case TARGET_NR_setsid:
8069 ret = get_errno(setsid());
8071 #ifdef TARGET_NR_sigaction
8072 case TARGET_NR_sigaction:
8074 #if defined(TARGET_ALPHA)
8075 struct target_sigaction act, oact, *pact = 0;
8076 struct target_old_sigaction *old_act;
8078 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8080 act._sa_handler = old_act->_sa_handler;
8081 target_siginitset(&act.sa_mask, old_act->sa_mask);
8082 act.sa_flags = old_act->sa_flags;
8083 act.sa_restorer = 0;
8084 unlock_user_struct(old_act, arg2, 0);
8087 ret = get_errno(do_sigaction(arg1, pact, &oact));
8088 if (!is_error(ret) && arg3) {
8089 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8091 old_act->_sa_handler = oact._sa_handler;
8092 old_act->sa_mask = oact.sa_mask.sig[0];
8093 old_act->sa_flags = oact.sa_flags;
8094 unlock_user_struct(old_act, arg3, 1);
8096 #elif defined(TARGET_MIPS)
8097 struct target_sigaction act, oact, *pact, *old_act;
8100 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8102 act._sa_handler = old_act->_sa_handler;
8103 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8104 act.sa_flags = old_act->sa_flags;
8105 unlock_user_struct(old_act, arg2, 0);
8111 ret = get_errno(do_sigaction(arg1, pact, &oact));
8113 if (!is_error(ret) && arg3) {
8114 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8116 old_act->_sa_handler = oact._sa_handler;
8117 old_act->sa_flags = oact.sa_flags;
8118 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8119 old_act->sa_mask.sig[1] = 0;
8120 old_act->sa_mask.sig[2] = 0;
8121 old_act->sa_mask.sig[3] = 0;
8122 unlock_user_struct(old_act, arg3, 1);
8125 struct target_old_sigaction *old_act;
8126 struct target_sigaction act, oact, *pact;
8128 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8130 act._sa_handler = old_act->_sa_handler;
8131 target_siginitset(&act.sa_mask, old_act->sa_mask);
8132 act.sa_flags = old_act->sa_flags;
8133 act.sa_restorer = old_act->sa_restorer;
8134 unlock_user_struct(old_act, arg2, 0);
8139 ret = get_errno(do_sigaction(arg1, pact, &oact));
8140 if (!is_error(ret) && arg3) {
8141 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8143 old_act->_sa_handler = oact._sa_handler;
8144 old_act->sa_mask = oact.sa_mask.sig[0];
8145 old_act->sa_flags = oact.sa_flags;
8146 old_act->sa_restorer = oact.sa_restorer;
8147 unlock_user_struct(old_act, arg3, 1);
8153 case TARGET_NR_rt_sigaction:
8155 #if defined(TARGET_ALPHA)
8156 struct target_sigaction act, oact, *pact = 0;
8157 struct target_rt_sigaction *rt_act;
8159 if (arg4 != sizeof(target_sigset_t)) {
8160 ret = -TARGET_EINVAL;
8164 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8166 act._sa_handler = rt_act->_sa_handler;
8167 act.sa_mask = rt_act->sa_mask;
8168 act.sa_flags = rt_act->sa_flags;
8169 act.sa_restorer = arg5;
8170 unlock_user_struct(rt_act, arg2, 0);
8173 ret = get_errno(do_sigaction(arg1, pact, &oact));
8174 if (!is_error(ret) && arg3) {
8175 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8177 rt_act->_sa_handler = oact._sa_handler;
8178 rt_act->sa_mask = oact.sa_mask;
8179 rt_act->sa_flags = oact.sa_flags;
8180 unlock_user_struct(rt_act, arg3, 1);
8183 struct target_sigaction *act;
8184 struct target_sigaction *oact;
8186 if (arg4 != sizeof(target_sigset_t)) {
8187 ret = -TARGET_EINVAL;
8191 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
8196 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8197 ret = -TARGET_EFAULT;
8198 goto rt_sigaction_fail;
8202 ret = get_errno(do_sigaction(arg1, act, oact));
8205 unlock_user_struct(act, arg2, 0);
8207 unlock_user_struct(oact, arg3, 1);
8211 #ifdef TARGET_NR_sgetmask /* not on alpha */
8212 case TARGET_NR_sgetmask:
8215 abi_ulong target_set;
8216 ret = do_sigprocmask(0, NULL, &cur_set);
8218 host_to_target_old_sigset(&target_set, &cur_set);
8224 #ifdef TARGET_NR_ssetmask /* not on alpha */
8225 case TARGET_NR_ssetmask:
8227 sigset_t set, oset, cur_set;
8228 abi_ulong target_set = arg1;
8229 /* We only have one word of the new mask so we must read
8230 * the rest of it with do_sigprocmask() and OR in this word.
8231 * We are guaranteed that a do_sigprocmask() that only queries
8232 * the signal mask will not fail.
8234 ret = do_sigprocmask(0, NULL, &cur_set);
8236 target_to_host_old_sigset(&set, &target_set);
8237 sigorset(&set, &set, &cur_set);
8238 ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8240 host_to_target_old_sigset(&target_set, &oset);
8246 #ifdef TARGET_NR_sigprocmask
8247 case TARGET_NR_sigprocmask:
8249 #if defined(TARGET_ALPHA)
8250 sigset_t set, oldset;
8255 case TARGET_SIG_BLOCK:
8258 case TARGET_SIG_UNBLOCK:
8261 case TARGET_SIG_SETMASK:
8265 ret = -TARGET_EINVAL;
8269 target_to_host_old_sigset(&set, &mask);
8271 ret = do_sigprocmask(how, &set, &oldset);
8272 if (!is_error(ret)) {
8273 host_to_target_old_sigset(&mask, &oldset);
8275 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8278 sigset_t set, oldset, *set_ptr;
8283 case TARGET_SIG_BLOCK:
8286 case TARGET_SIG_UNBLOCK:
8289 case TARGET_SIG_SETMASK:
8293 ret = -TARGET_EINVAL;
8296 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8298 target_to_host_old_sigset(&set, p);
8299 unlock_user(p, arg2, 0);
8305 ret = do_sigprocmask(how, set_ptr, &oldset);
8306 if (!is_error(ret) && arg3) {
8307 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8309 host_to_target_old_sigset(p, &oldset);
8310 unlock_user(p, arg3, sizeof(target_sigset_t));
8316 case TARGET_NR_rt_sigprocmask:
8319 sigset_t set, oldset, *set_ptr;
8321 if (arg4 != sizeof(target_sigset_t)) {
8322 ret = -TARGET_EINVAL;
8328 case TARGET_SIG_BLOCK:
8331 case TARGET_SIG_UNBLOCK:
8334 case TARGET_SIG_SETMASK:
8338 ret = -TARGET_EINVAL;
8341 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8343 target_to_host_sigset(&set, p);
8344 unlock_user(p, arg2, 0);
8350 ret = do_sigprocmask(how, set_ptr, &oldset);
8351 if (!is_error(ret) && arg3) {
8352 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8354 host_to_target_sigset(p, &oldset);
8355 unlock_user(p, arg3, sizeof(target_sigset_t));
8359 #ifdef TARGET_NR_sigpending
8360 case TARGET_NR_sigpending:
8363 ret = get_errno(sigpending(&set));
8364 if (!is_error(ret)) {
8365 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8367 host_to_target_old_sigset(p, &set);
8368 unlock_user(p, arg1, sizeof(target_sigset_t));
8373 case TARGET_NR_rt_sigpending:
8377 /* Yes, this check is >, not != like most. We follow the kernel's
8378 * logic and it does it like this because it implements
8379 * NR_sigpending through the same code path, and in that case
8380 * the old_sigset_t is smaller in size.
8382 if (arg2 > sizeof(target_sigset_t)) {
8383 ret = -TARGET_EINVAL;
8387 ret = get_errno(sigpending(&set));
8388 if (!is_error(ret)) {
8389 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8391 host_to_target_sigset(p, &set);
8392 unlock_user(p, arg1, sizeof(target_sigset_t));
8396 #ifdef TARGET_NR_sigsuspend
8397 case TARGET_NR_sigsuspend:
8399 TaskState *ts = cpu->opaque;
8400 #if defined(TARGET_ALPHA)
8401 abi_ulong mask = arg1;
8402 target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8404 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8406 target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8407 unlock_user(p, arg1, 0);
8409 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8411 if (ret != -TARGET_ERESTARTSYS) {
8412 ts->in_sigsuspend = 1;
8417 case TARGET_NR_rt_sigsuspend:
8419 TaskState *ts = cpu->opaque;
8421 if (arg2 != sizeof(target_sigset_t)) {
8422 ret = -TARGET_EINVAL;
8425 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8427 target_to_host_sigset(&ts->sigsuspend_mask, p);
8428 unlock_user(p, arg1, 0);
8429 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8431 if (ret != -TARGET_ERESTARTSYS) {
8432 ts->in_sigsuspend = 1;
8436 case TARGET_NR_rt_sigtimedwait:
8439 struct timespec uts, *puts;
8442 if (arg4 != sizeof(target_sigset_t)) {
8443 ret = -TARGET_EINVAL;
8447 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8449 target_to_host_sigset(&set, p);
8450 unlock_user(p, arg1, 0);
8453 target_to_host_timespec(puts, arg3);
8457 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8459 if (!is_error(ret)) {
8461 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8466 host_to_target_siginfo(p, &uinfo);
8467 unlock_user(p, arg2, sizeof(target_siginfo_t));
8469 ret = host_to_target_signal(ret);
8473 case TARGET_NR_rt_sigqueueinfo:
8477 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8481 target_to_host_siginfo(&uinfo, p);
8482 unlock_user(p, arg1, 0);
8483 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8486 #ifdef TARGET_NR_sigreturn
8487 case TARGET_NR_sigreturn:
8488 if (block_signals()) {
8489 ret = -TARGET_ERESTARTSYS;
8491 ret = do_sigreturn(cpu_env);
8495 case TARGET_NR_rt_sigreturn:
8496 if (block_signals()) {
8497 ret = -TARGET_ERESTARTSYS;
8499 ret = do_rt_sigreturn(cpu_env);
8502 case TARGET_NR_sethostname:
8503 if (!(p = lock_user_string(arg1)))
8505 ret = get_errno(sethostname(p, arg2));
8506 unlock_user(p, arg1, 0);
8508 case TARGET_NR_setrlimit:
8510 int resource = target_to_host_resource(arg1);
8511 struct target_rlimit *target_rlim;
8513 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8515 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8516 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8517 unlock_user_struct(target_rlim, arg2, 0);
8518 ret = get_errno(setrlimit(resource, &rlim));
8521 case TARGET_NR_getrlimit:
8523 int resource = target_to_host_resource(arg1);
8524 struct target_rlimit *target_rlim;
8527 ret = get_errno(getrlimit(resource, &rlim));
8528 if (!is_error(ret)) {
8529 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8531 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8532 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8533 unlock_user_struct(target_rlim, arg2, 1);
8537 case TARGET_NR_getrusage:
8539 struct rusage rusage;
8540 ret = get_errno(getrusage(arg1, &rusage));
8541 if (!is_error(ret)) {
8542 ret = host_to_target_rusage(arg2, &rusage);
8546 case TARGET_NR_gettimeofday:
8549 ret = get_errno(gettimeofday(&tv, NULL));
8550 if (!is_error(ret)) {
8551 if (copy_to_user_timeval(arg1, &tv))
8556 case TARGET_NR_settimeofday:
8558 struct timeval tv, *ptv = NULL;
8559 struct timezone tz, *ptz = NULL;
8562 if (copy_from_user_timeval(&tv, arg1)) {
8569 if (copy_from_user_timezone(&tz, arg2)) {
8575 ret = get_errno(settimeofday(ptv, ptz));
8578 #if defined(TARGET_NR_select)
8579 case TARGET_NR_select:
8580 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
8581 ret = do_select(arg1, arg2, arg3, arg4, arg5);
8584 struct target_sel_arg_struct *sel;
8585 abi_ulong inp, outp, exp, tvp;
8588 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
8590 nsel = tswapal(sel->n);
8591 inp = tswapal(sel->inp);
8592 outp = tswapal(sel->outp);
8593 exp = tswapal(sel->exp);
8594 tvp = tswapal(sel->tvp);
8595 unlock_user_struct(sel, arg1, 0);
8596 ret = do_select(nsel, inp, outp, exp, tvp);
8601 #ifdef TARGET_NR_pselect6
8602 case TARGET_NR_pselect6:
8604 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
8605 fd_set rfds, wfds, efds;
8606 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
8607 struct timespec ts, *ts_ptr;
8610 * The 6th arg is actually two args smashed together,
8611 * so we cannot use the C library.
8619 abi_ulong arg_sigset, arg_sigsize, *arg7;
8620 target_sigset_t *target_sigset;
8628 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
8632 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
8636 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
8642 * This takes a timespec, and not a timeval, so we cannot
8643 * use the do_select() helper ...
8646 if (target_to_host_timespec(&ts, ts_addr)) {
8654 /* Extract the two packed args for the sigset */
8657 sig.size = SIGSET_T_SIZE;
8659 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
8663 arg_sigset = tswapal(arg7[0]);
8664 arg_sigsize = tswapal(arg7[1]);
8665 unlock_user(arg7, arg6, 0);
8669 if (arg_sigsize != sizeof(*target_sigset)) {
8670 /* Like the kernel, we enforce correct size sigsets */
8671 ret = -TARGET_EINVAL;
8674 target_sigset = lock_user(VERIFY_READ, arg_sigset,
8675 sizeof(*target_sigset), 1);
8676 if (!target_sigset) {
8679 target_to_host_sigset(&set, target_sigset);
8680 unlock_user(target_sigset, arg_sigset, 0);
8688 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
8691 if (!is_error(ret)) {
8692 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
8694 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
8696 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
8699 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
8705 #ifdef TARGET_NR_symlink
8706 case TARGET_NR_symlink:
8709 p = lock_user_string(arg1);
8710 p2 = lock_user_string(arg2);
8712 ret = -TARGET_EFAULT;
8714 ret = get_errno(symlink(p, p2));
8715 unlock_user(p2, arg2, 0);
8716 unlock_user(p, arg1, 0);
8720 #if defined(TARGET_NR_symlinkat)
8721 case TARGET_NR_symlinkat:
8724 p = lock_user_string(arg1);
8725 p2 = lock_user_string(arg3);
8727 ret = -TARGET_EFAULT;
8729 ret = get_errno(symlinkat(p, arg2, p2));
8730 unlock_user(p2, arg3, 0);
8731 unlock_user(p, arg1, 0);
8735 #ifdef TARGET_NR_oldlstat
8736 case TARGET_NR_oldlstat:
8739 #ifdef TARGET_NR_readlink
8740 case TARGET_NR_readlink:
8743 p = lock_user_string(arg1);
8744 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8746 ret = -TARGET_EFAULT;
8748 /* Short circuit this for the magic exe check. */
8749 ret = -TARGET_EINVAL;
8750 } else if (is_proc_myself((const char *)p, "exe")) {
8751 char real[PATH_MAX], *temp;
8752 temp = realpath(exec_path, real);
8753 /* Return value is # of bytes that we wrote to the buffer. */
8755 ret = get_errno(-1);
8757 /* Don't worry about sign mismatch as earlier mapping
8758 * logic would have thrown a bad address error. */
8759 ret = MIN(strlen(real), arg3);
8760 /* We cannot NUL terminate the string. */
8761 memcpy(p2, real, ret);
8764 ret = get_errno(readlink(path(p), p2, arg3));
8766 unlock_user(p2, arg2, ret);
8767 unlock_user(p, arg1, 0);
8771 #if defined(TARGET_NR_readlinkat)
8772 case TARGET_NR_readlinkat:
8775 p = lock_user_string(arg2);
8776 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8778 ret = -TARGET_EFAULT;
8779 } else if (is_proc_myself((const char *)p, "exe")) {
8780 char real[PATH_MAX], *temp;
8781 temp = realpath(exec_path, real);
8782 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
8783 snprintf((char *)p2, arg4, "%s", real);
8785 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
8787 unlock_user(p2, arg3, ret);
8788 unlock_user(p, arg2, 0);
8792 #ifdef TARGET_NR_uselib
8793 case TARGET_NR_uselib:
8796 #ifdef TARGET_NR_swapon
8797 case TARGET_NR_swapon:
8798 if (!(p = lock_user_string(arg1)))
8800 ret = get_errno(swapon(p, arg2));
8801 unlock_user(p, arg1, 0);
8804 case TARGET_NR_reboot:
8805 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
8806 /* arg4 must be ignored in all other cases */
8807 p = lock_user_string(arg4);
8811 ret = get_errno(reboot(arg1, arg2, arg3, p));
8812 unlock_user(p, arg4, 0);
8814 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
8817 #ifdef TARGET_NR_readdir
8818 case TARGET_NR_readdir:
8821 #ifdef TARGET_NR_mmap
8822 case TARGET_NR_mmap:
8823 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8824 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8825 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8826 || defined(TARGET_S390X)
8829 abi_ulong v1, v2, v3, v4, v5, v6;
8830 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
8838 unlock_user(v, arg1, 0);
8839 ret = get_errno(target_mmap(v1, v2, v3,
8840 target_to_host_bitmask(v4, mmap_flags_tbl),
8844 ret = get_errno(target_mmap(arg1, arg2, arg3,
8845 target_to_host_bitmask(arg4, mmap_flags_tbl),
8851 #ifdef TARGET_NR_mmap2
8852 case TARGET_NR_mmap2:
8854 #define MMAP_SHIFT 12
8856 ret = get_errno(target_mmap(arg1, arg2, arg3,
8857 target_to_host_bitmask(arg4, mmap_flags_tbl),
8859 arg6 << MMAP_SHIFT));
8862 case TARGET_NR_munmap:
8863 ret = get_errno(target_munmap(arg1, arg2));
8865 case TARGET_NR_mprotect:
8867 TaskState *ts = cpu->opaque;
8868 /* Special hack to detect libc making the stack executable. */
8869 if ((arg3 & PROT_GROWSDOWN)
8870 && arg1 >= ts->info->stack_limit
8871 && arg1 <= ts->info->start_stack) {
8872 arg3 &= ~PROT_GROWSDOWN;
8873 arg2 = arg2 + arg1 - ts->info->stack_limit;
8874 arg1 = ts->info->stack_limit;
8877 ret = get_errno(target_mprotect(arg1, arg2, arg3));
8879 #ifdef TARGET_NR_mremap
8880 case TARGET_NR_mremap:
8881 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
8884 /* ??? msync/mlock/munlock are broken for softmmu. */
8885 #ifdef TARGET_NR_msync
8886 case TARGET_NR_msync:
8887 ret = get_errno(msync(g2h(arg1), arg2, arg3));
8890 #ifdef TARGET_NR_mlock
8891 case TARGET_NR_mlock:
8892 ret = get_errno(mlock(g2h(arg1), arg2));
8895 #ifdef TARGET_NR_munlock
8896 case TARGET_NR_munlock:
8897 ret = get_errno(munlock(g2h(arg1), arg2));
8900 #ifdef TARGET_NR_mlockall
8901 case TARGET_NR_mlockall:
8902 ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
8905 #ifdef TARGET_NR_munlockall
8906 case TARGET_NR_munlockall:
8907 ret = get_errno(munlockall());
8910 case TARGET_NR_truncate:
8911 if (!(p = lock_user_string(arg1)))
8913 ret = get_errno(truncate(p, arg2));
8914 unlock_user(p, arg1, 0);
8916 case TARGET_NR_ftruncate:
8917 ret = get_errno(ftruncate(arg1, arg2));
8919 case TARGET_NR_fchmod:
8920 ret = get_errno(fchmod(arg1, arg2));
8922 #if defined(TARGET_NR_fchmodat)
8923 case TARGET_NR_fchmodat:
8924 if (!(p = lock_user_string(arg2)))
8926 ret = get_errno(fchmodat(arg1, p, arg3, 0));
8927 unlock_user(p, arg2, 0);
8930 case TARGET_NR_getpriority:
8931 /* Note that negative values are valid for getpriority, so we must
8932 differentiate based on errno settings. */
8934 ret = getpriority(arg1, arg2);
8935 if (ret == -1 && errno != 0) {
8936 ret = -host_to_target_errno(errno);
8940 /* Return value is the unbiased priority. Signal no error. */
8941 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
8943 /* Return value is a biased priority to avoid negative numbers. */
8947 case TARGET_NR_setpriority:
8948 ret = get_errno(setpriority(arg1, arg2, arg3));
8950 #ifdef TARGET_NR_profil
8951 case TARGET_NR_profil:
8954 case TARGET_NR_statfs:
8955 if (!(p = lock_user_string(arg1)))
8957 ret = get_errno(statfs(path(p), &stfs));
8958 unlock_user(p, arg1, 0);
8960 if (!is_error(ret)) {
8961 struct target_statfs *target_stfs;
8963 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
8965 __put_user(stfs.f_type, &target_stfs->f_type);
8966 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8967 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8968 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8969 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8970 __put_user(stfs.f_files, &target_stfs->f_files);
8971 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8972 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8973 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8974 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8975 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8976 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8977 unlock_user_struct(target_stfs, arg2, 1);
8980 case TARGET_NR_fstatfs:
8981 ret = get_errno(fstatfs(arg1, &stfs));
8982 goto convert_statfs;
8983 #ifdef TARGET_NR_statfs64
8984 case TARGET_NR_statfs64:
8985 if (!(p = lock_user_string(arg1)))
8987 ret = get_errno(statfs(path(p), &stfs));
8988 unlock_user(p, arg1, 0);
8990 if (!is_error(ret)) {
8991 struct target_statfs64 *target_stfs;
8993 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
8995 __put_user(stfs.f_type, &target_stfs->f_type);
8996 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8997 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8998 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8999 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9000 __put_user(stfs.f_files, &target_stfs->f_files);
9001 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9002 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9003 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9004 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9005 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9006 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9007 unlock_user_struct(target_stfs, arg3, 1);
9010 case TARGET_NR_fstatfs64:
9011 ret = get_errno(fstatfs(arg1, &stfs));
9012 goto convert_statfs64;
9014 #ifdef TARGET_NR_ioperm
9015 case TARGET_NR_ioperm:
9018 #ifdef TARGET_NR_socketcall
9019 case TARGET_NR_socketcall:
9020 ret = do_socketcall(arg1, arg2);
9023 #ifdef TARGET_NR_accept
9024 case TARGET_NR_accept:
9025 ret = do_accept4(arg1, arg2, arg3, 0);
9028 #ifdef TARGET_NR_accept4
9029 case TARGET_NR_accept4:
9030 ret = do_accept4(arg1, arg2, arg3, arg4);
9033 #ifdef TARGET_NR_bind
9034 case TARGET_NR_bind:
9035 ret = do_bind(arg1, arg2, arg3);
9038 #ifdef TARGET_NR_connect
9039 case TARGET_NR_connect:
9040 ret = do_connect(arg1, arg2, arg3);
9043 #ifdef TARGET_NR_getpeername
9044 case TARGET_NR_getpeername:
9045 ret = do_getpeername(arg1, arg2, arg3);
9048 #ifdef TARGET_NR_getsockname
9049 case TARGET_NR_getsockname:
9050 ret = do_getsockname(arg1, arg2, arg3);
9053 #ifdef TARGET_NR_getsockopt
9054 case TARGET_NR_getsockopt:
9055 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9058 #ifdef TARGET_NR_listen
9059 case TARGET_NR_listen:
9060 ret = get_errno(listen(arg1, arg2));
9063 #ifdef TARGET_NR_recv
9064 case TARGET_NR_recv:
9065 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9068 #ifdef TARGET_NR_recvfrom
9069 case TARGET_NR_recvfrom:
9070 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9073 #ifdef TARGET_NR_recvmsg
9074 case TARGET_NR_recvmsg:
9075 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
9078 #ifdef TARGET_NR_send
9079 case TARGET_NR_send:
9080 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9083 #ifdef TARGET_NR_sendmsg
9084 case TARGET_NR_sendmsg:
9085 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
9088 #ifdef TARGET_NR_sendmmsg
9089 case TARGET_NR_sendmmsg:
9090 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9092 case TARGET_NR_recvmmsg:
9093 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9096 #ifdef TARGET_NR_sendto
9097 case TARGET_NR_sendto:
9098 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9101 #ifdef TARGET_NR_shutdown
9102 case TARGET_NR_shutdown:
9103 ret = get_errno(shutdown(arg1, arg2));
9106 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9107 case TARGET_NR_getrandom:
9108 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9112 ret = get_errno(getrandom(p, arg2, arg3));
9113 unlock_user(p, arg1, ret);
9116 #ifdef TARGET_NR_socket
9117 case TARGET_NR_socket:
9118 ret = do_socket(arg1, arg2, arg3);
9119 fd_trans_unregister(ret);
9122 #ifdef TARGET_NR_socketpair
9123 case TARGET_NR_socketpair:
9124 ret = do_socketpair(arg1, arg2, arg3, arg4);
9127 #ifdef TARGET_NR_setsockopt
9128 case TARGET_NR_setsockopt:
9129 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9133 case TARGET_NR_syslog:
9134 if (!(p = lock_user_string(arg2)))
9136 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9137 unlock_user(p, arg2, 0);
9140 case TARGET_NR_setitimer:
9142 struct itimerval value, ovalue, *pvalue;
9146 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9147 || copy_from_user_timeval(&pvalue->it_value,
9148 arg2 + sizeof(struct target_timeval)))
9153 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9154 if (!is_error(ret) && arg3) {
9155 if (copy_to_user_timeval(arg3,
9156 &ovalue.it_interval)
9157 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9163 case TARGET_NR_getitimer:
9165 struct itimerval value;
9167 ret = get_errno(getitimer(arg1, &value));
9168 if (!is_error(ret) && arg2) {
9169 if (copy_to_user_timeval(arg2,
9171 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9177 #ifdef TARGET_NR_stat
9178 case TARGET_NR_stat:
9179 if (!(p = lock_user_string(arg1)))
9181 ret = get_errno(stat(path(p), &st));
9182 unlock_user(p, arg1, 0);
9185 #ifdef TARGET_NR_lstat
9186 case TARGET_NR_lstat:
9187 if (!(p = lock_user_string(arg1)))
9189 ret = get_errno(lstat(path(p), &st));
9190 unlock_user(p, arg1, 0);
9193 case TARGET_NR_fstat:
9195 ret = get_errno(fstat(arg1, &st));
9196 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9199 if (!is_error(ret)) {
9200 struct target_stat *target_st;
9202 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9204 memset(target_st, 0, sizeof(*target_st));
9205 __put_user(st.st_dev, &target_st->st_dev);
9206 __put_user(st.st_ino, &target_st->st_ino);
9207 __put_user(st.st_mode, &target_st->st_mode);
9208 __put_user(st.st_uid, &target_st->st_uid);
9209 __put_user(st.st_gid, &target_st->st_gid);
9210 __put_user(st.st_nlink, &target_st->st_nlink);
9211 __put_user(st.st_rdev, &target_st->st_rdev);
9212 __put_user(st.st_size, &target_st->st_size);
9213 __put_user(st.st_blksize, &target_st->st_blksize);
9214 __put_user(st.st_blocks, &target_st->st_blocks);
9215 __put_user(st.st_atime, &target_st->target_st_atime);
9216 __put_user(st.st_mtime, &target_st->target_st_mtime);
9217 __put_user(st.st_ctime, &target_st->target_st_ctime);
9218 unlock_user_struct(target_st, arg2, 1);
9222 #ifdef TARGET_NR_olduname
9223 case TARGET_NR_olduname:
9226 #ifdef TARGET_NR_iopl
9227 case TARGET_NR_iopl:
9230 case TARGET_NR_vhangup:
9231 ret = get_errno(vhangup());
9233 #ifdef TARGET_NR_idle
9234 case TARGET_NR_idle:
9237 #ifdef TARGET_NR_syscall
9238 case TARGET_NR_syscall:
9239 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9240 arg6, arg7, arg8, 0);
9243 case TARGET_NR_wait4:
9246 abi_long status_ptr = arg2;
9247 struct rusage rusage, *rusage_ptr;
9248 abi_ulong target_rusage = arg4;
9249 abi_long rusage_err;
9251 rusage_ptr = &rusage;
9254 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9255 if (!is_error(ret)) {
9256 if (status_ptr && ret) {
9257 status = host_to_target_waitstatus(status);
9258 if (put_user_s32(status, status_ptr))
9261 if (target_rusage) {
9262 rusage_err = host_to_target_rusage(target_rusage, &rusage);
9270 #ifdef TARGET_NR_swapoff
9271 case TARGET_NR_swapoff:
9272 if (!(p = lock_user_string(arg1)))
9274 ret = get_errno(swapoff(p));
9275 unlock_user(p, arg1, 0);
9278 case TARGET_NR_sysinfo:
9280 struct target_sysinfo *target_value;
9281 struct sysinfo value;
9282 ret = get_errno(sysinfo(&value));
9283 if (!is_error(ret) && arg1)
9285 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
9287 __put_user(value.uptime, &target_value->uptime);
9288 __put_user(value.loads[0], &target_value->loads[0]);
9289 __put_user(value.loads[1], &target_value->loads[1]);
9290 __put_user(value.loads[2], &target_value->loads[2]);
9291 __put_user(value.totalram, &target_value->totalram);
9292 __put_user(value.freeram, &target_value->freeram);
9293 __put_user(value.sharedram, &target_value->sharedram);
9294 __put_user(value.bufferram, &target_value->bufferram);
9295 __put_user(value.totalswap, &target_value->totalswap);
9296 __put_user(value.freeswap, &target_value->freeswap);
9297 __put_user(value.procs, &target_value->procs);
9298 __put_user(value.totalhigh, &target_value->totalhigh);
9299 __put_user(value.freehigh, &target_value->freehigh);
9300 __put_user(value.mem_unit, &target_value->mem_unit);
9301 unlock_user_struct(target_value, arg1, 1);
9305 #ifdef TARGET_NR_ipc
9307 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
9310 #ifdef TARGET_NR_semget
9311 case TARGET_NR_semget:
9312 ret = get_errno(semget(arg1, arg2, arg3));
9315 #ifdef TARGET_NR_semop
9316 case TARGET_NR_semop:
9317 ret = do_semop(arg1, arg2, arg3);
9320 #ifdef TARGET_NR_semctl
9321 case TARGET_NR_semctl:
9322 ret = do_semctl(arg1, arg2, arg3, arg4);
9325 #ifdef TARGET_NR_msgctl
9326 case TARGET_NR_msgctl:
9327 ret = do_msgctl(arg1, arg2, arg3);
9330 #ifdef TARGET_NR_msgget
9331 case TARGET_NR_msgget:
9332 ret = get_errno(msgget(arg1, arg2));
9335 #ifdef TARGET_NR_msgrcv
9336 case TARGET_NR_msgrcv:
9337 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
9340 #ifdef TARGET_NR_msgsnd
9341 case TARGET_NR_msgsnd:
9342 ret = do_msgsnd(arg1, arg2, arg3, arg4);
9345 #ifdef TARGET_NR_shmget
9346 case TARGET_NR_shmget:
9347 ret = get_errno(shmget(arg1, arg2, arg3));
9350 #ifdef TARGET_NR_shmctl
9351 case TARGET_NR_shmctl:
9352 ret = do_shmctl(arg1, arg2, arg3);
9355 #ifdef TARGET_NR_shmat
9356 case TARGET_NR_shmat:
9357 ret = do_shmat(arg1, arg2, arg3);
9360 #ifdef TARGET_NR_shmdt
9361 case TARGET_NR_shmdt:
9362 ret = do_shmdt(arg1);
9365 case TARGET_NR_fsync:
9366 ret = get_errno(fsync(arg1));
9368 case TARGET_NR_clone:
9369 /* Linux manages to have three different orderings for its
9370 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9371 * match the kernel's CONFIG_CLONE_* settings.
9372 * Microblaze is further special in that it uses a sixth
9373 * implicit argument to clone for the TLS pointer.
9375 #if defined(TARGET_MICROBLAZE)
9376 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
9377 #elif defined(TARGET_CLONE_BACKWARDS)
9378 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
9379 #elif defined(TARGET_CLONE_BACKWARDS2)
9380 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
9382 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
9385 #ifdef __NR_exit_group
9386 /* new thread calls */
9387 case TARGET_NR_exit_group:
9391 gdb_exit(cpu_env, arg1);
9392 ret = get_errno(exit_group(arg1));
9395 case TARGET_NR_setdomainname:
9396 if (!(p = lock_user_string(arg1)))
9398 ret = get_errno(setdomainname(p, arg2));
9399 unlock_user(p, arg1, 0);
9401 case TARGET_NR_uname:
9402 /* no need to transcode because we use the linux syscall */
9404 struct new_utsname * buf;
9406 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
9408 ret = get_errno(sys_uname(buf));
9409 if (!is_error(ret)) {
9410 /* Overwrite the native machine name with whatever is being
9412 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
9413 /* Allow the user to override the reported release. */
9414 if (qemu_uname_release && *qemu_uname_release) {
9415 g_strlcpy(buf->release, qemu_uname_release,
9416 sizeof(buf->release));
9419 unlock_user_struct(buf, arg1, 1);
9423 case TARGET_NR_modify_ldt:
9424 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
9426 #if !defined(TARGET_X86_64)
9427 case TARGET_NR_vm86old:
9429 case TARGET_NR_vm86:
9430 ret = do_vm86(cpu_env, arg1, arg2);
9434 case TARGET_NR_adjtimex:
9436 #ifdef TARGET_NR_create_module
9437 case TARGET_NR_create_module:
9439 case TARGET_NR_init_module:
9440 case TARGET_NR_delete_module:
9441 #ifdef TARGET_NR_get_kernel_syms
9442 case TARGET_NR_get_kernel_syms:
9445 case TARGET_NR_quotactl:
9447 case TARGET_NR_getpgid:
9448 ret = get_errno(getpgid(arg1));
9450 case TARGET_NR_fchdir:
9451 ret = get_errno(fchdir(arg1));
9453 #ifdef TARGET_NR_bdflush /* not on x86_64 */
9454 case TARGET_NR_bdflush:
9457 #ifdef TARGET_NR_sysfs
9458 case TARGET_NR_sysfs:
9461 case TARGET_NR_personality:
9462 ret = get_errno(personality(arg1));
9464 #ifdef TARGET_NR_afs_syscall
9465 case TARGET_NR_afs_syscall:
9468 #ifdef TARGET_NR__llseek /* Not on alpha */
9469 case TARGET_NR__llseek:
9472 #if !defined(__NR_llseek)
9473 res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
9475 ret = get_errno(res);
9480 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
9482 if ((ret == 0) && put_user_s64(res, arg4)) {
9488 #ifdef TARGET_NR_getdents
9489 case TARGET_NR_getdents:
9490 #ifdef __NR_getdents
9491 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9493 struct target_dirent *target_dirp;
9494 struct linux_dirent *dirp;
9495 abi_long count = arg3;
9497 dirp = g_try_malloc(count);
9499 ret = -TARGET_ENOMEM;
9503 ret = get_errno(sys_getdents(arg1, dirp, count));
9504 if (!is_error(ret)) {
9505 struct linux_dirent *de;
9506 struct target_dirent *tde;
9508 int reclen, treclen;
9509 int count1, tnamelen;
9513 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9517 reclen = de->d_reclen;
9518 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
9519 assert(tnamelen >= 0);
9520 treclen = tnamelen + offsetof(struct target_dirent, d_name);
9521 assert(count1 + treclen <= count);
9522 tde->d_reclen = tswap16(treclen);
9523 tde->d_ino = tswapal(de->d_ino);
9524 tde->d_off = tswapal(de->d_off);
9525 memcpy(tde->d_name, de->d_name, tnamelen);
9526 de = (struct linux_dirent *)((char *)de + reclen);
9528 tde = (struct target_dirent *)((char *)tde + treclen);
9532 unlock_user(target_dirp, arg2, ret);
9538 struct linux_dirent *dirp;
9539 abi_long count = arg3;
9541 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9543 ret = get_errno(sys_getdents(arg1, dirp, count));
9544 if (!is_error(ret)) {
9545 struct linux_dirent *de;
9550 reclen = de->d_reclen;
9553 de->d_reclen = tswap16(reclen);
9554 tswapls(&de->d_ino);
9555 tswapls(&de->d_off);
9556 de = (struct linux_dirent *)((char *)de + reclen);
9560 unlock_user(dirp, arg2, ret);
9564 /* Implement getdents in terms of getdents64 */
9566 struct linux_dirent64 *dirp;
9567 abi_long count = arg3;
9569 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
9573 ret = get_errno(sys_getdents64(arg1, dirp, count));
9574 if (!is_error(ret)) {
9575 /* Convert the dirent64 structs to target dirent. We do this
9576 * in-place, since we can guarantee that a target_dirent is no
9577 * larger than a dirent64; however this means we have to be
9578 * careful to read everything before writing in the new format.
9580 struct linux_dirent64 *de;
9581 struct target_dirent *tde;
9586 tde = (struct target_dirent *)dirp;
9588 int namelen, treclen;
9589 int reclen = de->d_reclen;
9590 uint64_t ino = de->d_ino;
9591 int64_t off = de->d_off;
9592 uint8_t type = de->d_type;
9594 namelen = strlen(de->d_name);
9595 treclen = offsetof(struct target_dirent, d_name)
9597 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
9599 memmove(tde->d_name, de->d_name, namelen + 1);
9600 tde->d_ino = tswapal(ino);
9601 tde->d_off = tswapal(off);
9602 tde->d_reclen = tswap16(treclen);
9603 /* The target_dirent type is in what was formerly a padding
9604 * byte at the end of the structure:
9606 *(((char *)tde) + treclen - 1) = type;
9608 de = (struct linux_dirent64 *)((char *)de + reclen);
9609 tde = (struct target_dirent *)((char *)tde + treclen);
9615 unlock_user(dirp, arg2, ret);
9619 #endif /* TARGET_NR_getdents */
9620 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9621 case TARGET_NR_getdents64:
9623 struct linux_dirent64 *dirp;
9624 abi_long count = arg3;
9625 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9627 ret = get_errno(sys_getdents64(arg1, dirp, count));
9628 if (!is_error(ret)) {
9629 struct linux_dirent64 *de;
9634 reclen = de->d_reclen;
9637 de->d_reclen = tswap16(reclen);
9638 tswap64s((uint64_t *)&de->d_ino);
9639 tswap64s((uint64_t *)&de->d_off);
9640 de = (struct linux_dirent64 *)((char *)de + reclen);
9644 unlock_user(dirp, arg2, ret);
9647 #endif /* TARGET_NR_getdents64 */
9648 #if defined(TARGET_NR__newselect)
9649 case TARGET_NR__newselect:
9650 ret = do_select(arg1, arg2, arg3, arg4, arg5);
9653 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9654 # ifdef TARGET_NR_poll
9655 case TARGET_NR_poll:
9657 # ifdef TARGET_NR_ppoll
9658 case TARGET_NR_ppoll:
9661 struct target_pollfd *target_pfd;
9662 unsigned int nfds = arg2;
9669 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
9670 ret = -TARGET_EINVAL;
9674 target_pfd = lock_user(VERIFY_WRITE, arg1,
9675 sizeof(struct target_pollfd) * nfds, 1);
9680 pfd = alloca(sizeof(struct pollfd) * nfds);
9681 for (i = 0; i < nfds; i++) {
9682 pfd[i].fd = tswap32(target_pfd[i].fd);
9683 pfd[i].events = tswap16(target_pfd[i].events);
9688 # ifdef TARGET_NR_ppoll
9689 case TARGET_NR_ppoll:
9691 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
9692 target_sigset_t *target_set;
9693 sigset_t _set, *set = &_set;
9696 if (target_to_host_timespec(timeout_ts, arg3)) {
9697 unlock_user(target_pfd, arg1, 0);
9705 if (arg5 != sizeof(target_sigset_t)) {
9706 unlock_user(target_pfd, arg1, 0);
9707 ret = -TARGET_EINVAL;
9711 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
9713 unlock_user(target_pfd, arg1, 0);
9716 target_to_host_sigset(set, target_set);
9721 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
9722 set, SIGSET_T_SIZE));
9724 if (!is_error(ret) && arg3) {
9725 host_to_target_timespec(arg3, timeout_ts);
9728 unlock_user(target_set, arg4, 0);
9733 # ifdef TARGET_NR_poll
9734 case TARGET_NR_poll:
9736 struct timespec ts, *pts;
9739 /* Convert ms to secs, ns */
9740 ts.tv_sec = arg3 / 1000;
9741 ts.tv_nsec = (arg3 % 1000) * 1000000LL;
9744 /* -ve poll() timeout means "infinite" */
9747 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
9752 g_assert_not_reached();
9755 if (!is_error(ret)) {
9756 for(i = 0; i < nfds; i++) {
9757 target_pfd[i].revents = tswap16(pfd[i].revents);
9760 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
9764 case TARGET_NR_flock:
9765 /* NOTE: the flock constant seems to be the same for every
9767 ret = get_errno(safe_flock(arg1, arg2));
9769 case TARGET_NR_readv:
9771 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9773 ret = get_errno(safe_readv(arg1, vec, arg3));
9774 unlock_iovec(vec, arg2, arg3, 1);
9776 ret = -host_to_target_errno(errno);
9780 case TARGET_NR_writev:
9782 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9784 ret = get_errno(safe_writev(arg1, vec, arg3));
9785 unlock_iovec(vec, arg2, arg3, 0);
9787 ret = -host_to_target_errno(errno);
9791 case TARGET_NR_getsid:
9792 ret = get_errno(getsid(arg1));
9794 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9795 case TARGET_NR_fdatasync:
9796 ret = get_errno(fdatasync(arg1));
9799 #ifdef TARGET_NR__sysctl
9800 case TARGET_NR__sysctl:
9801 /* We don't implement this, but ENOTDIR is always a safe
9803 ret = -TARGET_ENOTDIR;
9806 case TARGET_NR_sched_getaffinity:
9808 unsigned int mask_size;
9809 unsigned long *mask;
9812 * sched_getaffinity needs multiples of ulong, so need to take
9813 * care of mismatches between target ulong and host ulong sizes.
9815 if (arg2 & (sizeof(abi_ulong) - 1)) {
9816 ret = -TARGET_EINVAL;
9819 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9821 mask = alloca(mask_size);
9822 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
9824 if (!is_error(ret)) {
9826 /* More data returned than the caller's buffer will fit.
9827 * This only happens if sizeof(abi_long) < sizeof(long)
9828 * and the caller passed us a buffer holding an odd number
9829 * of abi_longs. If the host kernel is actually using the
9830 * extra 4 bytes then fail EINVAL; otherwise we can just
9831 * ignore them and only copy the interesting part.
9833 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
9834 if (numcpus > arg2 * 8) {
9835 ret = -TARGET_EINVAL;
9841 if (copy_to_user(arg3, mask, ret)) {
9847 case TARGET_NR_sched_setaffinity:
9849 unsigned int mask_size;
9850 unsigned long *mask;
9853 * sched_setaffinity needs multiples of ulong, so need to take
9854 * care of mismatches between target ulong and host ulong sizes.
9856 if (arg2 & (sizeof(abi_ulong) - 1)) {
9857 ret = -TARGET_EINVAL;
9860 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9862 mask = alloca(mask_size);
9863 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
9866 memcpy(mask, p, arg2);
9867 unlock_user_struct(p, arg2, 0);
9869 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
9872 case TARGET_NR_sched_setparam:
9874 struct sched_param *target_schp;
9875 struct sched_param schp;
9878 return -TARGET_EINVAL;
9880 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
9882 schp.sched_priority = tswap32(target_schp->sched_priority);
9883 unlock_user_struct(target_schp, arg2, 0);
9884 ret = get_errno(sched_setparam(arg1, &schp));
9887 case TARGET_NR_sched_getparam:
9889 struct sched_param *target_schp;
9890 struct sched_param schp;
9893 return -TARGET_EINVAL;
9895 ret = get_errno(sched_getparam(arg1, &schp));
9896 if (!is_error(ret)) {
9897 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
9899 target_schp->sched_priority = tswap32(schp.sched_priority);
9900 unlock_user_struct(target_schp, arg2, 1);
9904 case TARGET_NR_sched_setscheduler:
9906 struct sched_param *target_schp;
9907 struct sched_param schp;
9909 return -TARGET_EINVAL;
9911 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
9913 schp.sched_priority = tswap32(target_schp->sched_priority);
9914 unlock_user_struct(target_schp, arg3, 0);
9915 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
9918 case TARGET_NR_sched_getscheduler:
9919 ret = get_errno(sched_getscheduler(arg1));
9921 case TARGET_NR_sched_yield:
9922 ret = get_errno(sched_yield());
9924 case TARGET_NR_sched_get_priority_max:
9925 ret = get_errno(sched_get_priority_max(arg1));
9927 case TARGET_NR_sched_get_priority_min:
9928 ret = get_errno(sched_get_priority_min(arg1));
9930 case TARGET_NR_sched_rr_get_interval:
9933 ret = get_errno(sched_rr_get_interval(arg1, &ts));
9934 if (!is_error(ret)) {
9935 ret = host_to_target_timespec(arg2, &ts);
9939 case TARGET_NR_nanosleep:
9941 struct timespec req, rem;
9942 target_to_host_timespec(&req, arg1);
9943 ret = get_errno(safe_nanosleep(&req, &rem));
9944 if (is_error(ret) && arg2) {
9945 host_to_target_timespec(arg2, &rem);
9949 #ifdef TARGET_NR_query_module
9950 case TARGET_NR_query_module:
9953 #ifdef TARGET_NR_nfsservctl
9954 case TARGET_NR_nfsservctl:
9957 case TARGET_NR_prctl:
9959 case PR_GET_PDEATHSIG:
9962 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
9963 if (!is_error(ret) && arg2
9964 && put_user_ual(deathsig, arg2)) {
9972 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
9976 ret = get_errno(prctl(arg1, (unsigned long)name,
9978 unlock_user(name, arg2, 16);
9983 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
9987 ret = get_errno(prctl(arg1, (unsigned long)name,
9989 unlock_user(name, arg2, 0);
9994 /* Most prctl options have no pointer arguments */
9995 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
9999 #ifdef TARGET_NR_arch_prctl
10000 case TARGET_NR_arch_prctl:
10001 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10002 ret = do_arch_prctl(cpu_env, arg1, arg2);
10005 goto unimplemented;
10008 #ifdef TARGET_NR_pread64
10009 case TARGET_NR_pread64:
10010 if (regpairs_aligned(cpu_env)) {
10014 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
10016 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10017 unlock_user(p, arg2, ret);
10019 case TARGET_NR_pwrite64:
10020 if (regpairs_aligned(cpu_env)) {
10024 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
10026 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10027 unlock_user(p, arg2, 0);
10030 case TARGET_NR_getcwd:
10031 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10033 ret = get_errno(sys_getcwd1(p, arg2));
10034 unlock_user(p, arg1, ret);
10036 case TARGET_NR_capget:
10037 case TARGET_NR_capset:
10039 struct target_user_cap_header *target_header;
10040 struct target_user_cap_data *target_data = NULL;
10041 struct __user_cap_header_struct header;
10042 struct __user_cap_data_struct data[2];
10043 struct __user_cap_data_struct *dataptr = NULL;
10044 int i, target_datalen;
10045 int data_items = 1;
10047 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10050 header.version = tswap32(target_header->version);
10051 header.pid = tswap32(target_header->pid);
10053 if (header.version != _LINUX_CAPABILITY_VERSION) {
10054 /* Version 2 and up takes pointer to two user_data structs */
10058 target_datalen = sizeof(*target_data) * data_items;
10061 if (num == TARGET_NR_capget) {
10062 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10064 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10066 if (!target_data) {
10067 unlock_user_struct(target_header, arg1, 0);
10071 if (num == TARGET_NR_capset) {
10072 for (i = 0; i < data_items; i++) {
10073 data[i].effective = tswap32(target_data[i].effective);
10074 data[i].permitted = tswap32(target_data[i].permitted);
10075 data[i].inheritable = tswap32(target_data[i].inheritable);
10082 if (num == TARGET_NR_capget) {
10083 ret = get_errno(capget(&header, dataptr));
10085 ret = get_errno(capset(&header, dataptr));
10088 /* The kernel always updates version for both capget and capset */
10089 target_header->version = tswap32(header.version);
10090 unlock_user_struct(target_header, arg1, 1);
10093 if (num == TARGET_NR_capget) {
10094 for (i = 0; i < data_items; i++) {
10095 target_data[i].effective = tswap32(data[i].effective);
10096 target_data[i].permitted = tswap32(data[i].permitted);
10097 target_data[i].inheritable = tswap32(data[i].inheritable);
10099 unlock_user(target_data, arg2, target_datalen);
10101 unlock_user(target_data, arg2, 0);
10106 case TARGET_NR_sigaltstack:
10107 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
10110 #ifdef CONFIG_SENDFILE
10111 case TARGET_NR_sendfile:
10113 off_t *offp = NULL;
10116 ret = get_user_sal(off, arg3);
10117 if (is_error(ret)) {
10122 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10123 if (!is_error(ret) && arg3) {
10124 abi_long ret2 = put_user_sal(off, arg3);
10125 if (is_error(ret2)) {
10131 #ifdef TARGET_NR_sendfile64
10132 case TARGET_NR_sendfile64:
10134 off_t *offp = NULL;
10137 ret = get_user_s64(off, arg3);
10138 if (is_error(ret)) {
10143 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10144 if (!is_error(ret) && arg3) {
10145 abi_long ret2 = put_user_s64(off, arg3);
10146 if (is_error(ret2)) {
10154 case TARGET_NR_sendfile:
10155 #ifdef TARGET_NR_sendfile64
10156 case TARGET_NR_sendfile64:
10158 goto unimplemented;
10161 #ifdef TARGET_NR_getpmsg
10162 case TARGET_NR_getpmsg:
10163 goto unimplemented;
10165 #ifdef TARGET_NR_putpmsg
10166 case TARGET_NR_putpmsg:
10167 goto unimplemented;
10169 #ifdef TARGET_NR_vfork
10170 case TARGET_NR_vfork:
10171 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
10175 #ifdef TARGET_NR_ugetrlimit
10176 case TARGET_NR_ugetrlimit:
10178 struct rlimit rlim;
10179 int resource = target_to_host_resource(arg1);
10180 ret = get_errno(getrlimit(resource, &rlim));
10181 if (!is_error(ret)) {
10182 struct target_rlimit *target_rlim;
10183 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10185 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10186 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10187 unlock_user_struct(target_rlim, arg2, 1);
10192 #ifdef TARGET_NR_truncate64
10193 case TARGET_NR_truncate64:
10194 if (!(p = lock_user_string(arg1)))
10196 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
10197 unlock_user(p, arg1, 0);
10200 #ifdef TARGET_NR_ftruncate64
10201 case TARGET_NR_ftruncate64:
10202 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
10205 #ifdef TARGET_NR_stat64
10206 case TARGET_NR_stat64:
10207 if (!(p = lock_user_string(arg1)))
10209 ret = get_errno(stat(path(p), &st));
10210 unlock_user(p, arg1, 0);
10211 if (!is_error(ret))
10212 ret = host_to_target_stat64(cpu_env, arg2, &st);
10215 #ifdef TARGET_NR_lstat64
10216 case TARGET_NR_lstat64:
10217 if (!(p = lock_user_string(arg1)))
10219 ret = get_errno(lstat(path(p), &st));
10220 unlock_user(p, arg1, 0);
10221 if (!is_error(ret))
10222 ret = host_to_target_stat64(cpu_env, arg2, &st);
10225 #ifdef TARGET_NR_fstat64
10226 case TARGET_NR_fstat64:
10227 ret = get_errno(fstat(arg1, &st));
10228 if (!is_error(ret))
10229 ret = host_to_target_stat64(cpu_env, arg2, &st);
10232 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10233 #ifdef TARGET_NR_fstatat64
10234 case TARGET_NR_fstatat64:
10236 #ifdef TARGET_NR_newfstatat
10237 case TARGET_NR_newfstatat:
10239 if (!(p = lock_user_string(arg2)))
10241 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
10242 if (!is_error(ret))
10243 ret = host_to_target_stat64(cpu_env, arg3, &st);
10246 #ifdef TARGET_NR_lchown
10247 case TARGET_NR_lchown:
10248 if (!(p = lock_user_string(arg1)))
10250 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10251 unlock_user(p, arg1, 0);
10254 #ifdef TARGET_NR_getuid
10255 case TARGET_NR_getuid:
10256 ret = get_errno(high2lowuid(getuid()));
10259 #ifdef TARGET_NR_getgid
10260 case TARGET_NR_getgid:
10261 ret = get_errno(high2lowgid(getgid()));
10264 #ifdef TARGET_NR_geteuid
10265 case TARGET_NR_geteuid:
10266 ret = get_errno(high2lowuid(geteuid()));
10269 #ifdef TARGET_NR_getegid
10270 case TARGET_NR_getegid:
10271 ret = get_errno(high2lowgid(getegid()));
10274 case TARGET_NR_setreuid:
10275 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
10277 case TARGET_NR_setregid:
10278 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
10280 case TARGET_NR_getgroups:
10282 int gidsetsize = arg1;
10283 target_id *target_grouplist;
10287 grouplist = alloca(gidsetsize * sizeof(gid_t));
10288 ret = get_errno(getgroups(gidsetsize, grouplist));
10289 if (gidsetsize == 0)
10291 if (!is_error(ret)) {
10292 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
10293 if (!target_grouplist)
10295 for(i = 0;i < ret; i++)
10296 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
10297 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
10301 case TARGET_NR_setgroups:
10303 int gidsetsize = arg1;
10304 target_id *target_grouplist;
10305 gid_t *grouplist = NULL;
10308 grouplist = alloca(gidsetsize * sizeof(gid_t));
10309 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
10310 if (!target_grouplist) {
10311 ret = -TARGET_EFAULT;
10314 for (i = 0; i < gidsetsize; i++) {
10315 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
10317 unlock_user(target_grouplist, arg2, 0);
10319 ret = get_errno(setgroups(gidsetsize, grouplist));
10322 case TARGET_NR_fchown:
10323 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
10325 #if defined(TARGET_NR_fchownat)
10326 case TARGET_NR_fchownat:
10327 if (!(p = lock_user_string(arg2)))
10329 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
10330 low2highgid(arg4), arg5));
10331 unlock_user(p, arg2, 0);
10334 #ifdef TARGET_NR_setresuid
10335 case TARGET_NR_setresuid:
10336 ret = get_errno(sys_setresuid(low2highuid(arg1),
10338 low2highuid(arg3)));
10341 #ifdef TARGET_NR_getresuid
10342 case TARGET_NR_getresuid:
10344 uid_t ruid, euid, suid;
10345 ret = get_errno(getresuid(&ruid, &euid, &suid));
10346 if (!is_error(ret)) {
10347 if (put_user_id(high2lowuid(ruid), arg1)
10348 || put_user_id(high2lowuid(euid), arg2)
10349 || put_user_id(high2lowuid(suid), arg3))
10355 #ifdef TARGET_NR_getresgid
10356 case TARGET_NR_setresgid:
10357 ret = get_errno(sys_setresgid(low2highgid(arg1),
10359 low2highgid(arg3)));
10362 #ifdef TARGET_NR_getresgid
10363 case TARGET_NR_getresgid:
10365 gid_t rgid, egid, sgid;
10366 ret = get_errno(getresgid(&rgid, &egid, &sgid));
10367 if (!is_error(ret)) {
10368 if (put_user_id(high2lowgid(rgid), arg1)
10369 || put_user_id(high2lowgid(egid), arg2)
10370 || put_user_id(high2lowgid(sgid), arg3))
10376 #ifdef TARGET_NR_chown
10377 case TARGET_NR_chown:
10378 if (!(p = lock_user_string(arg1)))
10380 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
10381 unlock_user(p, arg1, 0);
10384 case TARGET_NR_setuid:
10385 ret = get_errno(sys_setuid(low2highuid(arg1)));
10387 case TARGET_NR_setgid:
10388 ret = get_errno(sys_setgid(low2highgid(arg1)));
10390 case TARGET_NR_setfsuid:
10391 ret = get_errno(setfsuid(arg1));
10393 case TARGET_NR_setfsgid:
10394 ret = get_errno(setfsgid(arg1));
10397 #ifdef TARGET_NR_lchown32
10398 case TARGET_NR_lchown32:
10399 if (!(p = lock_user_string(arg1)))
10401 ret = get_errno(lchown(p, arg2, arg3));
10402 unlock_user(p, arg1, 0);
10405 #ifdef TARGET_NR_getuid32
10406 case TARGET_NR_getuid32:
10407 ret = get_errno(getuid());
10411 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10412 /* Alpha specific */
10413 case TARGET_NR_getxuid:
10417 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
10419 ret = get_errno(getuid());
10422 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10423 /* Alpha specific */
10424 case TARGET_NR_getxgid:
10428 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
10430 ret = get_errno(getgid());
10433 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10434 /* Alpha specific */
10435 case TARGET_NR_osf_getsysinfo:
10436 ret = -TARGET_EOPNOTSUPP;
10438 case TARGET_GSI_IEEE_FP_CONTROL:
10440 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
10442 /* Copied from linux ieee_fpcr_to_swcr. */
10443 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
10444 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
10445 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
10446 | SWCR_TRAP_ENABLE_DZE
10447 | SWCR_TRAP_ENABLE_OVF);
10448 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
10449 | SWCR_TRAP_ENABLE_INE);
10450 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
10451 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
10453 if (put_user_u64 (swcr, arg2))
10459 /* case GSI_IEEE_STATE_AT_SIGNAL:
10460 -- Not implemented in linux kernel.
10462 -- Retrieves current unaligned access state; not much used.
10463 case GSI_PROC_TYPE:
10464 -- Retrieves implver information; surely not used.
10465 case GSI_GET_HWRPB:
10466 -- Grabs a copy of the HWRPB; surely not used.
10471 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10472 /* Alpha specific */
10473 case TARGET_NR_osf_setsysinfo:
10474 ret = -TARGET_EOPNOTSUPP;
10476 case TARGET_SSI_IEEE_FP_CONTROL:
10478 uint64_t swcr, fpcr, orig_fpcr;
10480 if (get_user_u64 (swcr, arg2)) {
10483 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
10484 fpcr = orig_fpcr & FPCR_DYN_MASK;
10486 /* Copied from linux ieee_swcr_to_fpcr. */
10487 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
10488 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
10489 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
10490 | SWCR_TRAP_ENABLE_DZE
10491 | SWCR_TRAP_ENABLE_OVF)) << 48;
10492 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
10493 | SWCR_TRAP_ENABLE_INE)) << 57;
10494 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
10495 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
10497 cpu_alpha_store_fpcr(cpu_env, fpcr);
10502 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
10504 uint64_t exc, fpcr, orig_fpcr;
10507 if (get_user_u64(exc, arg2)) {
10511 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
10513 /* We only add to the exception status here. */
10514 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
10516 cpu_alpha_store_fpcr(cpu_env, fpcr);
10519 /* Old exceptions are not signaled. */
10520 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
10522 /* If any exceptions set by this call,
10523 and are unmasked, send a signal. */
10525 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
10526 si_code = TARGET_FPE_FLTRES;
10528 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
10529 si_code = TARGET_FPE_FLTUND;
10531 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
10532 si_code = TARGET_FPE_FLTOVF;
10534 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
10535 si_code = TARGET_FPE_FLTDIV;
10537 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
10538 si_code = TARGET_FPE_FLTINV;
10540 if (si_code != 0) {
10541 target_siginfo_t info;
10542 info.si_signo = SIGFPE;
10544 info.si_code = si_code;
10545 info._sifields._sigfault._addr
10546 = ((CPUArchState *)cpu_env)->pc;
10547 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
10552 /* case SSI_NVPAIRS:
10553 -- Used with SSIN_UACPROC to enable unaligned accesses.
10554 case SSI_IEEE_STATE_AT_SIGNAL:
10555 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10556 -- Not implemented in linux kernel
10561 #ifdef TARGET_NR_osf_sigprocmask
10562 /* Alpha specific. */
10563 case TARGET_NR_osf_sigprocmask:
10567 sigset_t set, oldset;
10570 case TARGET_SIG_BLOCK:
10573 case TARGET_SIG_UNBLOCK:
10576 case TARGET_SIG_SETMASK:
10580 ret = -TARGET_EINVAL;
10584 target_to_host_old_sigset(&set, &mask);
10585 ret = do_sigprocmask(how, &set, &oldset);
10587 host_to_target_old_sigset(&mask, &oldset);
10594 #ifdef TARGET_NR_getgid32
10595 case TARGET_NR_getgid32:
10596 ret = get_errno(getgid());
10599 #ifdef TARGET_NR_geteuid32
10600 case TARGET_NR_geteuid32:
10601 ret = get_errno(geteuid());
10604 #ifdef TARGET_NR_getegid32
10605 case TARGET_NR_getegid32:
10606 ret = get_errno(getegid());
10609 #ifdef TARGET_NR_setreuid32
10610 case TARGET_NR_setreuid32:
10611 ret = get_errno(setreuid(arg1, arg2));
10614 #ifdef TARGET_NR_setregid32
10615 case TARGET_NR_setregid32:
10616 ret = get_errno(setregid(arg1, arg2));
10619 #ifdef TARGET_NR_getgroups32
10620 case TARGET_NR_getgroups32:
10622 int gidsetsize = arg1;
10623 uint32_t *target_grouplist;
10627 grouplist = alloca(gidsetsize * sizeof(gid_t));
10628 ret = get_errno(getgroups(gidsetsize, grouplist));
10629 if (gidsetsize == 0)
10631 if (!is_error(ret)) {
10632 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
10633 if (!target_grouplist) {
10634 ret = -TARGET_EFAULT;
10637 for(i = 0;i < ret; i++)
10638 target_grouplist[i] = tswap32(grouplist[i]);
10639 unlock_user(target_grouplist, arg2, gidsetsize * 4);
10644 #ifdef TARGET_NR_setgroups32
10645 case TARGET_NR_setgroups32:
10647 int gidsetsize = arg1;
10648 uint32_t *target_grouplist;
10652 grouplist = alloca(gidsetsize * sizeof(gid_t));
10653 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
10654 if (!target_grouplist) {
10655 ret = -TARGET_EFAULT;
10658 for(i = 0;i < gidsetsize; i++)
10659 grouplist[i] = tswap32(target_grouplist[i]);
10660 unlock_user(target_grouplist, arg2, 0);
10661 ret = get_errno(setgroups(gidsetsize, grouplist));
10665 #ifdef TARGET_NR_fchown32
10666 case TARGET_NR_fchown32:
10667 ret = get_errno(fchown(arg1, arg2, arg3));
10670 #ifdef TARGET_NR_setresuid32
10671 case TARGET_NR_setresuid32:
10672 ret = get_errno(sys_setresuid(arg1, arg2, arg3));
10675 #ifdef TARGET_NR_getresuid32
10676 case TARGET_NR_getresuid32:
10678 uid_t ruid, euid, suid;
10679 ret = get_errno(getresuid(&ruid, &euid, &suid));
10680 if (!is_error(ret)) {
10681 if (put_user_u32(ruid, arg1)
10682 || put_user_u32(euid, arg2)
10683 || put_user_u32(suid, arg3))
10689 #ifdef TARGET_NR_setresgid32
10690 case TARGET_NR_setresgid32:
10691 ret = get_errno(sys_setresgid(arg1, arg2, arg3));
10694 #ifdef TARGET_NR_getresgid32
10695 case TARGET_NR_getresgid32:
10697 gid_t rgid, egid, sgid;
10698 ret = get_errno(getresgid(&rgid, &egid, &sgid));
10699 if (!is_error(ret)) {
10700 if (put_user_u32(rgid, arg1)
10701 || put_user_u32(egid, arg2)
10702 || put_user_u32(sgid, arg3))
10708 #ifdef TARGET_NR_chown32
10709 case TARGET_NR_chown32:
10710 if (!(p = lock_user_string(arg1)))
10712 ret = get_errno(chown(p, arg2, arg3));
10713 unlock_user(p, arg1, 0);
10716 #ifdef TARGET_NR_setuid32
10717 case TARGET_NR_setuid32:
10718 ret = get_errno(sys_setuid(arg1));
10721 #ifdef TARGET_NR_setgid32
10722 case TARGET_NR_setgid32:
10723 ret = get_errno(sys_setgid(arg1));
10726 #ifdef TARGET_NR_setfsuid32
10727 case TARGET_NR_setfsuid32:
10728 ret = get_errno(setfsuid(arg1));
10731 #ifdef TARGET_NR_setfsgid32
10732 case TARGET_NR_setfsgid32:
10733 ret = get_errno(setfsgid(arg1));
10737 case TARGET_NR_pivot_root:
10738 goto unimplemented;
10739 #ifdef TARGET_NR_mincore
10740 case TARGET_NR_mincore:
10743 ret = -TARGET_EFAULT;
10744 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
10746 if (!(p = lock_user_string(arg3)))
10748 ret = get_errno(mincore(a, arg2, p));
10749 unlock_user(p, arg3, ret);
10751 unlock_user(a, arg1, 0);
10755 #ifdef TARGET_NR_arm_fadvise64_64
10756 case TARGET_NR_arm_fadvise64_64:
10757 /* arm_fadvise64_64 looks like fadvise64_64 but
10758 * with different argument order: fd, advice, offset, len
10759 * rather than the usual fd, offset, len, advice.
10760 * Note that offset and len are both 64-bit so appear as
10761 * pairs of 32-bit registers.
10763 ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
10764 target_offset64(arg5, arg6), arg2);
10765 ret = -host_to_target_errno(ret);
10769 #if TARGET_ABI_BITS == 32
10771 #ifdef TARGET_NR_fadvise64_64
10772 case TARGET_NR_fadvise64_64:
10773 /* 6 args: fd, offset (high, low), len (high, low), advice */
10774 if (regpairs_aligned(cpu_env)) {
10775 /* offset is in (3,4), len in (5,6) and advice in 7 */
10782 ret = -host_to_target_errno(posix_fadvise(arg1,
10783 target_offset64(arg2, arg3),
10784 target_offset64(arg4, arg5),
10789 #ifdef TARGET_NR_fadvise64
10790 case TARGET_NR_fadvise64:
10791 /* 5 args: fd, offset (high, low), len, advice */
10792 if (regpairs_aligned(cpu_env)) {
10793 /* offset is in (3,4), len in 5 and advice in 6 */
10799 ret = -host_to_target_errno(posix_fadvise(arg1,
10800 target_offset64(arg2, arg3),
10805 #else /* not a 32-bit ABI */
10806 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
10807 #ifdef TARGET_NR_fadvise64_64
10808 case TARGET_NR_fadvise64_64:
10810 #ifdef TARGET_NR_fadvise64
10811 case TARGET_NR_fadvise64:
10813 #ifdef TARGET_S390X
10815 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
10816 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
10817 case 6: arg4 = POSIX_FADV_DONTNEED; break;
10818 case 7: arg4 = POSIX_FADV_NOREUSE; break;
10822 ret = -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
10825 #endif /* end of 64-bit ABI fadvise handling */
10827 #ifdef TARGET_NR_madvise
10828 case TARGET_NR_madvise:
10829 /* A straight passthrough may not be safe because qemu sometimes
10830 turns private file-backed mappings into anonymous mappings.
10831 This will break MADV_DONTNEED.
10832 This is a hint, so ignoring and returning success is ok. */
10833 ret = get_errno(0);
10836 #if TARGET_ABI_BITS == 32
10837 case TARGET_NR_fcntl64:
10841 from_flock64_fn *copyfrom = copy_from_user_flock64;
10842 to_flock64_fn *copyto = copy_to_user_flock64;
10845 if (((CPUARMState *)cpu_env)->eabi) {
10846 copyfrom = copy_from_user_eabi_flock64;
10847 copyto = copy_to_user_eabi_flock64;
10851 cmd = target_to_host_fcntl_cmd(arg2);
10852 if (cmd == -TARGET_EINVAL) {
10858 case TARGET_F_GETLK64:
10859 ret = copyfrom(&fl, arg3);
10863 ret = get_errno(fcntl(arg1, cmd, &fl));
10865 ret = copyto(arg3, &fl);
10869 case TARGET_F_SETLK64:
10870 case TARGET_F_SETLKW64:
10871 ret = copyfrom(&fl, arg3);
10875 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
10878 ret = do_fcntl(arg1, arg2, arg3);
10884 #ifdef TARGET_NR_cacheflush
10885 case TARGET_NR_cacheflush:
10886 /* self-modifying code is handled automatically, so nothing needed */
10890 #ifdef TARGET_NR_security
10891 case TARGET_NR_security:
10892 goto unimplemented;
10894 #ifdef TARGET_NR_getpagesize
10895 case TARGET_NR_getpagesize:
10896 ret = TARGET_PAGE_SIZE;
10899 case TARGET_NR_gettid:
10900 ret = get_errno(gettid());
10902 #ifdef TARGET_NR_readahead
10903 case TARGET_NR_readahead:
10904 #if TARGET_ABI_BITS == 32
10905 if (regpairs_aligned(cpu_env)) {
10910 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
10912 ret = get_errno(readahead(arg1, arg2, arg3));
10917 #ifdef TARGET_NR_setxattr
10918 case TARGET_NR_listxattr:
10919 case TARGET_NR_llistxattr:
10923 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10925 ret = -TARGET_EFAULT;
10929 p = lock_user_string(arg1);
10931 if (num == TARGET_NR_listxattr) {
10932 ret = get_errno(listxattr(p, b, arg3));
10934 ret = get_errno(llistxattr(p, b, arg3));
10937 ret = -TARGET_EFAULT;
10939 unlock_user(p, arg1, 0);
10940 unlock_user(b, arg2, arg3);
10943 case TARGET_NR_flistxattr:
10947 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10949 ret = -TARGET_EFAULT;
10953 ret = get_errno(flistxattr(arg1, b, arg3));
10954 unlock_user(b, arg2, arg3);
10957 case TARGET_NR_setxattr:
10958 case TARGET_NR_lsetxattr:
10960 void *p, *n, *v = 0;
10962 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10964 ret = -TARGET_EFAULT;
10968 p = lock_user_string(arg1);
10969 n = lock_user_string(arg2);
10971 if (num == TARGET_NR_setxattr) {
10972 ret = get_errno(setxattr(p, n, v, arg4, arg5));
10974 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
10977 ret = -TARGET_EFAULT;
10979 unlock_user(p, arg1, 0);
10980 unlock_user(n, arg2, 0);
10981 unlock_user(v, arg3, 0);
10984 case TARGET_NR_fsetxattr:
10988 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10990 ret = -TARGET_EFAULT;
10994 n = lock_user_string(arg2);
10996 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
10998 ret = -TARGET_EFAULT;
11000 unlock_user(n, arg2, 0);
11001 unlock_user(v, arg3, 0);
11004 case TARGET_NR_getxattr:
11005 case TARGET_NR_lgetxattr:
11007 void *p, *n, *v = 0;
11009 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11011 ret = -TARGET_EFAULT;
11015 p = lock_user_string(arg1);
11016 n = lock_user_string(arg2);
11018 if (num == TARGET_NR_getxattr) {
11019 ret = get_errno(getxattr(p, n, v, arg4));
11021 ret = get_errno(lgetxattr(p, n, v, arg4));
11024 ret = -TARGET_EFAULT;
11026 unlock_user(p, arg1, 0);
11027 unlock_user(n, arg2, 0);
11028 unlock_user(v, arg3, arg4);
11031 case TARGET_NR_fgetxattr:
11035 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11037 ret = -TARGET_EFAULT;
11041 n = lock_user_string(arg2);
11043 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11045 ret = -TARGET_EFAULT;
11047 unlock_user(n, arg2, 0);
11048 unlock_user(v, arg3, arg4);
11051 case TARGET_NR_removexattr:
11052 case TARGET_NR_lremovexattr:
11055 p = lock_user_string(arg1);
11056 n = lock_user_string(arg2);
11058 if (num == TARGET_NR_removexattr) {
11059 ret = get_errno(removexattr(p, n));
11061 ret = get_errno(lremovexattr(p, n));
11064 ret = -TARGET_EFAULT;
11066 unlock_user(p, arg1, 0);
11067 unlock_user(n, arg2, 0);
11070 case TARGET_NR_fremovexattr:
11073 n = lock_user_string(arg2);
11075 ret = get_errno(fremovexattr(arg1, n));
11077 ret = -TARGET_EFAULT;
11079 unlock_user(n, arg2, 0);
11083 #endif /* CONFIG_ATTR */
11084 #ifdef TARGET_NR_set_thread_area
11085 case TARGET_NR_set_thread_area:
11086 #if defined(TARGET_MIPS)
11087 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11090 #elif defined(TARGET_CRIS)
11092 ret = -TARGET_EINVAL;
11094 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11098 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11099 ret = do_set_thread_area(cpu_env, arg1);
11101 #elif defined(TARGET_M68K)
11103 TaskState *ts = cpu->opaque;
11104 ts->tp_value = arg1;
11109 goto unimplemented_nowarn;
11112 #ifdef TARGET_NR_get_thread_area
11113 case TARGET_NR_get_thread_area:
11114 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11115 ret = do_get_thread_area(cpu_env, arg1);
11117 #elif defined(TARGET_M68K)
11119 TaskState *ts = cpu->opaque;
11120 ret = ts->tp_value;
11124 goto unimplemented_nowarn;
11127 #ifdef TARGET_NR_getdomainname
11128 case TARGET_NR_getdomainname:
11129 goto unimplemented_nowarn;
11132 #ifdef TARGET_NR_clock_gettime
11133 case TARGET_NR_clock_gettime:
11135 struct timespec ts;
11136 ret = get_errno(clock_gettime(arg1, &ts));
11137 if (!is_error(ret)) {
11138 host_to_target_timespec(arg2, &ts);
11143 #ifdef TARGET_NR_clock_getres
11144 case TARGET_NR_clock_getres:
11146 struct timespec ts;
11147 ret = get_errno(clock_getres(arg1, &ts));
11148 if (!is_error(ret)) {
11149 host_to_target_timespec(arg2, &ts);
11154 #ifdef TARGET_NR_clock_nanosleep
11155 case TARGET_NR_clock_nanosleep:
11157 struct timespec ts;
11158 target_to_host_timespec(&ts, arg3);
11159 ret = get_errno(safe_clock_nanosleep(arg1, arg2,
11160 &ts, arg4 ? &ts : NULL));
11162 host_to_target_timespec(arg4, &ts);
11164 #if defined(TARGET_PPC)
11165 /* clock_nanosleep is odd in that it returns positive errno values.
11166 * On PPC, CR0 bit 3 should be set in such a situation. */
11167 if (ret && ret != -TARGET_ERESTARTSYS) {
11168 ((CPUPPCState *)cpu_env)->crf[0] |= 1;
11175 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11176 case TARGET_NR_set_tid_address:
11177 ret = get_errno(set_tid_address((int *)g2h(arg1)));
11181 case TARGET_NR_tkill:
11182 ret = get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
11185 case TARGET_NR_tgkill:
11186 ret = get_errno(safe_tgkill((int)arg1, (int)arg2,
11187 target_to_host_signal(arg3)));
11190 #ifdef TARGET_NR_set_robust_list
11191 case TARGET_NR_set_robust_list:
11192 case TARGET_NR_get_robust_list:
11193 /* The ABI for supporting robust futexes has userspace pass
11194 * the kernel a pointer to a linked list which is updated by
11195 * userspace after the syscall; the list is walked by the kernel
11196 * when the thread exits. Since the linked list in QEMU guest
11197 * memory isn't a valid linked list for the host and we have
11198 * no way to reliably intercept the thread-death event, we can't
11199 * support these. Silently return ENOSYS so that guest userspace
11200 * falls back to a non-robust futex implementation (which should
11201 * be OK except in the corner case of the guest crashing while
11202 * holding a mutex that is shared with another process via
11205 goto unimplemented_nowarn;
11208 #if defined(TARGET_NR_utimensat)
11209 case TARGET_NR_utimensat:
11211 struct timespec *tsp, ts[2];
11215 target_to_host_timespec(ts, arg3);
11216 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
11220 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
11222 if (!(p = lock_user_string(arg2))) {
11223 ret = -TARGET_EFAULT;
11226 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
11227 unlock_user(p, arg2, 0);
11232 case TARGET_NR_futex:
11233 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
11235 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11236 case TARGET_NR_inotify_init:
11237 ret = get_errno(sys_inotify_init());
11240 #ifdef CONFIG_INOTIFY1
11241 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11242 case TARGET_NR_inotify_init1:
11243 ret = get_errno(sys_inotify_init1(arg1));
11247 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11248 case TARGET_NR_inotify_add_watch:
11249 p = lock_user_string(arg2);
11250 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
11251 unlock_user(p, arg2, 0);
11254 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11255 case TARGET_NR_inotify_rm_watch:
11256 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
11260 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11261 case TARGET_NR_mq_open:
11263 struct mq_attr posix_mq_attr, *attrp;
11265 p = lock_user_string(arg1 - 1);
11267 copy_from_user_mq_attr (&posix_mq_attr, arg4);
11268 attrp = &posix_mq_attr;
11272 ret = get_errno(mq_open(p, arg2, arg3, attrp));
11273 unlock_user (p, arg1, 0);
11277 case TARGET_NR_mq_unlink:
11278 p = lock_user_string(arg1 - 1);
11280 ret = -TARGET_EFAULT;
11283 ret = get_errno(mq_unlink(p));
11284 unlock_user (p, arg1, 0);
11287 case TARGET_NR_mq_timedsend:
11289 struct timespec ts;
11291 p = lock_user (VERIFY_READ, arg2, arg3, 1);
11293 target_to_host_timespec(&ts, arg5);
11294 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
11295 host_to_target_timespec(arg5, &ts);
11297 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
11299 unlock_user (p, arg2, arg3);
11303 case TARGET_NR_mq_timedreceive:
11305 struct timespec ts;
11308 p = lock_user (VERIFY_READ, arg2, arg3, 1);
11310 target_to_host_timespec(&ts, arg5);
11311 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11313 host_to_target_timespec(arg5, &ts);
11315 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11318 unlock_user (p, arg2, arg3);
11320 put_user_u32(prio, arg4);
11324 /* Not implemented for now... */
11325 /* case TARGET_NR_mq_notify: */
11328 case TARGET_NR_mq_getsetattr:
11330 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
11333 ret = mq_getattr(arg1, &posix_mq_attr_out);
11334 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
11337 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
11338 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
11345 #ifdef CONFIG_SPLICE
11346 #ifdef TARGET_NR_tee
11347 case TARGET_NR_tee:
11349 ret = get_errno(tee(arg1,arg2,arg3,arg4));
11353 #ifdef TARGET_NR_splice
11354 case TARGET_NR_splice:
11356 loff_t loff_in, loff_out;
11357 loff_t *ploff_in = NULL, *ploff_out = NULL;
11359 if (get_user_u64(loff_in, arg2)) {
11362 ploff_in = &loff_in;
11365 if (get_user_u64(loff_out, arg4)) {
11368 ploff_out = &loff_out;
11370 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
11372 if (put_user_u64(loff_in, arg2)) {
11377 if (put_user_u64(loff_out, arg4)) {
11384 #ifdef TARGET_NR_vmsplice
11385 case TARGET_NR_vmsplice:
11387 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11389 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
11390 unlock_iovec(vec, arg2, arg3, 0);
11392 ret = -host_to_target_errno(errno);
11397 #endif /* CONFIG_SPLICE */
11398 #ifdef CONFIG_EVENTFD
11399 #if defined(TARGET_NR_eventfd)
11400 case TARGET_NR_eventfd:
11401 ret = get_errno(eventfd(arg1, 0));
11402 fd_trans_unregister(ret);
11405 #if defined(TARGET_NR_eventfd2)
11406 case TARGET_NR_eventfd2:
11408 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
11409 if (arg2 & TARGET_O_NONBLOCK) {
11410 host_flags |= O_NONBLOCK;
11412 if (arg2 & TARGET_O_CLOEXEC) {
11413 host_flags |= O_CLOEXEC;
11415 ret = get_errno(eventfd(arg1, host_flags));
11416 fd_trans_unregister(ret);
11420 #endif /* CONFIG_EVENTFD */
11421 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11422 case TARGET_NR_fallocate:
11423 #if TARGET_ABI_BITS == 32
11424 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
11425 target_offset64(arg5, arg6)));
11427 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
11431 #if defined(CONFIG_SYNC_FILE_RANGE)
11432 #if defined(TARGET_NR_sync_file_range)
11433 case TARGET_NR_sync_file_range:
11434 #if TARGET_ABI_BITS == 32
11435 #if defined(TARGET_MIPS)
11436 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11437 target_offset64(arg5, arg6), arg7));
11439 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
11440 target_offset64(arg4, arg5), arg6));
11441 #endif /* !TARGET_MIPS */
11443 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
11447 #if defined(TARGET_NR_sync_file_range2)
11448 case TARGET_NR_sync_file_range2:
11449 /* This is like sync_file_range but the arguments are reordered */
11450 #if TARGET_ABI_BITS == 32
11451 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11452 target_offset64(arg5, arg6), arg2));
11454 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
11459 #if defined(TARGET_NR_signalfd4)
11460 case TARGET_NR_signalfd4:
11461 ret = do_signalfd4(arg1, arg2, arg4);
11464 #if defined(TARGET_NR_signalfd)
11465 case TARGET_NR_signalfd:
11466 ret = do_signalfd4(arg1, arg2, 0);
11469 #if defined(CONFIG_EPOLL)
11470 #if defined(TARGET_NR_epoll_create)
11471 case TARGET_NR_epoll_create:
11472 ret = get_errno(epoll_create(arg1));
11475 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11476 case TARGET_NR_epoll_create1:
11477 ret = get_errno(epoll_create1(arg1));
11480 #if defined(TARGET_NR_epoll_ctl)
11481 case TARGET_NR_epoll_ctl:
11483 struct epoll_event ep;
11484 struct epoll_event *epp = 0;
11486 struct target_epoll_event *target_ep;
11487 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
11490 ep.events = tswap32(target_ep->events);
11491 /* The epoll_data_t union is just opaque data to the kernel,
11492 * so we transfer all 64 bits across and need not worry what
11493 * actual data type it is.
11495 ep.data.u64 = tswap64(target_ep->data.u64);
11496 unlock_user_struct(target_ep, arg4, 0);
11499 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
11504 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11505 #if defined(TARGET_NR_epoll_wait)
11506 case TARGET_NR_epoll_wait:
11508 #if defined(TARGET_NR_epoll_pwait)
11509 case TARGET_NR_epoll_pwait:
11512 struct target_epoll_event *target_ep;
11513 struct epoll_event *ep;
11515 int maxevents = arg3;
11516 int timeout = arg4;
11518 if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
11519 ret = -TARGET_EINVAL;
11523 target_ep = lock_user(VERIFY_WRITE, arg2,
11524 maxevents * sizeof(struct target_epoll_event), 1);
11529 ep = alloca(maxevents * sizeof(struct epoll_event));
11532 #if defined(TARGET_NR_epoll_pwait)
11533 case TARGET_NR_epoll_pwait:
11535 target_sigset_t *target_set;
11536 sigset_t _set, *set = &_set;
11539 if (arg6 != sizeof(target_sigset_t)) {
11540 ret = -TARGET_EINVAL;
11544 target_set = lock_user(VERIFY_READ, arg5,
11545 sizeof(target_sigset_t), 1);
11547 unlock_user(target_ep, arg2, 0);
11550 target_to_host_sigset(set, target_set);
11551 unlock_user(target_set, arg5, 0);
11556 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11557 set, SIGSET_T_SIZE));
11561 #if defined(TARGET_NR_epoll_wait)
11562 case TARGET_NR_epoll_wait:
11563 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11568 ret = -TARGET_ENOSYS;
11570 if (!is_error(ret)) {
11572 for (i = 0; i < ret; i++) {
11573 target_ep[i].events = tswap32(ep[i].events);
11574 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
11577 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
11582 #ifdef TARGET_NR_prlimit64
11583 case TARGET_NR_prlimit64:
11585 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
11586 struct target_rlimit64 *target_rnew, *target_rold;
11587 struct host_rlimit64 rnew, rold, *rnewp = 0;
11588 int resource = target_to_host_resource(arg2);
11590 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
11593 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
11594 rnew.rlim_max = tswap64(target_rnew->rlim_max);
11595 unlock_user_struct(target_rnew, arg3, 0);
11599 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
11600 if (!is_error(ret) && arg4) {
11601 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
11604 target_rold->rlim_cur = tswap64(rold.rlim_cur);
11605 target_rold->rlim_max = tswap64(rold.rlim_max);
11606 unlock_user_struct(target_rold, arg4, 1);
11611 #ifdef TARGET_NR_gethostname
11612 case TARGET_NR_gethostname:
11614 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
11616 ret = get_errno(gethostname(name, arg2));
11617 unlock_user(name, arg1, arg2);
11619 ret = -TARGET_EFAULT;
11624 #ifdef TARGET_NR_atomic_cmpxchg_32
11625 case TARGET_NR_atomic_cmpxchg_32:
11627 /* should use start_exclusive from main.c */
11628 abi_ulong mem_value;
11629 if (get_user_u32(mem_value, arg6)) {
11630 target_siginfo_t info;
11631 info.si_signo = SIGSEGV;
11633 info.si_code = TARGET_SEGV_MAPERR;
11634 info._sifields._sigfault._addr = arg6;
11635 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
11639 if (mem_value == arg2)
11640 put_user_u32(arg1, arg6);
11645 #ifdef TARGET_NR_atomic_barrier
11646 case TARGET_NR_atomic_barrier:
11648 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
11654 #ifdef TARGET_NR_timer_create
11655 case TARGET_NR_timer_create:
11657 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
11659 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
11662 int timer_index = next_free_host_timer();
11664 if (timer_index < 0) {
11665 ret = -TARGET_EAGAIN;
11667 timer_t *phtimer = g_posix_timers + timer_index;
11670 phost_sevp = &host_sevp;
11671 ret = target_to_host_sigevent(phost_sevp, arg2);
11677 ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
11681 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
11690 #ifdef TARGET_NR_timer_settime
11691 case TARGET_NR_timer_settime:
11693 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
11694 * struct itimerspec * old_value */
11695 target_timer_t timerid = get_timer_id(arg1);
11699 } else if (arg3 == 0) {
11700 ret = -TARGET_EINVAL;
11702 timer_t htimer = g_posix_timers[timerid];
11703 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
11705 target_to_host_itimerspec(&hspec_new, arg3);
11707 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
11708 host_to_target_itimerspec(arg2, &hspec_old);
11714 #ifdef TARGET_NR_timer_gettime
11715 case TARGET_NR_timer_gettime:
11717 /* args: timer_t timerid, struct itimerspec *curr_value */
11718 target_timer_t timerid = get_timer_id(arg1);
11722 } else if (!arg2) {
11723 ret = -TARGET_EFAULT;
11725 timer_t htimer = g_posix_timers[timerid];
11726 struct itimerspec hspec;
11727 ret = get_errno(timer_gettime(htimer, &hspec));
11729 if (host_to_target_itimerspec(arg2, &hspec)) {
11730 ret = -TARGET_EFAULT;
11737 #ifdef TARGET_NR_timer_getoverrun
11738 case TARGET_NR_timer_getoverrun:
11740 /* args: timer_t timerid */
11741 target_timer_t timerid = get_timer_id(arg1);
11746 timer_t htimer = g_posix_timers[timerid];
11747 ret = get_errno(timer_getoverrun(htimer));
11749 fd_trans_unregister(ret);
11754 #ifdef TARGET_NR_timer_delete
11755 case TARGET_NR_timer_delete:
11757 /* args: timer_t timerid */
11758 target_timer_t timerid = get_timer_id(arg1);
11763 timer_t htimer = g_posix_timers[timerid];
11764 ret = get_errno(timer_delete(htimer));
11765 g_posix_timers[timerid] = 0;
11771 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11772 case TARGET_NR_timerfd_create:
11773 ret = get_errno(timerfd_create(arg1,
11774 target_to_host_bitmask(arg2, fcntl_flags_tbl)));
11778 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11779 case TARGET_NR_timerfd_gettime:
11781 struct itimerspec its_curr;
11783 ret = get_errno(timerfd_gettime(arg1, &its_curr));
11785 if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
11792 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
11793 case TARGET_NR_timerfd_settime:
11795 struct itimerspec its_new, its_old, *p_new;
11798 if (target_to_host_itimerspec(&its_new, arg3)) {
11806 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
11808 if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
11815 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
11816 case TARGET_NR_ioprio_get:
11817 ret = get_errno(ioprio_get(arg1, arg2));
11821 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
11822 case TARGET_NR_ioprio_set:
11823 ret = get_errno(ioprio_set(arg1, arg2, arg3));
11827 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
11828 case TARGET_NR_setns:
11829 ret = get_errno(setns(arg1, arg2));
11832 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
11833 case TARGET_NR_unshare:
11834 ret = get_errno(unshare(arg1));
11840 gemu_log("qemu: Unsupported syscall: %d\n", num);
11841 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
11842 unimplemented_nowarn:
11844 ret = -TARGET_ENOSYS;
11849 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
11852 print_syscall_ret(num, ret);
11853 trace_guest_user_syscall_ret(cpu, num, ret);
11856 ret = -TARGET_EFAULT;