4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
29 #include <sys/mount.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
36 #include <linux/capability.h>
39 int __clone2(int (*fn)(void *), void *child_stack_base,
40 size_t stack_size, int flags, void *arg, ...);
42 #include <sys/socket.h>
46 #include <sys/times.h>
49 #include <sys/statfs.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include "qemu-common.h"
60 #include <sys/timerfd.h>
66 #include <sys/eventfd.h>
69 #include <sys/epoll.h>
72 #include "qemu/xattr.h"
74 #ifdef CONFIG_SENDFILE
75 #include <sys/sendfile.h>
78 #define termios host_termios
79 #define winsize host_winsize
80 #define termio host_termio
81 #define sgttyb host_sgttyb /* same as target */
82 #define tchars host_tchars /* same as target */
83 #define ltchars host_ltchars /* same as target */
85 #include <linux/termios.h>
86 #include <linux/unistd.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
91 #include <linux/mtio.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
98 #include <linux/dm-ioctl.h>
99 #include <linux/reboot.h>
100 #include <linux/route.h>
101 #include <linux/filter.h>
102 #include <linux/blkpg.h>
103 #include <netpacket/packet.h>
104 #include <linux/netlink.h>
105 #ifdef CONFIG_RTNETLINK
106 #include <linux/rtnetlink.h>
107 #include <linux/if_bridge.h>
109 #include <linux/audit.h>
110 #include "linux_loop.h"
115 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
116 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
119 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
120 * once. This exercises the codepaths for restart.
122 //#define DEBUG_ERESTARTSYS
124 //#include <linux/msdos_fs.h>
125 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
126 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
136 #define _syscall0(type,name) \
137 static type name (void) \
139 return syscall(__NR_##name); \
142 #define _syscall1(type,name,type1,arg1) \
143 static type name (type1 arg1) \
145 return syscall(__NR_##name, arg1); \
148 #define _syscall2(type,name,type1,arg1,type2,arg2) \
149 static type name (type1 arg1,type2 arg2) \
151 return syscall(__NR_##name, arg1, arg2); \
154 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
155 static type name (type1 arg1,type2 arg2,type3 arg3) \
157 return syscall(__NR_##name, arg1, arg2, arg3); \
160 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
161 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
163 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
166 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
168 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
170 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
174 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
175 type5,arg5,type6,arg6) \
176 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
179 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
183 #define __NR_sys_uname __NR_uname
184 #define __NR_sys_getcwd1 __NR_getcwd
185 #define __NR_sys_getdents __NR_getdents
186 #define __NR_sys_getdents64 __NR_getdents64
187 #define __NR_sys_getpriority __NR_getpriority
188 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
189 #define __NR_sys_syslog __NR_syslog
190 #define __NR_sys_futex __NR_futex
191 #define __NR_sys_inotify_init __NR_inotify_init
192 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
193 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
195 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
197 #define __NR__llseek __NR_lseek
200 /* Newer kernel ports have llseek() instead of _llseek() */
201 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
202 #define TARGET_NR__llseek TARGET_NR_llseek
206 _syscall0(int, gettid)
208 /* This is a replacement for the host gettid() and must return a host
210 static int gettid(void) {
214 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
215 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
217 #if !defined(__NR_getdents) || \
218 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
219 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
221 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
222 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
223 loff_t *, res, uint, wh);
225 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
226 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
227 #ifdef __NR_exit_group
228 _syscall1(int,exit_group,int,error_code)
230 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
231 _syscall1(int,set_tid_address,int *,tidptr)
233 #if defined(TARGET_NR_futex) && defined(__NR_futex)
234 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
235 const struct timespec *,timeout,int *,uaddr2,int,val3)
237 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
238 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
239 unsigned long *, user_mask_ptr);
240 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
241 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
242 unsigned long *, user_mask_ptr);
243 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
245 _syscall2(int, capget, struct __user_cap_header_struct *, header,
246 struct __user_cap_data_struct *, data);
247 _syscall2(int, capset, struct __user_cap_header_struct *, header,
248 struct __user_cap_data_struct *, data);
249 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
250 _syscall2(int, ioprio_get, int, which, int, who)
252 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
253 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
255 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
256 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
259 static bitmask_transtbl fcntl_flags_tbl[] = {
260 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
261 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
262 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
263 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
264 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
265 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
266 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
267 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
268 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
269 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
270 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
271 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
272 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
273 #if defined(O_DIRECT)
274 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
276 #if defined(O_NOATIME)
277 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
279 #if defined(O_CLOEXEC)
280 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
283 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
285 /* Don't terminate the list prematurely on 64-bit host+guest. */
286 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
287 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
294 QEMU_IFLA_BR_FORWARD_DELAY,
295 QEMU_IFLA_BR_HELLO_TIME,
296 QEMU_IFLA_BR_MAX_AGE,
297 QEMU_IFLA_BR_AGEING_TIME,
298 QEMU_IFLA_BR_STP_STATE,
299 QEMU_IFLA_BR_PRIORITY,
300 QEMU_IFLA_BR_VLAN_FILTERING,
301 QEMU_IFLA_BR_VLAN_PROTOCOL,
302 QEMU_IFLA_BR_GROUP_FWD_MASK,
303 QEMU_IFLA_BR_ROOT_ID,
304 QEMU_IFLA_BR_BRIDGE_ID,
305 QEMU_IFLA_BR_ROOT_PORT,
306 QEMU_IFLA_BR_ROOT_PATH_COST,
307 QEMU_IFLA_BR_TOPOLOGY_CHANGE,
308 QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
309 QEMU_IFLA_BR_HELLO_TIMER,
310 QEMU_IFLA_BR_TCN_TIMER,
311 QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER,
312 QEMU_IFLA_BR_GC_TIMER,
313 QEMU_IFLA_BR_GROUP_ADDR,
314 QEMU_IFLA_BR_FDB_FLUSH,
315 QEMU_IFLA_BR_MCAST_ROUTER,
316 QEMU_IFLA_BR_MCAST_SNOOPING,
317 QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR,
318 QEMU_IFLA_BR_MCAST_QUERIER,
319 QEMU_IFLA_BR_MCAST_HASH_ELASTICITY,
320 QEMU_IFLA_BR_MCAST_HASH_MAX,
321 QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT,
322 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT,
323 QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL,
324 QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL,
325 QEMU_IFLA_BR_MCAST_QUERIER_INTVL,
326 QEMU_IFLA_BR_MCAST_QUERY_INTVL,
327 QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL,
328 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL,
329 QEMU_IFLA_BR_NF_CALL_IPTABLES,
330 QEMU_IFLA_BR_NF_CALL_IP6TABLES,
331 QEMU_IFLA_BR_NF_CALL_ARPTABLES,
332 QEMU_IFLA_BR_VLAN_DEFAULT_PVID,
334 QEMU_IFLA_BR_VLAN_STATS_ENABLED,
335 QEMU_IFLA_BR_MCAST_STATS_ENABLED,
359 QEMU_IFLA_NET_NS_PID,
362 QEMU_IFLA_VFINFO_LIST,
370 QEMU_IFLA_PROMISCUITY,
371 QEMU_IFLA_NUM_TX_QUEUES,
372 QEMU_IFLA_NUM_RX_QUEUES,
374 QEMU_IFLA_PHYS_PORT_ID,
375 QEMU_IFLA_CARRIER_CHANGES,
376 QEMU_IFLA_PHYS_SWITCH_ID,
377 QEMU_IFLA_LINK_NETNSID,
378 QEMU_IFLA_PHYS_PORT_NAME,
379 QEMU_IFLA_PROTO_DOWN,
380 QEMU_IFLA_GSO_MAX_SEGS,
381 QEMU_IFLA_GSO_MAX_SIZE,
388 QEMU_IFLA_BRPORT_UNSPEC,
389 QEMU_IFLA_BRPORT_STATE,
390 QEMU_IFLA_BRPORT_PRIORITY,
391 QEMU_IFLA_BRPORT_COST,
392 QEMU_IFLA_BRPORT_MODE,
393 QEMU_IFLA_BRPORT_GUARD,
394 QEMU_IFLA_BRPORT_PROTECT,
395 QEMU_IFLA_BRPORT_FAST_LEAVE,
396 QEMU_IFLA_BRPORT_LEARNING,
397 QEMU_IFLA_BRPORT_UNICAST_FLOOD,
398 QEMU_IFLA_BRPORT_PROXYARP,
399 QEMU_IFLA_BRPORT_LEARNING_SYNC,
400 QEMU_IFLA_BRPORT_PROXYARP_WIFI,
401 QEMU_IFLA_BRPORT_ROOT_ID,
402 QEMU_IFLA_BRPORT_BRIDGE_ID,
403 QEMU_IFLA_BRPORT_DESIGNATED_PORT,
404 QEMU_IFLA_BRPORT_DESIGNATED_COST,
407 QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
408 QEMU_IFLA_BRPORT_CONFIG_PENDING,
409 QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER,
410 QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER,
411 QEMU_IFLA_BRPORT_HOLD_TIMER,
412 QEMU_IFLA_BRPORT_FLUSH,
413 QEMU_IFLA_BRPORT_MULTICAST_ROUTER,
414 QEMU_IFLA_BRPORT_PAD,
415 QEMU___IFLA_BRPORT_MAX
419 QEMU_IFLA_INFO_UNSPEC,
422 QEMU_IFLA_INFO_XSTATS,
423 QEMU_IFLA_INFO_SLAVE_KIND,
424 QEMU_IFLA_INFO_SLAVE_DATA,
425 QEMU___IFLA_INFO_MAX,
429 QEMU_IFLA_INET_UNSPEC,
431 QEMU___IFLA_INET_MAX,
435 QEMU_IFLA_INET6_UNSPEC,
436 QEMU_IFLA_INET6_FLAGS,
437 QEMU_IFLA_INET6_CONF,
438 QEMU_IFLA_INET6_STATS,
439 QEMU_IFLA_INET6_MCAST,
440 QEMU_IFLA_INET6_CACHEINFO,
441 QEMU_IFLA_INET6_ICMP6STATS,
442 QEMU_IFLA_INET6_TOKEN,
443 QEMU_IFLA_INET6_ADDR_GEN_MODE,
444 QEMU___IFLA_INET6_MAX
447 typedef abi_long (*TargetFdDataFunc)(void *, size_t);
448 typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t);
449 typedef struct TargetFdTrans {
450 TargetFdDataFunc host_to_target_data;
451 TargetFdDataFunc target_to_host_data;
452 TargetFdAddrFunc target_to_host_addr;
455 static TargetFdTrans **target_fd_trans;
457 static unsigned int target_fd_max;
459 static TargetFdDataFunc fd_trans_target_to_host_data(int fd)
461 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
462 return target_fd_trans[fd]->target_to_host_data;
467 static TargetFdDataFunc fd_trans_host_to_target_data(int fd)
469 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
470 return target_fd_trans[fd]->host_to_target_data;
475 static TargetFdAddrFunc fd_trans_target_to_host_addr(int fd)
477 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
478 return target_fd_trans[fd]->target_to_host_addr;
483 static void fd_trans_register(int fd, TargetFdTrans *trans)
487 if (fd >= target_fd_max) {
488 oldmax = target_fd_max;
489 target_fd_max = ((fd >> 6) + 1) << 6; /* by slice of 64 entries */
490 target_fd_trans = g_renew(TargetFdTrans *,
491 target_fd_trans, target_fd_max);
492 memset((void *)(target_fd_trans + oldmax), 0,
493 (target_fd_max - oldmax) * sizeof(TargetFdTrans *));
495 target_fd_trans[fd] = trans;
498 static void fd_trans_unregister(int fd)
500 if (fd >= 0 && fd < target_fd_max) {
501 target_fd_trans[fd] = NULL;
505 static void fd_trans_dup(int oldfd, int newfd)
507 fd_trans_unregister(newfd);
508 if (oldfd < target_fd_max && target_fd_trans[oldfd]) {
509 fd_trans_register(newfd, target_fd_trans[oldfd]);
/* getcwd() wrapper matching the getcwd(2) syscall convention: on success
 * return the path length including the trailing NUL; on failure return -1
 * (errno is already set by getcwd()). */
static int sys_getcwd1(char *buf, size_t size)
{
    char *cwd = getcwd(buf, size);

    if (cwd == NULL) {
        /* getcwd() sets errno */
        return -1;
    }
    return strlen(cwd) + 1;
}
#ifdef TARGET_NR_utimensat
#ifdef CONFIG_UTIMENSAT
/* libc has utimensat(): a NULL pathname means "operate on dirfd itself",
 * which maps to futimens(). */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    if (pathname == NULL) {
        return futimens(dirfd, times);
    }
    return utimensat(dirfd, pathname, times, flags);
}
#elif defined(__NR_utimensat)
/* No libc wrapper; call the host syscall directly. */
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
/* Neither libc nor kernel support: fail with ENOSYS. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */
546 #ifdef CONFIG_INOTIFY
547 #include <sys/inotify.h>
549 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
550 static int sys_inotify_init(void)
552 return (inotify_init());
555 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
556 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
558 return (inotify_add_watch(fd, pathname, mask));
561 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
562 static int sys_inotify_rm_watch(int fd, int32_t wd)
564 return (inotify_rm_watch(fd, wd));
567 #ifdef CONFIG_INOTIFY1
568 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
569 static int sys_inotify_init1(int flags)
571 return (inotify_init1(flags));
576 /* Userspace can usually survive runtime without inotify */
577 #undef TARGET_NR_inotify_init
578 #undef TARGET_NR_inotify_init1
579 #undef TARGET_NR_inotify_add_watch
580 #undef TARGET_NR_inotify_rm_watch
581 #endif /* CONFIG_INOTIFY */
583 #if defined(TARGET_NR_prlimit64)
584 #ifndef __NR_prlimit64
585 # define __NR_prlimit64 -1
587 #define __NR_sys_prlimit64 __NR_prlimit64
588 /* The glibc rlimit structure may not be that used by the underlying syscall */
589 struct host_rlimit64 {
593 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
594 const struct host_rlimit64 *, new_limit,
595 struct host_rlimit64 *, old_limit)
599 #if defined(TARGET_NR_timer_create)
600 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
601 static timer_t g_posix_timers[32] = { 0, } ;
603 static inline int next_free_host_timer(void)
606 /* FIXME: Does finding the next free slot require a lock? */
607 for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
608 if (g_posix_timers[k] == 0) {
609 g_posix_timers[k] = (timer_t) 1;
617 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
619 static inline int regpairs_aligned(void *cpu_env) {
620 return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
622 #elif defined(TARGET_MIPS)
623 static inline int regpairs_aligned(void *cpu_env) { return 1; }
624 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
625 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
626 * of registers which translates to the same as ARM/MIPS, because we start with
628 static inline int regpairs_aligned(void *cpu_env) { return 1; }
630 static inline int regpairs_aligned(void *cpu_env) { return 0; }
633 #define ERRNO_TABLE_SIZE 1200
635 /* target_to_host_errno_table[] is initialized from
636 * host_to_target_errno_table[] in syscall_init(). */
637 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
641 * This list is the union of errno values overridden in asm-<arch>/errno.h
642 * minus the errnos that are not actually generic to all archs.
644 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
645 [EAGAIN] = TARGET_EAGAIN,
646 [EIDRM] = TARGET_EIDRM,
647 [ECHRNG] = TARGET_ECHRNG,
648 [EL2NSYNC] = TARGET_EL2NSYNC,
649 [EL3HLT] = TARGET_EL3HLT,
650 [EL3RST] = TARGET_EL3RST,
651 [ELNRNG] = TARGET_ELNRNG,
652 [EUNATCH] = TARGET_EUNATCH,
653 [ENOCSI] = TARGET_ENOCSI,
654 [EL2HLT] = TARGET_EL2HLT,
655 [EDEADLK] = TARGET_EDEADLK,
656 [ENOLCK] = TARGET_ENOLCK,
657 [EBADE] = TARGET_EBADE,
658 [EBADR] = TARGET_EBADR,
659 [EXFULL] = TARGET_EXFULL,
660 [ENOANO] = TARGET_ENOANO,
661 [EBADRQC] = TARGET_EBADRQC,
662 [EBADSLT] = TARGET_EBADSLT,
663 [EBFONT] = TARGET_EBFONT,
664 [ENOSTR] = TARGET_ENOSTR,
665 [ENODATA] = TARGET_ENODATA,
666 [ETIME] = TARGET_ETIME,
667 [ENOSR] = TARGET_ENOSR,
668 [ENONET] = TARGET_ENONET,
669 [ENOPKG] = TARGET_ENOPKG,
670 [EREMOTE] = TARGET_EREMOTE,
671 [ENOLINK] = TARGET_ENOLINK,
672 [EADV] = TARGET_EADV,
673 [ESRMNT] = TARGET_ESRMNT,
674 [ECOMM] = TARGET_ECOMM,
675 [EPROTO] = TARGET_EPROTO,
676 [EDOTDOT] = TARGET_EDOTDOT,
677 [EMULTIHOP] = TARGET_EMULTIHOP,
678 [EBADMSG] = TARGET_EBADMSG,
679 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
680 [EOVERFLOW] = TARGET_EOVERFLOW,
681 [ENOTUNIQ] = TARGET_ENOTUNIQ,
682 [EBADFD] = TARGET_EBADFD,
683 [EREMCHG] = TARGET_EREMCHG,
684 [ELIBACC] = TARGET_ELIBACC,
685 [ELIBBAD] = TARGET_ELIBBAD,
686 [ELIBSCN] = TARGET_ELIBSCN,
687 [ELIBMAX] = TARGET_ELIBMAX,
688 [ELIBEXEC] = TARGET_ELIBEXEC,
689 [EILSEQ] = TARGET_EILSEQ,
690 [ENOSYS] = TARGET_ENOSYS,
691 [ELOOP] = TARGET_ELOOP,
692 [ERESTART] = TARGET_ERESTART,
693 [ESTRPIPE] = TARGET_ESTRPIPE,
694 [ENOTEMPTY] = TARGET_ENOTEMPTY,
695 [EUSERS] = TARGET_EUSERS,
696 [ENOTSOCK] = TARGET_ENOTSOCK,
697 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
698 [EMSGSIZE] = TARGET_EMSGSIZE,
699 [EPROTOTYPE] = TARGET_EPROTOTYPE,
700 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
701 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
702 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
703 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
704 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
705 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
706 [EADDRINUSE] = TARGET_EADDRINUSE,
707 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
708 [ENETDOWN] = TARGET_ENETDOWN,
709 [ENETUNREACH] = TARGET_ENETUNREACH,
710 [ENETRESET] = TARGET_ENETRESET,
711 [ECONNABORTED] = TARGET_ECONNABORTED,
712 [ECONNRESET] = TARGET_ECONNRESET,
713 [ENOBUFS] = TARGET_ENOBUFS,
714 [EISCONN] = TARGET_EISCONN,
715 [ENOTCONN] = TARGET_ENOTCONN,
716 [EUCLEAN] = TARGET_EUCLEAN,
717 [ENOTNAM] = TARGET_ENOTNAM,
718 [ENAVAIL] = TARGET_ENAVAIL,
719 [EISNAM] = TARGET_EISNAM,
720 [EREMOTEIO] = TARGET_EREMOTEIO,
721 [ESHUTDOWN] = TARGET_ESHUTDOWN,
722 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
723 [ETIMEDOUT] = TARGET_ETIMEDOUT,
724 [ECONNREFUSED] = TARGET_ECONNREFUSED,
725 [EHOSTDOWN] = TARGET_EHOSTDOWN,
726 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
727 [EALREADY] = TARGET_EALREADY,
728 [EINPROGRESS] = TARGET_EINPROGRESS,
729 [ESTALE] = TARGET_ESTALE,
730 [ECANCELED] = TARGET_ECANCELED,
731 [ENOMEDIUM] = TARGET_ENOMEDIUM,
732 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
734 [ENOKEY] = TARGET_ENOKEY,
737 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
740 [EKEYREVOKED] = TARGET_EKEYREVOKED,
743 [EKEYREJECTED] = TARGET_EKEYREJECTED,
746 [EOWNERDEAD] = TARGET_EOWNERDEAD,
748 #ifdef ENOTRECOVERABLE
749 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
753 static inline int host_to_target_errno(int err)
755 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
756 host_to_target_errno_table[err]) {
757 return host_to_target_errno_table[err];
762 static inline int target_to_host_errno(int err)
764 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
765 target_to_host_errno_table[err]) {
766 return target_to_host_errno_table[err];
771 static inline abi_long get_errno(abi_long ret)
774 return -host_to_target_errno(errno);
779 static inline int is_error(abi_long ret)
781 return (abi_ulong)ret >= (abi_ulong)(-4096);
784 const char *target_strerror(int err)
786 if (err == TARGET_ERESTARTSYS) {
787 return "To be restarted";
789 if (err == TARGET_QEMU_ESIGRETURN) {
790 return "Successful exit from sigreturn";
793 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
796 return strerror(target_to_host_errno(err));
799 #define safe_syscall0(type, name) \
800 static type safe_##name(void) \
802 return safe_syscall(__NR_##name); \
805 #define safe_syscall1(type, name, type1, arg1) \
806 static type safe_##name(type1 arg1) \
808 return safe_syscall(__NR_##name, arg1); \
811 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
812 static type safe_##name(type1 arg1, type2 arg2) \
814 return safe_syscall(__NR_##name, arg1, arg2); \
817 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
818 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
820 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
823 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
825 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
827 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
830 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
831 type4, arg4, type5, arg5) \
832 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
835 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
838 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
839 type4, arg4, type5, arg5, type6, arg6) \
840 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
841 type5 arg5, type6 arg6) \
843 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
846 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
847 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
848 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
849 int, flags, mode_t, mode)
850 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
851 struct rusage *, rusage)
852 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
853 int, options, struct rusage *, rusage)
854 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
855 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
856 fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
857 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
858 struct timespec *, tsp, const sigset_t *, sigmask,
860 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
861 int, maxevents, int, timeout, const sigset_t *, sigmask,
863 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
864 const struct timespec *,timeout,int *,uaddr2,int,val3)
865 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
866 safe_syscall2(int, kill, pid_t, pid, int, sig)
867 safe_syscall2(int, tkill, int, tid, int, sig)
868 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
869 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
870 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
871 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
873 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
874 int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
875 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
876 int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
877 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
878 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
879 safe_syscall2(int, flock, int, fd, int, operation)
880 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
881 const struct timespec *, uts, size_t, sigsetsize)
882 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
884 safe_syscall2(int, nanosleep, const struct timespec *, req,
885 struct timespec *, rem)
886 #ifdef TARGET_NR_clock_nanosleep
887 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
888 const struct timespec *, req, struct timespec *, rem)
891 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
893 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
894 long, msgtype, int, flags)
895 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
896 unsigned, nsops, const struct timespec *, timeout)
898 /* This host kernel architecture uses a single ipc syscall; fake up
899 * wrappers for the sub-operations to hide this implementation detail.
900 * Annoyingly we can't include linux/ipc.h to get the constant definitions
901 * for the call parameter because some structs in there conflict with the
902 * sys/ipc.h ones. So we just define them here, and rely on them being
903 * the same for all host architectures.
905 #define Q_SEMTIMEDOP 4
908 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
910 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
911 void *, ptr, long, fifth)
912 static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
914 return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
916 static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
918 return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
920 static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
921 const struct timespec *timeout)
923 return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
927 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
928 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
929 size_t, len, unsigned, prio, const struct timespec *, timeout)
930 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
931 size_t, len, unsigned *, prio, const struct timespec *, timeout)
933 /* We do ioctl like this rather than via safe_syscall3 to preserve the
934 * "third argument might be integer or pointer or not present" behaviour of
937 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
938 /* Similarly for fcntl. Note that callers must always:
939 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
940 * use the flock64 struct rather than unsuffixed flock
941 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
944 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
946 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
949 static inline int host_to_target_sock_type(int host_type)
953 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
955 target_type = TARGET_SOCK_DGRAM;
958 target_type = TARGET_SOCK_STREAM;
961 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
965 #if defined(SOCK_CLOEXEC)
966 if (host_type & SOCK_CLOEXEC) {
967 target_type |= TARGET_SOCK_CLOEXEC;
971 #if defined(SOCK_NONBLOCK)
972 if (host_type & SOCK_NONBLOCK) {
973 target_type |= TARGET_SOCK_NONBLOCK;
980 static abi_ulong target_brk;
981 static abi_ulong target_original_brk;
982 static abi_ulong brk_page;
984 void target_set_brk(abi_ulong new_brk)
986 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
987 brk_page = HOST_PAGE_ALIGN(target_brk);
990 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
991 #define DEBUGF_BRK(message, args...)
993 /* do_brk() must return target values and target errnos. */
994 abi_long do_brk(abi_ulong new_brk)
996 abi_long mapped_addr;
997 abi_ulong new_alloc_size;
999 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
1002 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
1005 if (new_brk < target_original_brk) {
1006 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
1011 /* If the new brk is less than the highest page reserved to the
1012 * target heap allocation, set it and we're almost done... */
1013 if (new_brk <= brk_page) {
1014 /* Heap contents are initialized to zero, as for anonymous
1016 if (new_brk > target_brk) {
1017 memset(g2h(target_brk), 0, new_brk - target_brk);
1019 target_brk = new_brk;
1020 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
1024 /* We need to allocate more memory after the brk... Note that
1025 * we don't use MAP_FIXED because that will map over the top of
1026 * any existing mapping (like the one with the host libc or qemu
1027 * itself); instead we treat "mapped but at wrong address" as
1028 * a failure and unmap again.
1030 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
1031 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
1032 PROT_READ|PROT_WRITE,
1033 MAP_ANON|MAP_PRIVATE, 0, 0));
1035 if (mapped_addr == brk_page) {
1036 /* Heap contents are initialized to zero, as for anonymous
1037 * mapped pages. Technically the new pages are already
1038 * initialized to zero since they *are* anonymous mapped
1039 * pages, however we have to take care with the contents that
1040 * come from the remaining part of the previous page: it may
1041 * contains garbage data due to a previous heap usage (grown
1042 * then shrunken). */
1043 memset(g2h(target_brk), 0, brk_page - target_brk);
1045 target_brk = new_brk;
1046 brk_page = HOST_PAGE_ALIGN(target_brk);
1047 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
1050 } else if (mapped_addr != -1) {
1051 /* Mapped but at wrong address, meaning there wasn't actually
1052 * enough space for this brk.
1054 target_munmap(mapped_addr, new_alloc_size);
1056 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
1059 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
1062 #if defined(TARGET_ALPHA)
1063 /* We (partially) emulate OSF/1 on Alpha, which requires we
1064 return a proper errno, not an unchanged brk value. */
1065 return -TARGET_ENOMEM;
1067 /* For everything else, return the previous break. */
1071 static inline abi_long copy_from_user_fdset(fd_set *fds,
1072 abi_ulong target_fds_addr,
1076 abi_ulong b, *target_fds;
1078 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1079 if (!(target_fds = lock_user(VERIFY_READ,
1081 sizeof(abi_ulong) * nw,
1083 return -TARGET_EFAULT;
1087 for (i = 0; i < nw; i++) {
1088 /* grab the abi_ulong */
1089 __get_user(b, &target_fds[i]);
1090 for (j = 0; j < TARGET_ABI_BITS; j++) {
1091 /* check the bit inside the abi_ulong */
1098 unlock_user(target_fds, target_fds_addr, 0);
1103 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
1104 abi_ulong target_fds_addr,
1107 if (target_fds_addr) {
1108 if (copy_from_user_fdset(fds, target_fds_addr, n))
1109 return -TARGET_EFAULT;
1117 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1123 abi_ulong *target_fds;
1125 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1126 if (!(target_fds = lock_user(VERIFY_WRITE,
1128 sizeof(abi_ulong) * nw,
1130 return -TARGET_EFAULT;
1133 for (i = 0; i < nw; i++) {
1135 for (j = 0; j < TARGET_ABI_BITS; j++) {
1136 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1139 __put_user(v, &target_fds[i]);
1142 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1147 #if defined(__alpha__)
1148 #define HOST_HZ 1024
/*
 * Rescale a clock tick count from the host's HZ to the target's HZ.
 * When the two rates match the value passes through unchanged (the
 * identity branch is elided in this listing); otherwise scale in
 * 64-bit arithmetic to avoid intermediate overflow.
 */
1153 static inline abi_long host_to_target_clock_t(long ticks)
1155 #if HOST_HZ == TARGET_HZ
1158 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
/*
 * Copy a host struct rusage into guest memory at target_addr,
 * byte-swapping every field to the target's endianness with tswapal().
 * Returns -TARGET_EFAULT if the guest struct cannot be locked.
 */
1162 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1163 const struct rusage *rusage)
1165 struct target_rusage *target_rusage;
1167 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1168 return -TARGET_EFAULT;
1169 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1170 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1171 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1172 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1173 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1174 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1175 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1176 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1177 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1178 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1179 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1180 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1181 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1182 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1183 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1184 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1185 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1186 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
/* unlock with copy=1 so the converted struct reaches guest memory */
1187 unlock_user_struct(target_rusage, target_addr, 1);
/*
 * Convert a guest rlimit value to the host rlim_t.  The target's
 * "infinity" sentinel maps to the host RLIM_INFINITY; a value that
 * does not survive the round-trip through rlim_t (i.e. would be
 * truncated) is also treated as infinity rather than silently clipped.
 */
1192 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1194 abi_ulong target_rlim_swap;
1197 target_rlim_swap = tswapal(target_rlim);
1198 if (target_rlim_swap == TARGET_RLIM_INFINITY)
1199 return RLIM_INFINITY;
1201 result = target_rlim_swap;
/* overflow check: did assigning into rlim_t lose bits? */
1202 if (target_rlim_swap != (rlim_t)result)
1203 return RLIM_INFINITY;
/*
 * Convert a host rlim_t to the guest representation, byte-swapped.
 * Host infinity — or any value too large to represent in the guest's
 * abi_long — becomes TARGET_RLIM_INFINITY.
 */
1208 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1210 abi_ulong target_rlim_swap;
1213 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1214 target_rlim_swap = TARGET_RLIM_INFINITY;
1216 target_rlim_swap = rlim;
1217 result = tswapal(target_rlim_swap);
/*
 * Map a guest TARGET_RLIMIT_* resource code to the host RLIMIT_*
 * constant for the {get,set}rlimit family.  (Some of the return lines
 * are elided in this listing; the default case is not visible.)
 */
1222 static inline int target_to_host_resource(int code)
1225 case TARGET_RLIMIT_AS:
1227 case TARGET_RLIMIT_CORE:
1229 case TARGET_RLIMIT_CPU:
1231 case TARGET_RLIMIT_DATA:
1233 case TARGET_RLIMIT_FSIZE:
1234 return RLIMIT_FSIZE;
1235 case TARGET_RLIMIT_LOCKS:
1236 return RLIMIT_LOCKS;
1237 case TARGET_RLIMIT_MEMLOCK:
1238 return RLIMIT_MEMLOCK;
1239 case TARGET_RLIMIT_MSGQUEUE:
1240 return RLIMIT_MSGQUEUE;
1241 case TARGET_RLIMIT_NICE:
1243 case TARGET_RLIMIT_NOFILE:
1244 return RLIMIT_NOFILE;
1245 case TARGET_RLIMIT_NPROC:
1246 return RLIMIT_NPROC;
1247 case TARGET_RLIMIT_RSS:
1249 case TARGET_RLIMIT_RTPRIO:
1250 return RLIMIT_RTPRIO;
1251 case TARGET_RLIMIT_SIGPENDING:
1252 return RLIMIT_SIGPENDING;
1253 case TARGET_RLIMIT_STACK:
1254 return RLIMIT_STACK;
/*
 * Read a struct timeval from guest memory into host *tv, converting
 * each field from guest byte order via __get_user.  Returns
 * -TARGET_EFAULT if the guest struct cannot be locked.
 */
1260 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1261 abi_ulong target_tv_addr)
1263 struct target_timeval *target_tv;
1265 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1266 return -TARGET_EFAULT;
1268 __get_user(tv->tv_sec, &target_tv->tv_sec);
1269 __get_user(tv->tv_usec, &target_tv->tv_usec);
1271 unlock_user_struct(target_tv, target_tv_addr, 0);
/*
 * Write a host struct timeval into guest memory, converting each
 * field to guest byte order via __put_user.  Returns -TARGET_EFAULT
 * if the guest struct cannot be locked.
 */
1276 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1277 const struct timeval *tv)
1279 struct target_timeval *target_tv;
1281 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1282 return -TARGET_EFAULT;
1284 __put_user(tv->tv_sec, &target_tv->tv_sec);
1285 __put_user(tv->tv_usec, &target_tv->tv_usec);
1287 unlock_user_struct(target_tv, target_tv_addr, 1);
/*
 * Read a struct timezone from guest memory into host *tz.
 * Returns -TARGET_EFAULT if the guest struct cannot be locked.
 */
1292 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1293 abi_ulong target_tz_addr)
1295 struct target_timezone *target_tz;
1297 if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1298 return -TARGET_EFAULT;
1301 __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1302 __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1304 unlock_user_struct(target_tz, target_tz_addr, 0);
1309 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
/*
 * Read a POSIX message-queue attribute struct (mq_attr) from guest
 * memory into the host representation, field by field.  Returns
 * -TARGET_EFAULT if the guest struct cannot be locked.
 */
1312 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1313 abi_ulong target_mq_attr_addr)
1315 struct target_mq_attr *target_mq_attr;
1317 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1318 target_mq_attr_addr, 1))
1319 return -TARGET_EFAULT;
1321 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1322 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1323 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1324 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1326 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
/*
 * Write a host mq_attr struct into guest memory, field by field.
 * Returns -TARGET_EFAULT if the guest struct cannot be locked.
 */
1331 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1332 const struct mq_attr *attr)
1334 struct target_mq_attr *target_mq_attr;
1336 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1337 target_mq_attr_addr, 0))
1338 return -TARGET_EFAULT;
1340 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1341 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1342 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1343 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1345 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1351 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1352 /* do_select() must return target values and target errnos. */
/*
 * Emulate the guest select() syscall: copy in the three fd sets and
 * the optional timeout, run the host safe_pselect6(), then copy the
 * surviving sets and remaining time back out.  The timeval is
 * converted to a timespec because pselect6 takes nanoseconds.
 * Returns target errnos.  (Several lines are elided in this listing.)
 */
1353 static abi_long do_select(int n,
1354 abi_ulong rfd_addr, abi_ulong wfd_addr,
1355 abi_ulong efd_addr, abi_ulong target_tv_addr)
1357 fd_set rfds, wfds, efds;
1358 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1360 struct timespec ts, *ts_ptr;
/* each *_ptr is NULL when the guest passed a NULL set */
1363 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1367 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1371 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1376 if (target_tv_addr) {
1377 if (copy_from_user_timeval(&tv, target_tv_addr))
1378 return -TARGET_EFAULT;
/* microseconds -> nanoseconds for pselect6 */
1379 ts.tv_sec = tv.tv_sec;
1380 ts.tv_nsec = tv.tv_usec * 1000;
1386 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1389 if (!is_error(ret)) {
1390 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1391 return -TARGET_EFAULT;
1392 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1393 return -TARGET_EFAULT;
1394 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1395 return -TARGET_EFAULT;
/* write back the (possibly updated) timeout, Linux-style */
1397 if (target_tv_addr) {
1398 tv.tv_sec = ts.tv_sec;
1399 tv.tv_usec = ts.tv_nsec / 1000;
1400 if (copy_to_user_timeval(target_tv_addr, &tv)) {
1401 return -TARGET_EFAULT;
/*
 * Thin wrapper creating a pipe pair with flags via the host pipe2().
 * NOTE(review): the surrounding #ifdef (presumably CONFIG_PIPE2 or
 * similar) and the fallback branch are elided in this listing.
 */
1410 static abi_long do_pipe2(int host_pipe[], int flags)
1413 return pipe2(host_pipe, flags);
/*
 * Emulate pipe()/pipe2() for the guest.  Several targets (Alpha,
 * MIPS, SH4, SPARC) return the two descriptors in registers for the
 * original pipe syscall, so for those the second fd is stored
 * directly into the CPU state and the first is the return value;
 * everyone else gets the pair written to the guest array at pipedes.
 * (Some lines are elided in this listing.)
 */
1419 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1420 int flags, int is_pipe2)
1424 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1427 return get_errno(ret);
1429 /* Several targets have special calling conventions for the original
1430 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1432 #if defined(TARGET_ALPHA)
1433 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1434 return host_pipe[0];
1435 #elif defined(TARGET_MIPS)
1436 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1437 return host_pipe[0];
1438 #elif defined(TARGET_SH4)
1439 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1440 return host_pipe[0];
1441 #elif defined(TARGET_SPARC)
1442 ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1443 return host_pipe[0];
/* generic path: store both fds into the guest int array */
1447 if (put_user_s32(host_pipe[0], pipedes)
1448 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1449 return -TARGET_EFAULT;
1450 return get_errno(ret);
/*
 * Convert a guest ip_mreq/ip_mreqn (multicast membership request) to
 * the host struct.  The in_addr fields are kept in network byte order
 * (copied verbatim); only the optional ifindex — present when len
 * says the guest passed the larger ip_mreqn form — is byte-swapped.
 */
1453 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1454 abi_ulong target_addr,
1457 struct target_ip_mreqn *target_smreqn;
1459 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1461 return -TARGET_EFAULT;
1462 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1463 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1464 if (len == sizeof(struct target_ip_mreqn))
1465 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1466 unlock_user(target_smreqn, target_addr, 0);
/*
 * Convert a guest sockaddr at target_addr into the host *addr.
 * A per-fd translation hook (fd_trans_target_to_host_addr) takes
 * precedence when registered.  Handles the AF_UNIX short-sun_path
 * quirk, and byte-swaps the address-family-specific fields for
 * AF_NETLINK and AF_PACKET.  Returns -TARGET_EFAULT if the guest
 * buffer cannot be locked.  (Some lines are elided in this listing.)
 */
1471 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1472 abi_ulong target_addr,
1475 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1476 sa_family_t sa_family;
1477 struct target_sockaddr *target_saddr;
/* fd-specific translator (e.g. for special socket types) wins */
1479 if (fd_trans_target_to_host_addr(fd)) {
1480 return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1483 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1485 return -TARGET_EFAULT;
1487 sa_family = tswap16(target_saddr->sa_family);
1489 /* Oops. The caller might send a incomplete sun_path; sun_path
1490 * must be terminated by \0 (see the manual page), but
1491 * unfortunately it is quite common to specify sockaddr_un
1492 * length as "strlen(x->sun_path)" while it should be
1493 * "strlen(...) + 1". We'll fix that here if needed.
1494 * Linux kernel has a similar feature.
1497 if (sa_family == AF_UNIX) {
1498 if (len < unix_maxlen && len > 0) {
1499 char *cp = (char*)target_saddr;
/* last byte non-NUL but the following byte is NUL: extend len */
1501 if ( cp[len-1] && !cp[len] )
1504 if (len > unix_maxlen)
1508 memcpy(addr, target_saddr, len);
1509 addr->sa_family = sa_family;
1510 if (sa_family == AF_NETLINK) {
1511 struct sockaddr_nl *nladdr;
1513 nladdr = (struct sockaddr_nl *)addr;
1514 nladdr->nl_pid = tswap32(nladdr->nl_pid);
1515 nladdr->nl_groups = tswap32(nladdr->nl_groups);
1516 } else if (sa_family == AF_PACKET) {
1517 struct target_sockaddr_ll *lladdr;
1519 lladdr = (struct target_sockaddr_ll *)addr;
1520 lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1521 lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1523 unlock_user(target_saddr, target_addr, 0);
/*
 * Copy a host sockaddr out to guest memory, byte-swapping the family
 * field (only when len is large enough to include it) and the
 * family-specific fields for AF_NETLINK and AF_PACKET.  Returns
 * -TARGET_EFAULT if the guest buffer cannot be locked.
 * (Some lines are elided in this listing.)
 */
1528 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1529 struct sockaddr *addr,
1532 struct target_sockaddr *target_saddr;
1538 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1540 return -TARGET_EFAULT;
1541 memcpy(target_saddr, addr, len);
/* only touch sa_family if the buffer actually covers it */
1542 if (len >= offsetof(struct target_sockaddr, sa_family) +
1543 sizeof(target_saddr->sa_family)) {
1544 target_saddr->sa_family = tswap16(addr->sa_family);
1546 if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
1547 struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
1548 target_nl->nl_pid = tswap32(target_nl->nl_pid);
1549 target_nl->nl_groups = tswap32(target_nl->nl_groups);
1550 } else if (addr->sa_family == AF_PACKET) {
1551 struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1552 target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1553 target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1555 unlock_user(target_saddr, target_addr, len);
/*
 * Convert the ancillary-data (control message) chain of a guest
 * msghdr into the host msgh.  Walks guest and host cmsg headers in
 * lockstep, translating SCM_RIGHTS fd arrays and SCM_CREDENTIALS;
 * any other payload type is copied verbatim with a warning.  Finally
 * sets msgh->msg_controllen to the space actually used.
 * (Some lines are elided in this listing.)
 */
1560 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1561 struct target_msghdr *target_msgh)
1563 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1564 abi_long msg_controllen;
1565 abi_ulong target_cmsg_addr;
1566 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1567 socklen_t space = 0;
1569 msg_controllen = tswapal(target_msgh->msg_controllen);
1570 if (msg_controllen < sizeof (struct target_cmsghdr))
1572 target_cmsg_addr = tswapal(target_msgh->msg_control);
1573 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1574 target_cmsg_start = target_cmsg;
1576 return -TARGET_EFAULT;
1578 while (cmsg && target_cmsg) {
1579 void *data = CMSG_DATA(cmsg);
1580 void *target_data = TARGET_CMSG_DATA(target_cmsg);
/* payload length = recorded cmsg_len minus the (aligned) header */
1582 int len = tswapal(target_cmsg->cmsg_len)
1583 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
1585 space += CMSG_SPACE(len);
1586 if (space > msgh->msg_controllen) {
1587 space -= CMSG_SPACE(len);
1588 /* This is a QEMU bug, since we allocated the payload
1589 * area ourselves (unlike overflow in host-to-target
1590 * conversion, which is just the guest giving us a buffer
1591 * that's too small). It can't happen for the payload types
1592 * we currently support; if it becomes an issue in future
1593 * we would need to improve our allocation strategy to
1594 * something more intelligent than "twice the size of the
1595 * target buffer we're reading from".
1597 gemu_log("Host cmsg overflow\n");
1601 if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1602 cmsg->cmsg_level = SOL_SOCKET;
1604 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1606 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1607 cmsg->cmsg_len = CMSG_LEN(len);
1609 if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
/* passed file descriptors: swap each int individually */
1610 int *fd = (int *)data;
1611 int *target_fd = (int *)target_data;
1612 int i, numfds = len / sizeof(int);
1614 for (i = 0; i < numfds; i++) {
1615 __get_user(fd[i], target_fd + i);
1617 } else if (cmsg->cmsg_level == SOL_SOCKET
1618 && cmsg->cmsg_type == SCM_CREDENTIALS) {
1619 struct ucred *cred = (struct ucred *)data;
1620 struct target_ucred *target_cred =
1621 (struct target_ucred *)target_data;
1623 __get_user(cred->pid, &target_cred->pid);
1624 __get_user(cred->uid, &target_cred->uid);
1625 __get_user(cred->gid, &target_cred->gid);
1627 gemu_log("Unsupported ancillary data: %d/%d\n",
1628 cmsg->cmsg_level, cmsg->cmsg_type);
1629 memcpy(data, target_data, len);
1632 cmsg = CMSG_NXTHDR(msgh, cmsg);
1633 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1636 unlock_user(target_cmsg, target_cmsg_addr, 0);
1638 msgh->msg_controllen = space;
/*
 * Convert a host msghdr's control-message chain back into the guest
 * buffer described by target_msgh.  Mirrors target_to_host_cmsg but
 * must additionally handle truncation: if the guest buffer is too
 * small, payloads are cut and MSG_CTRUNC is set, matching the Linux
 * kernel's put_cmsg() behaviour.  Handles SCM_RIGHTS, SCM_TIMESTAMP
 * (timeval resize) and SCM_CREDENTIALS explicitly; other types are
 * byte-copied with a warning.  (Some lines are elided in this listing.)
 */
1642 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1643 struct msghdr *msgh)
1645 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1646 abi_long msg_controllen;
1647 abi_ulong target_cmsg_addr;
1648 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1649 socklen_t space = 0;
1651 msg_controllen = tswapal(target_msgh->msg_controllen);
1652 if (msg_controllen < sizeof (struct target_cmsghdr))
1654 target_cmsg_addr = tswapal(target_msgh->msg_control);
1655 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1656 target_cmsg_start = target_cmsg;
1658 return -TARGET_EFAULT;
1660 while (cmsg && target_cmsg) {
1661 void *data = CMSG_DATA(cmsg);
1662 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1664 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
1665 int tgt_len, tgt_space;
1667 /* We never copy a half-header but may copy half-data;
1668 * this is Linux's behaviour in put_cmsg(). Note that
1669 * truncation here is a guest problem (which we report
1670 * to the guest via the CTRUNC bit), unlike truncation
1671 * in target_to_host_cmsg, which is a QEMU bug.
1673 if (msg_controllen < sizeof(struct cmsghdr)) {
1674 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1678 if (cmsg->cmsg_level == SOL_SOCKET) {
1679 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1681 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1683 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1685 tgt_len = TARGET_CMSG_LEN(len);
1687 /* Payload types which need a different size of payload on
1688 * the target must adjust tgt_len here.
1690 switch (cmsg->cmsg_level) {
1692 switch (cmsg->cmsg_type) {
1694 tgt_len = sizeof(struct target_timeval);
/* clamp to the space the guest actually provided */
1703 if (msg_controllen < tgt_len) {
1704 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1705 tgt_len = msg_controllen;
1708 /* We must now copy-and-convert len bytes of payload
1709 * into tgt_len bytes of destination space. Bear in mind
1710 * that in both source and destination we may be dealing
1711 * with a truncated value!
1713 switch (cmsg->cmsg_level) {
1715 switch (cmsg->cmsg_type) {
1718 int *fd = (int *)data;
1719 int *target_fd = (int *)target_data;
1720 int i, numfds = tgt_len / sizeof(int);
1722 for (i = 0; i < numfds; i++) {
1723 __put_user(fd[i], target_fd + i);
1729 struct timeval *tv = (struct timeval *)data;
1730 struct target_timeval *target_tv =
1731 (struct target_timeval *)target_data;
1733 if (len != sizeof(struct timeval) ||
1734 tgt_len != sizeof(struct target_timeval)) {
1738 /* copy struct timeval to target */
1739 __put_user(tv->tv_sec, &target_tv->tv_sec);
1740 __put_user(tv->tv_usec, &target_tv->tv_usec);
1743 case SCM_CREDENTIALS:
1745 struct ucred *cred = (struct ucred *)data;
1746 struct target_ucred *target_cred =
1747 (struct target_ucred *)target_data;
1749 __put_user(cred->pid, &target_cred->pid);
1750 __put_user(cred->uid, &target_cred->uid);
1751 __put_user(cred->gid, &target_cred->gid);
1761 gemu_log("Unsupported ancillary data: %d/%d\n",
1762 cmsg->cmsg_level, cmsg->cmsg_type);
1763 memcpy(target_data, data, MIN(len, tgt_len));
/* zero-fill when the target payload is larger than the host's */
1764 if (tgt_len > len) {
1765 memset(target_data + len, 0, tgt_len - len);
1769 target_cmsg->cmsg_len = tswapal(tgt_len);
1770 tgt_space = TARGET_CMSG_SPACE(len);
1771 if (msg_controllen < tgt_space) {
1772 tgt_space = msg_controllen;
1774 msg_controllen -= tgt_space;
1776 cmsg = CMSG_NXTHDR(msgh, cmsg);
1777 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1780 unlock_user(target_cmsg, target_cmsg_addr, space);
1782 target_msgh->msg_controllen = tswapal(space);
/*
 * Byte-swap every field of a netlink message header in place.
 * Swapping is symmetric, so the same helper serves both the
 * host-to-target and target-to-host directions.
 */
1786 static void tswap_nlmsghdr(struct nlmsghdr *nlh)
1788 nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
1789 nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
1790 nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
1791 nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
1792 nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
/*
 * Walk a buffer of host-order netlink messages, converting each to
 * target order.  The payload length is validated against the buffer
 * before use; NLMSG_ERROR payloads carry an embedded nlmsghdr that is
 * swapped too, and all other message types are handed to the supplied
 * per-payload callback before the header itself is swapped.
 * (Some lines — loop control and error paths — are elided here.)
 */
1795 static abi_long host_to_target_for_each_nlmsg(struct nlmsghdr *nlh,
1797 abi_long (*host_to_target_nlmsg)
1798 (struct nlmsghdr *))
1803 while (len > sizeof(struct nlmsghdr)) {
/* header still host order: read nlmsg_len before swapping */
1805 nlmsg_len = nlh->nlmsg_len;
1806 if (nlmsg_len < sizeof(struct nlmsghdr) ||
1811 switch (nlh->nlmsg_type) {
1813 tswap_nlmsghdr(nlh);
1819 struct nlmsgerr *e = NLMSG_DATA(nlh);
1820 e->error = tswap32(e->error);
1821 tswap_nlmsghdr(&e->msg);
1822 tswap_nlmsghdr(nlh);
/* convert the payload first, while the header is still readable */
1826 ret = host_to_target_nlmsg(nlh);
1828 tswap_nlmsghdr(nlh);
1833 tswap_nlmsghdr(nlh);
1834 len -= NLMSG_ALIGN(nlmsg_len);
1835 nlh = (struct nlmsghdr *)(((char*)nlh) + NLMSG_ALIGN(nlmsg_len));
/*
 * Walk a buffer of target-order netlink messages, converting each to
 * host order.  Length fields are validated *before* swapping (hence
 * the tswap32() in the checks); the header is swapped first, then
 * NLMSG_ERROR embedded headers or the per-payload callback are
 * applied.  (Some lines are elided in this listing.)
 */
1840 static abi_long target_to_host_for_each_nlmsg(struct nlmsghdr *nlh,
1842 abi_long (*target_to_host_nlmsg)
1843 (struct nlmsghdr *))
1847 while (len > sizeof(struct nlmsghdr)) {
1848 if (tswap32(nlh->nlmsg_len) < sizeof(struct nlmsghdr) ||
1849 tswap32(nlh->nlmsg_len) > len) {
1852 tswap_nlmsghdr(nlh);
1853 switch (nlh->nlmsg_type) {
1860 struct nlmsgerr *e = NLMSG_DATA(nlh);
1861 e->error = tswap32(e->error);
1862 tswap_nlmsghdr(&e->msg);
1866 ret = target_to_host_nlmsg(nlh);
/* nlmsg_len is already host order here, safe to use directly */
1871 len -= NLMSG_ALIGN(nlh->nlmsg_len);
1872 nlh = (struct nlmsghdr *)(((char *)nlh) + NLMSG_ALIGN(nlh->nlmsg_len));
1877 #ifdef CONFIG_RTNETLINK
/*
 * Iterate the nlattr chain of a host-order netlink payload.  For each
 * attribute: validate nla_len, let the callback convert the payload
 * (with an opaque context), then swap the attribute header itself.
 * (Some lines are elided in this listing.)
 */
1878 static abi_long host_to_target_for_each_nlattr(struct nlattr *nlattr,
1879 size_t len, void *context,
1880 abi_long (*host_to_target_nlattr)
1884 unsigned short nla_len;
1887 while (len > sizeof(struct nlattr)) {
/* stash host-order length before the header gets swapped */
1888 nla_len = nlattr->nla_len;
1889 if (nla_len < sizeof(struct nlattr) ||
1893 ret = host_to_target_nlattr(nlattr, context);
1894 nlattr->nla_len = tswap16(nlattr->nla_len);
1895 nlattr->nla_type = tswap16(nlattr->nla_type);
1899 len -= NLA_ALIGN(nla_len);
1900 nlattr = (struct nlattr *)(((char *)nlattr) + NLA_ALIGN(nla_len));
/*
 * Same iteration pattern as host_to_target_for_each_nlattr, but for
 * rtnetlink rtattr chains (no context parameter): validate, convert
 * the payload via the callback, then swap the rta header.
 * (Some lines are elided in this listing.)
 */
1905 static abi_long host_to_target_for_each_rtattr(struct rtattr *rtattr,
1907 abi_long (*host_to_target_rtattr)
1910 unsigned short rta_len;
1913 while (len > sizeof(struct rtattr)) {
1914 rta_len = rtattr->rta_len;
1915 if (rta_len < sizeof(struct rtattr) ||
1919 ret = host_to_target_rtattr(rtattr);
1920 rtattr->rta_len = tswap16(rtattr->rta_len);
1921 rtattr->rta_type = tswap16(rtattr->rta_type);
1925 len -= RTA_ALIGN(rta_len);
1926 rtattr = (struct rtattr *)(((char *)rtattr) + RTA_ALIGN(rta_len));
1931 #define NLA_DATA(nla) ((void *)((char *)(nla)) + NLA_HDRLEN)
/*
 * Byte-swap the payload of one IFLA_BR_* (bridge) attribute according
 * to its width: u8 attributes pass through untouched, u16/u32/u64 are
 * swapped, bridge-id attributes are raw byte arrays.  Unknown types
 * are logged, not rejected.  (Some lines are elided in this listing.)
 */
1933 static abi_long host_to_target_data_bridge_nlattr(struct nlattr *nlattr,
1940 switch (nlattr->nla_type) {
/* no payload / u8 payloads: nothing to swap */
1942 case QEMU_IFLA_BR_FDB_FLUSH:
1945 case QEMU_IFLA_BR_GROUP_ADDR:
1948 case QEMU_IFLA_BR_VLAN_FILTERING:
1949 case QEMU_IFLA_BR_TOPOLOGY_CHANGE:
1950 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED:
1951 case QEMU_IFLA_BR_MCAST_ROUTER:
1952 case QEMU_IFLA_BR_MCAST_SNOOPING:
1953 case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR:
1954 case QEMU_IFLA_BR_MCAST_QUERIER:
1955 case QEMU_IFLA_BR_NF_CALL_IPTABLES:
1956 case QEMU_IFLA_BR_NF_CALL_IP6TABLES:
1957 case QEMU_IFLA_BR_NF_CALL_ARPTABLES:
/* u16 payloads */
1960 case QEMU_IFLA_BR_PRIORITY:
1961 case QEMU_IFLA_BR_VLAN_PROTOCOL:
1962 case QEMU_IFLA_BR_GROUP_FWD_MASK:
1963 case QEMU_IFLA_BR_ROOT_PORT:
1964 case QEMU_IFLA_BR_VLAN_DEFAULT_PVID:
1965 u16 = NLA_DATA(nlattr);
1966 *u16 = tswap16(*u16);
/* u32 payloads */
1969 case QEMU_IFLA_BR_FORWARD_DELAY:
1970 case QEMU_IFLA_BR_HELLO_TIME:
1971 case QEMU_IFLA_BR_MAX_AGE:
1972 case QEMU_IFLA_BR_AGEING_TIME:
1973 case QEMU_IFLA_BR_STP_STATE:
1974 case QEMU_IFLA_BR_ROOT_PATH_COST:
1975 case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY:
1976 case QEMU_IFLA_BR_MCAST_HASH_MAX:
1977 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT:
1978 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT:
1979 u32 = NLA_DATA(nlattr);
1980 *u32 = tswap32(*u32);
/* u64 payloads */
1983 case QEMU_IFLA_BR_HELLO_TIMER:
1984 case QEMU_IFLA_BR_TCN_TIMER:
1985 case QEMU_IFLA_BR_GC_TIMER:
1986 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER:
1987 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL:
1988 case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL:
1989 case QEMU_IFLA_BR_MCAST_QUERIER_INTVL:
1990 case QEMU_IFLA_BR_MCAST_QUERY_INTVL:
1991 case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL:
1992 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL:
1993 u64 = NLA_DATA(nlattr);
1994 *u64 = tswap64(*u64);
1996 /* ifla_bridge_id: uin8_t[] */
1997 case QEMU_IFLA_BR_ROOT_ID:
1998 case QEMU_IFLA_BR_BRIDGE_ID:
2001 gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr->nla_type);
/*
 * Byte-swap the payload of one IFLA_BRPORT_* (bridge port) attribute
 * by width, mirroring host_to_target_data_bridge_nlattr: u8 types
 * untouched, u16/u32/u64 swapped, bridge-id raw.  Unknown types are
 * logged.  (Some lines are elided in this listing.)
 */
2007 static abi_long host_to_target_slave_data_bridge_nlattr(struct nlattr *nlattr,
2014 switch (nlattr->nla_type) {
/* u8 payloads: nothing to swap */
2016 case QEMU_IFLA_BRPORT_STATE:
2017 case QEMU_IFLA_BRPORT_MODE:
2018 case QEMU_IFLA_BRPORT_GUARD:
2019 case QEMU_IFLA_BRPORT_PROTECT:
2020 case QEMU_IFLA_BRPORT_FAST_LEAVE:
2021 case QEMU_IFLA_BRPORT_LEARNING:
2022 case QEMU_IFLA_BRPORT_UNICAST_FLOOD:
2023 case QEMU_IFLA_BRPORT_PROXYARP:
2024 case QEMU_IFLA_BRPORT_LEARNING_SYNC:
2025 case QEMU_IFLA_BRPORT_PROXYARP_WIFI:
2026 case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK:
2027 case QEMU_IFLA_BRPORT_CONFIG_PENDING:
2028 case QEMU_IFLA_BRPORT_MULTICAST_ROUTER:
/* u16 payloads */
2031 case QEMU_IFLA_BRPORT_PRIORITY:
2032 case QEMU_IFLA_BRPORT_DESIGNATED_PORT:
2033 case QEMU_IFLA_BRPORT_DESIGNATED_COST:
2034 case QEMU_IFLA_BRPORT_ID:
2035 case QEMU_IFLA_BRPORT_NO:
2036 u16 = NLA_DATA(nlattr);
2037 *u16 = tswap16(*u16);
/* u32 payloads */
2040 case QEMU_IFLA_BRPORT_COST:
2041 u32 = NLA_DATA(nlattr);
2042 *u32 = tswap32(*u32);
/* u64 payloads */
2045 case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER:
2046 case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER:
2047 case QEMU_IFLA_BRPORT_HOLD_TIMER:
2048 u64 = NLA_DATA(nlattr);
2049 *u64 = tswap64(*u64);
2051 /* ifla_bridge_id: uint8_t[] */
2052 case QEMU_IFLA_BRPORT_ROOT_ID:
2053 case QEMU_IFLA_BRPORT_BRIDGE_ID:
2056 gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr->nla_type);
/*
 * Context threaded through the IFLA_LINKINFO attribute walk: records
 * the link kind / slave kind names seen so far so later INFO_DATA
 * attributes can be dispatched.  (Members are elided in this listing.)
 */
2062 struct linkinfo_context {
/*
 * Convert one IFLA_INFO_* attribute.  INFO_KIND / INFO_SLAVE_KIND are
 * strings that are remembered in the linkinfo_context; INFO_DATA and
 * INFO_SLAVE_DATA are nested attribute blocks whose converter is
 * chosen by the previously-recorded kind ("bridge" is the only one
 * currently handled).  (Some lines are elided in this listing.)
 */
2069 static abi_long host_to_target_data_linkinfo_nlattr(struct nlattr *nlattr,
2072 struct linkinfo_context *li_context = context;
2074 switch (nlattr->nla_type) {
2076 case QEMU_IFLA_INFO_KIND:
2077 li_context->name = NLA_DATA(nlattr);
2078 li_context->len = nlattr->nla_len - NLA_HDRLEN;
2080 case QEMU_IFLA_INFO_SLAVE_KIND:
2081 li_context->slave_name = NLA_DATA(nlattr);
2082 li_context->slave_len = nlattr->nla_len - NLA_HDRLEN;
2085 case QEMU_IFLA_INFO_XSTATS:
2086 /* FIXME: only used by CAN */
2089 case QEMU_IFLA_INFO_DATA:
2090 if (strncmp(li_context->name, "bridge",
2091 li_context->len) == 0) {
2092 return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2095 host_to_target_data_bridge_nlattr);
2097 gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context->name);
2100 case QEMU_IFLA_INFO_SLAVE_DATA:
2101 if (strncmp(li_context->slave_name, "bridge",
2102 li_context->slave_len) == 0) {
2103 return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2106 host_to_target_slave_data_bridge_nlattr);
2108 gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
2109 li_context->slave_name);
2113 gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr->nla_type);
/*
 * Convert one AF_INET attribute: IFLA_INET_CONF is an array of u32
 * values, each swapped in place.  Unknown types are logged.
 * (Some lines are elided in this listing.)
 */
2120 static abi_long host_to_target_data_inet_nlattr(struct nlattr *nlattr,
2126 switch (nlattr->nla_type) {
2127 case QEMU_IFLA_INET_CONF:
2128 u32 = NLA_DATA(nlattr);
2129 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
2131 u32[i] = tswap32(u32[i]);
2135 gemu_log("Unknown host AF_INET type: %d\n", nlattr->nla_type);
/*
 * Convert one AF_INET6 attribute by payload shape: raw bytes
 * (TOKEN / ADDR_GEN_MODE) untouched, single u32, u32 array (CONF),
 * the ifla_cacheinfo struct, and u64 arrays (STATS / ICMP6STATS).
 * Unknown types are logged.  (Some lines are elided in this listing.)
 */
2140 static abi_long host_to_target_data_inet6_nlattr(struct nlattr *nlattr,
2145 struct ifla_cacheinfo *ci;
2148 switch (nlattr->nla_type) {
2150 case QEMU_IFLA_INET6_TOKEN:
2153 case QEMU_IFLA_INET6_ADDR_GEN_MODE:
/* u32 */
2156 case QEMU_IFLA_INET6_FLAGS:
2157 u32 = NLA_DATA(nlattr);
2158 *u32 = tswap32(*u32);
/* u32 array */
2161 case QEMU_IFLA_INET6_CONF:
2162 u32 = NLA_DATA(nlattr);
2163 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
2165 u32[i] = tswap32(u32[i]);
2168 /* ifla_cacheinfo */
2169 case QEMU_IFLA_INET6_CACHEINFO:
2170 ci = NLA_DATA(nlattr);
2171 ci->max_reasm_len = tswap32(ci->max_reasm_len);
2172 ci->tstamp = tswap32(ci->tstamp);
2173 ci->reachable_time = tswap32(ci->reachable_time);
2174 ci->retrans_time = tswap32(ci->retrans_time);
/* u64 array */
2177 case QEMU_IFLA_INET6_STATS:
2178 case QEMU_IFLA_INET6_ICMP6STATS:
2179 u64 = NLA_DATA(nlattr);
2180 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u64);
2182 u64[i] = tswap64(u64[i]);
2186 gemu_log("Unknown host AF_INET6 type: %d\n", nlattr->nla_type);
/*
 * Dispatch an IFLA_AF_SPEC sub-block by address family: AF_INET and
 * AF_INET6 payloads are themselves nested attribute chains handed to
 * the family-specific converter.  (The case labels for the families
 * are elided in this listing.)  Unknown families are logged.
 */
2191 static abi_long host_to_target_data_spec_nlattr(struct nlattr *nlattr,
2194 switch (nlattr->nla_type) {
2196 return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2198 host_to_target_data_inet_nlattr);
2200 return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2202 host_to_target_data_inet6_nlattr);
2204 gemu_log("Unknown host AF_SPEC type: %d\n", nlattr->nla_type);
/*
 * Convert one IFLA_* rtattr of an RTM_*LINK message from host to
 * target byte order.  Dispatches on attribute type: raw binary and
 * string payloads pass through, u8/u32 scalars and the
 * rtnl_link_stats / rtnl_link_stats64 / rtnl_link_ifmap structs are
 * swapped field by field, and IFLA_LINKINFO / IFLA_AF_SPEC recurse
 * into nested attribute chains.  Unknown types are logged.
 * (Some lines are elided in this listing.)
 */
2210 static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
2213 struct rtnl_link_stats *st;
2214 struct rtnl_link_stats64 *st64;
2215 struct rtnl_link_ifmap *map;
2216 struct linkinfo_context li_context;
2218 switch (rtattr->rta_type) {
/* binary payloads: MAC addresses etc., no swapping */
2220 case QEMU_IFLA_ADDRESS:
2221 case QEMU_IFLA_BROADCAST:
/* string payloads */
2223 case QEMU_IFLA_IFNAME:
2224 case QEMU_IFLA_QDISC:
/* u8 payloads */
2227 case QEMU_IFLA_OPERSTATE:
2228 case QEMU_IFLA_LINKMODE:
2229 case QEMU_IFLA_CARRIER:
2230 case QEMU_IFLA_PROTO_DOWN:
/* u32 payloads */
2234 case QEMU_IFLA_LINK:
2235 case QEMU_IFLA_WEIGHT:
2236 case QEMU_IFLA_TXQLEN:
2237 case QEMU_IFLA_CARRIER_CHANGES:
2238 case QEMU_IFLA_NUM_RX_QUEUES:
2239 case QEMU_IFLA_NUM_TX_QUEUES:
2240 case QEMU_IFLA_PROMISCUITY:
2241 case QEMU_IFLA_EXT_MASK:
2242 case QEMU_IFLA_LINK_NETNSID:
2243 case QEMU_IFLA_GROUP:
2244 case QEMU_IFLA_MASTER:
2245 case QEMU_IFLA_NUM_VF:
2246 u32 = RTA_DATA(rtattr);
2247 *u32 = tswap32(*u32);
2249 /* struct rtnl_link_stats */
2250 case QEMU_IFLA_STATS:
2251 st = RTA_DATA(rtattr);
2252 st->rx_packets = tswap32(st->rx_packets);
2253 st->tx_packets = tswap32(st->tx_packets);
2254 st->rx_bytes = tswap32(st->rx_bytes);
2255 st->tx_bytes = tswap32(st->tx_bytes);
2256 st->rx_errors = tswap32(st->rx_errors);
2257 st->tx_errors = tswap32(st->tx_errors);
2258 st->rx_dropped = tswap32(st->rx_dropped);
2259 st->tx_dropped = tswap32(st->tx_dropped);
2260 st->multicast = tswap32(st->multicast);
2261 st->collisions = tswap32(st->collisions);
2263 /* detailed rx_errors: */
2264 st->rx_length_errors = tswap32(st->rx_length_errors);
2265 st->rx_over_errors = tswap32(st->rx_over_errors);
2266 st->rx_crc_errors = tswap32(st->rx_crc_errors);
2267 st->rx_frame_errors = tswap32(st->rx_frame_errors);
2268 st->rx_fifo_errors = tswap32(st->rx_fifo_errors);
2269 st->rx_missed_errors = tswap32(st->rx_missed_errors);
2271 /* detailed tx_errors */
2272 st->tx_aborted_errors = tswap32(st->tx_aborted_errors);
2273 st->tx_carrier_errors = tswap32(st->tx_carrier_errors);
2274 st->tx_fifo_errors = tswap32(st->tx_fifo_errors);
2275 st->tx_heartbeat_errors = tswap32(st->tx_heartbeat_errors);
2276 st->tx_window_errors = tswap32(st->tx_window_errors);
2279 st->rx_compressed = tswap32(st->rx_compressed);
2280 st->tx_compressed = tswap32(st->tx_compressed);
2282 /* struct rtnl_link_stats64 */
2283 case QEMU_IFLA_STATS64:
2284 st64 = RTA_DATA(rtattr);
2285 st64->rx_packets = tswap64(st64->rx_packets);
2286 st64->tx_packets = tswap64(st64->tx_packets);
2287 st64->rx_bytes = tswap64(st64->rx_bytes);
2288 st64->tx_bytes = tswap64(st64->tx_bytes);
2289 st64->rx_errors = tswap64(st64->rx_errors);
2290 st64->tx_errors = tswap64(st64->tx_errors);
2291 st64->rx_dropped = tswap64(st64->rx_dropped);
2292 st64->tx_dropped = tswap64(st64->tx_dropped);
2293 st64->multicast = tswap64(st64->multicast);
2294 st64->collisions = tswap64(st64->collisions);
2296 /* detailed rx_errors: */
2297 st64->rx_length_errors = tswap64(st64->rx_length_errors);
2298 st64->rx_over_errors = tswap64(st64->rx_over_errors);
2299 st64->rx_crc_errors = tswap64(st64->rx_crc_errors);
2300 st64->rx_frame_errors = tswap64(st64->rx_frame_errors);
2301 st64->rx_fifo_errors = tswap64(st64->rx_fifo_errors);
2302 st64->rx_missed_errors = tswap64(st64->rx_missed_errors);
2304 /* detailed tx_errors */
2305 st64->tx_aborted_errors = tswap64(st64->tx_aborted_errors);
2306 st64->tx_carrier_errors = tswap64(st64->tx_carrier_errors);
2307 st64->tx_fifo_errors = tswap64(st64->tx_fifo_errors);
2308 st64->tx_heartbeat_errors = tswap64(st64->tx_heartbeat_errors);
2309 st64->tx_window_errors = tswap64(st64->tx_window_errors);
2312 st64->rx_compressed = tswap64(st64->rx_compressed);
2313 st64->tx_compressed = tswap64(st64->tx_compressed);
2315 /* struct rtnl_link_ifmap */
2317 map = RTA_DATA(rtattr);
2318 map->mem_start = tswap64(map->mem_start);
2319 map->mem_end = tswap64(map->mem_end);
2320 map->base_addr = tswap64(map->base_addr);
2321 map->irq = tswap16(map->irq);
/* nested attribute chains */
2324 case QEMU_IFLA_LINKINFO:
2325 memset(&li_context, 0, sizeof(li_context));
2326 return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
2328 host_to_target_data_linkinfo_nlattr);
2329 case QEMU_IFLA_AF_SPEC:
2330 return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
2332 host_to_target_data_spec_nlattr);
2334 gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr->rta_type);
/*
 * Convert one IFA_* rtattr of an RTM_*ADDR message: binary address
 * payloads pass through, u32 scalars are swapped, and the
 * ifa_cacheinfo struct is swapped field by field.  Unknown types are
 * logged.  (The case labels for several types are elided here.)
 */
2340 static abi_long host_to_target_data_addr_rtattr(struct rtattr *rtattr)
2343 struct ifa_cacheinfo *ci;
2345 switch (rtattr->rta_type) {
2346 /* binary: depends on family type */
2356 u32 = RTA_DATA(rtattr);
2357 *u32 = tswap32(*u32);
2359 /* struct ifa_cacheinfo */
2361 ci = RTA_DATA(rtattr);
2362 ci->ifa_prefered = tswap32(ci->ifa_prefered);
2363 ci->ifa_valid = tswap32(ci->ifa_valid);
2364 ci->cstamp = tswap32(ci->cstamp);
2365 ci->tstamp = tswap32(ci->tstamp);
2368 gemu_log("Unknown host IFA type: %d\n", rtattr->rta_type);
/*
 * Convert one RTA_* rtattr of an RTM_*ROUTE message: binary payloads
 * pass through, u32 scalars are swapped.  Unknown types are logged.
 * (The case labels are elided in this listing.)
 */
2374 static abi_long host_to_target_data_route_rtattr(struct rtattr *rtattr)
2377 switch (rtattr->rta_type) {
2378 /* binary: depends on family type */
2387 u32 = RTA_DATA(rtattr);
2388 *u32 = tswap32(*u32);
2391 gemu_log("Unknown host RTA type: %d\n", rtattr->rta_type);
/* Walk all rtattrs of a link message with the link-specific converter. */
2397 static abi_long host_to_target_link_rtattr(struct rtattr *rtattr,
2398 uint32_t rtattr_len)
2400 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2401 host_to_target_data_link_rtattr);
/* Walk all rtattrs of an address message with the addr-specific converter. */
2404 static abi_long host_to_target_addr_rtattr(struct rtattr *rtattr,
2405 uint32_t rtattr_len)
2407 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2408 host_to_target_data_addr_rtattr);
/* Walk all rtattrs of a route message with the route-specific converter. */
2411 static abi_long host_to_target_route_rtattr(struct rtattr *rtattr,
2412 uint32_t rtattr_len)
2414 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2415 host_to_target_data_route_rtattr);
/*
 * Convert the payload of one rtnetlink message (link / addr / route
 * families) from host to target order: swap the fixed-size header
 * struct, then hand the trailing rtattr chain to the matching
 * per-family walker.  Unknown message types yield -TARGET_EINVAL.
 * (The case labels and some lines are elided in this listing.)
 */
2418 static abi_long host_to_target_data_route(struct nlmsghdr *nlh)
2421 struct ifinfomsg *ifi;
2422 struct ifaddrmsg *ifa;
2425 nlmsg_len = nlh->nlmsg_len;
2426 switch (nlh->nlmsg_type) {
/* RTM_*LINK: struct ifinfomsg + IFLA_* attributes */
2430 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
2431 ifi = NLMSG_DATA(nlh);
2432 ifi->ifi_type = tswap16(ifi->ifi_type);
2433 ifi->ifi_index = tswap32(ifi->ifi_index);
2434 ifi->ifi_flags = tswap32(ifi->ifi_flags);
2435 ifi->ifi_change = tswap32(ifi->ifi_change);
2436 host_to_target_link_rtattr(IFLA_RTA(ifi),
2437 nlmsg_len - NLMSG_LENGTH(sizeof(*ifi)));
/* RTM_*ADDR: struct ifaddrmsg + IFA_* attributes */
2443 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
2444 ifa = NLMSG_DATA(nlh);
2445 ifa->ifa_index = tswap32(ifa->ifa_index);
2446 host_to_target_addr_rtattr(IFA_RTA(ifa),
2447 nlmsg_len - NLMSG_LENGTH(sizeof(*ifa)));
/* RTM_*ROUTE: struct rtmsg + RTA_* attributes */
2453 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
2454 rtm = NLMSG_DATA(nlh);
2455 rtm->rtm_flags = tswap32(rtm->rtm_flags);
2456 host_to_target_route_rtattr(RTM_RTA(rtm),
2457 nlmsg_len - NLMSG_LENGTH(sizeof(*rtm)));
2461 return -TARGET_EINVAL;
/* Apply host_to_target_data_route() to every nlmsghdr in the buffer. */
2466 static inline abi_long host_to_target_nlmsg_route(struct nlmsghdr *nlh,
2469 return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_route);
/*
 * Iterate a target-byte-order rtattr chain: validate each header (still
 * in target order, hence the tswap16 reads), swap it to host order, then
 * invoke the supplied per-attribute callback. Advances by the RTA_ALIGNed
 * length until fewer than sizeof(struct rtattr) bytes remain.
 */
2472 static abi_long target_to_host_for_each_rtattr(struct rtattr *rtattr,
2474 abi_long (*target_to_host_rtattr)
/* length sanity-check must happen before the in-place swap below */
2479 while (len >= sizeof(struct rtattr)) {
2480 if (tswap16(rtattr->rta_len) < sizeof(struct rtattr) ||
2481 tswap16(rtattr->rta_len) > len) {
2484 rtattr->rta_len = tswap16(rtattr->rta_len);
2485 rtattr->rta_type = tswap16(rtattr->rta_type);
2486 ret = target_to_host_rtattr(rtattr);
2490 len -= RTA_ALIGN(rtattr->rta_len);
2491 rtattr = (struct rtattr *)(((char *)rtattr) +
2492 RTA_ALIGN(rtattr->rta_len));
/*
 * Target-to-host per-attribute converters (mirror images of the
 * host_to_target_data_*_rtattr functions above). Unknown attribute
 * types are logged and left unmodified.
 */
2497 static abi_long target_to_host_data_link_rtattr(struct rtattr *rtattr)
2499 switch (rtattr->rta_type) {
2501 gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr->rta_type);
/* IFA_* attributes arriving from the target. */
2507 static abi_long target_to_host_data_addr_rtattr(struct rtattr *rtattr)
2509 switch (rtattr->rta_type) {
2510 /* binary: depends on family type */
2515 gemu_log("Unknown target IFA type: %d\n", rtattr->rta_type);
/* RTA_* attributes arriving from the target; u32 payloads swapped in place. */
2521 static abi_long target_to_host_data_route_rtattr(struct rtattr *rtattr)
2524 switch (rtattr->rta_type) {
2525 /* binary: depends on family type */
2532 u32 = RTA_DATA(rtattr);
2533 *u32 = tswap32(*u32);
2536 gemu_log("Unknown target RTA type: %d\n", rtattr->rta_type);
/*
 * Thin wrappers: walk an incoming (target-order) rtattr chain with the
 * matching per-attribute converter. Return values are discarded here,
 * hence the void return type.
 */
2542 static void target_to_host_link_rtattr(struct rtattr *rtattr,
2543 uint32_t rtattr_len)
2545 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2546 target_to_host_data_link_rtattr);
/* IFA_* attributes. */
2549 static void target_to_host_addr_rtattr(struct rtattr *rtattr,
2550 uint32_t rtattr_len)
2552 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2553 target_to_host_data_addr_rtattr);
/* RTA_* attributes. */
2556 static void target_to_host_route_rtattr(struct rtattr *rtattr,
2557 uint32_t rtattr_len)
2559 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2560 target_to_host_data_route_rtattr);
/*
 * Convert one rtnetlink message from target to host byte order before it
 * is handed to the host kernel: swap the fixed header, then the rtattr
 * chain. Unsupported message types return -TARGET_EOPNOTSUPP (note the
 * asymmetry with host_to_target_data_route, which returns -TARGET_EINVAL).
 */
2563 static abi_long target_to_host_data_route(struct nlmsghdr *nlh)
2565 struct ifinfomsg *ifi;
2566 struct ifaddrmsg *ifa;
2569 switch (nlh->nlmsg_type) {
/* link messages: struct ifinfomsg + IFLA_* attributes */
2574 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
2575 ifi = NLMSG_DATA(nlh);
2576 ifi->ifi_type = tswap16(ifi->ifi_type);
2577 ifi->ifi_index = tswap32(ifi->ifi_index);
2578 ifi->ifi_flags = tswap32(ifi->ifi_flags);
2579 ifi->ifi_change = tswap32(ifi->ifi_change);
2580 target_to_host_link_rtattr(IFLA_RTA(ifi), nlh->nlmsg_len -
2581 NLMSG_LENGTH(sizeof(*ifi)));
/* address messages: struct ifaddrmsg + IFA_* attributes */
2587 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
2588 ifa = NLMSG_DATA(nlh);
2589 ifa->ifa_index = tswap32(ifa->ifa_index);
2590 target_to_host_addr_rtattr(IFA_RTA(ifa), nlh->nlmsg_len -
2591 NLMSG_LENGTH(sizeof(*ifa)));
/* route messages: struct rtmsg + RTA_* attributes */
2598 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
2599 rtm = NLMSG_DATA(nlh);
2600 rtm->rtm_flags = tswap32(rtm->rtm_flags);
2601 target_to_host_route_rtattr(RTM_RTA(rtm), nlh->nlmsg_len -
2602 NLMSG_LENGTH(sizeof(*rtm)));
2606 return -TARGET_EOPNOTSUPP;
/* Apply target_to_host_data_route() to every nlmsghdr in the buffer. */
2611 static abi_long target_to_host_nlmsg_route(struct nlmsghdr *nlh, size_t len)
2613 return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_route);
2615 #endif /* CONFIG_RTNETLINK */
/*
 * Convert one NETLINK_AUDIT message from host to target order.
 * Unknown message types are logged and rejected with -TARGET_EINVAL.
 */
2617 static abi_long host_to_target_data_audit(struct nlmsghdr *nlh)
2619 switch (nlh->nlmsg_type) {
2621 gemu_log("Unknown host audit message type %d\n",
2623 return -TARGET_EINVAL;
/* Apply host_to_target_data_audit() to every nlmsghdr in the buffer. */
2628 static inline abi_long host_to_target_nlmsg_audit(struct nlmsghdr *nlh,
2631 return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_audit);
/*
 * Convert one NETLINK_AUDIT message from target to host order. The
 * user-message ranges (AUDIT_FIRST_USER_MSG..) carry free-form string
 * payloads, so they need no byte-swapping beyond the common header.
 */
2634 static abi_long target_to_host_data_audit(struct nlmsghdr *nlh)
2636 switch (nlh->nlmsg_type) {
2638 case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
2639 case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
2642 gemu_log("Unknown target audit message type %d\n",
2644 return -TARGET_EINVAL;
/* Apply target_to_host_data_audit() to every nlmsghdr in the buffer. */
2650 static abi_long target_to_host_nlmsg_audit(struct nlmsghdr *nlh, size_t len)
2652 return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_audit);
2655 /* do_setsockopt() Must return target values and target errnos. */
/*
 * Emulate setsockopt(2): translate the target-side level/optname/optval
 * into host equivalents, call the host setsockopt, and map errors back.
 * NOTE(review): many case labels and the epilogue are missing from this
 * extract; comments below describe only the visible fragments.
 */
2656 static abi_long do_setsockopt(int sockfd, int level, int optname,
2657 abi_ulong optval_addr, socklen_t optlen)
2661 struct ip_mreqn *ip_mreq;
2662 struct ip_mreq_source *ip_mreq_source;
2666 /* TCP options all take an 'int' value. */
2667 if (optlen < sizeof(uint32_t))
2668 return -TARGET_EINVAL;
2670 if (get_user_u32(val, optval_addr))
2671 return -TARGET_EFAULT;
2672 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
/* IP-level options that accept either a u32 or a single byte */
2679 case IP_ROUTER_ALERT:
2683 case IP_MTU_DISCOVER:
2689 case IP_MULTICAST_TTL:
2690 case IP_MULTICAST_LOOP:
2692 if (optlen >= sizeof(uint32_t)) {
2693 if (get_user_u32(val, optval_addr))
2694 return -TARGET_EFAULT;
2695 } else if (optlen >= 1) {
2696 if (get_user_u8(val, optval_addr))
2697 return -TARGET_EFAULT;
2699 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
/* multicast membership: struct converted via target_to_host_ip_mreq() */
2701 case IP_ADD_MEMBERSHIP:
2702 case IP_DROP_MEMBERSHIP:
2703 if (optlen < sizeof (struct target_ip_mreq) ||
2704 optlen > sizeof (struct target_ip_mreqn))
2705 return -TARGET_EINVAL;
2707 ip_mreq = (struct ip_mreqn *) alloca(optlen);
2708 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2709 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
/* source-specific multicast: struct passed through unmodified */
2712 case IP_BLOCK_SOURCE:
2713 case IP_UNBLOCK_SOURCE:
2714 case IP_ADD_SOURCE_MEMBERSHIP:
2715 case IP_DROP_SOURCE_MEMBERSHIP:
2716 if (optlen != sizeof (struct target_ip_mreq_source))
2717 return -TARGET_EINVAL;
2719 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2720 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2721 unlock_user (ip_mreq_source, optval_addr, 0);
/* IPv6 options: plain u32 values */
2730 case IPV6_MTU_DISCOVER:
2733 case IPV6_RECVPKTINFO:
2735 if (optlen < sizeof(uint32_t)) {
2736 return -TARGET_EINVAL;
2738 if (get_user_u32(val, optval_addr)) {
2739 return -TARGET_EFAULT;
2741 ret = get_errno(setsockopt(sockfd, level, optname,
2742 &val, sizeof(val)));
2751 /* struct icmp_filter takes an u32 value */
2752 if (optlen < sizeof(uint32_t)) {
2753 return -TARGET_EINVAL;
2756 if (get_user_u32(val, optval_addr)) {
2757 return -TARGET_EFAULT;
2759 ret = get_errno(setsockopt(sockfd, level, optname,
2760 &val, sizeof(val)));
2767 case TARGET_SOL_SOCKET:
/* SO_RCVTIMEO/SO_SNDTIMEO take a struct timeval, converted here */
2769 case TARGET_SO_RCVTIMEO:
2773 optname = SO_RCVTIMEO;
2776 if (optlen != sizeof(struct target_timeval)) {
2777 return -TARGET_EINVAL;
2780 if (copy_from_user_timeval(&tv, optval_addr)) {
2781 return -TARGET_EFAULT;
2784 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2788 case TARGET_SO_SNDTIMEO:
2789 optname = SO_SNDTIMEO;
/* SO_ATTACH_FILTER: deep-copy and byte-swap the BPF program */
2791 case TARGET_SO_ATTACH_FILTER:
2793 struct target_sock_fprog *tfprog;
2794 struct target_sock_filter *tfilter;
2795 struct sock_fprog fprog;
2796 struct sock_filter *filter;
2799 if (optlen != sizeof(*tfprog)) {
2800 return -TARGET_EINVAL;
2802 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2803 return -TARGET_EFAULT;
2805 if (!lock_user_struct(VERIFY_READ, tfilter,
2806 tswapal(tfprog->filter), 0)) {
2807 unlock_user_struct(tfprog, optval_addr, 1);
2808 return -TARGET_EFAULT;
2811 fprog.len = tswap16(tfprog->len);
2812 filter = g_try_new(struct sock_filter, fprog.len);
2813 if (filter == NULL) {
2814 unlock_user_struct(tfilter, tfprog->filter, 1);
2815 unlock_user_struct(tfprog, optval_addr, 1);
2816 return -TARGET_ENOMEM;
/* jt/jf are single bytes and need no swapping */
2818 for (i = 0; i < fprog.len; i++) {
2819 filter[i].code = tswap16(tfilter[i].code);
2820 filter[i].jt = tfilter[i].jt;
2821 filter[i].jf = tfilter[i].jf;
2822 filter[i].k = tswap32(tfilter[i].k);
2824 fprog.filter = filter;
2826 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2827 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2830 unlock_user_struct(tfilter, tfprog->filter, 1);
2831 unlock_user_struct(tfprog, optval_addr, 1);
/* SO_BINDTODEVICE: copy interface name, clamped to IFNAMSIZ-1 */
2834 case TARGET_SO_BINDTODEVICE:
2836 char *dev_ifname, *addr_ifname;
2838 if (optlen > IFNAMSIZ - 1) {
2839 optlen = IFNAMSIZ - 1;
2841 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2843 return -TARGET_EFAULT;
2845 optname = SO_BINDTODEVICE;
2846 addr_ifname = alloca(IFNAMSIZ);
2847 memcpy(addr_ifname, dev_ifname, optlen);
2848 addr_ifname[optlen] = 0;
2849 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2850 addr_ifname, optlen));
2851 unlock_user (dev_ifname, optval_addr, 0);
2854 /* Options with 'int' argument. */
2855 case TARGET_SO_DEBUG:
2858 case TARGET_SO_REUSEADDR:
2859 optname = SO_REUSEADDR;
2861 case TARGET_SO_TYPE:
2864 case TARGET_SO_ERROR:
2867 case TARGET_SO_DONTROUTE:
2868 optname = SO_DONTROUTE;
2870 case TARGET_SO_BROADCAST:
2871 optname = SO_BROADCAST;
2873 case TARGET_SO_SNDBUF:
2874 optname = SO_SNDBUF;
2876 case TARGET_SO_SNDBUFFORCE:
2877 optname = SO_SNDBUFFORCE;
2879 case TARGET_SO_RCVBUF:
2880 optname = SO_RCVBUF;
2882 case TARGET_SO_RCVBUFFORCE:
2883 optname = SO_RCVBUFFORCE;
2885 case TARGET_SO_KEEPALIVE:
2886 optname = SO_KEEPALIVE;
2888 case TARGET_SO_OOBINLINE:
2889 optname = SO_OOBINLINE;
2891 case TARGET_SO_NO_CHECK:
2892 optname = SO_NO_CHECK;
2894 case TARGET_SO_PRIORITY:
2895 optname = SO_PRIORITY;
2898 case TARGET_SO_BSDCOMPAT:
2899 optname = SO_BSDCOMPAT;
2902 case TARGET_SO_PASSCRED:
2903 optname = SO_PASSCRED;
2905 case TARGET_SO_PASSSEC:
2906 optname = SO_PASSSEC;
2908 case TARGET_SO_TIMESTAMP:
2909 optname = SO_TIMESTAMP;
2911 case TARGET_SO_RCVLOWAT:
2912 optname = SO_RCVLOWAT;
/* common tail for all int-valued SOL_SOCKET options */
2918 if (optlen < sizeof(uint32_t))
2919 return -TARGET_EINVAL;
2921 if (get_user_u32(val, optval_addr))
2922 return -TARGET_EFAULT;
2923 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2927 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
2928 ret = -TARGET_ENOPROTOOPT;
2933 /* do_getsockopt() Must return target values and target errnos. */
/*
 * Emulate getsockopt(2): map target level/optname, call the host
 * getsockopt, and write the result (and updated length) back to guest
 * memory. NOTE(review): several case labels and the epilogue are missing
 * from this extract.
 */
2934 static abi_long do_getsockopt(int sockfd, int level, int optname,
2935 abi_ulong optval_addr, abi_ulong optlen)
2942 case TARGET_SOL_SOCKET:
2945 /* These don't just return a single integer */
2946 case TARGET_SO_LINGER:
2947 case TARGET_SO_RCVTIMEO:
2948 case TARGET_SO_SNDTIMEO:
2949 case TARGET_SO_PEERNAME:
/* SO_PEERCRED: fetch host ucred and convert to target layout */
2951 case TARGET_SO_PEERCRED: {
2954 struct target_ucred *tcr;
2956 if (get_user_u32(len, optlen)) {
2957 return -TARGET_EFAULT;
2960 return -TARGET_EINVAL;
2964 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2972 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2973 return -TARGET_EFAULT;
2975 __put_user(cr.pid, &tcr->pid);
2976 __put_user(cr.uid, &tcr->uid);
2977 __put_user(cr.gid, &tcr->gid);
2978 unlock_user_struct(tcr, optval_addr, 1);
2979 if (put_user_u32(len, optlen)) {
2980 return -TARGET_EFAULT;
2984 /* Options with 'int' argument. */
2985 case TARGET_SO_DEBUG:
2988 case TARGET_SO_REUSEADDR:
2989 optname = SO_REUSEADDR;
2991 case TARGET_SO_TYPE:
2994 case TARGET_SO_ERROR:
2997 case TARGET_SO_DONTROUTE:
2998 optname = SO_DONTROUTE;
3000 case TARGET_SO_BROADCAST:
3001 optname = SO_BROADCAST;
3003 case TARGET_SO_SNDBUF:
3004 optname = SO_SNDBUF;
3006 case TARGET_SO_RCVBUF:
3007 optname = SO_RCVBUF;
3009 case TARGET_SO_KEEPALIVE:
3010 optname = SO_KEEPALIVE;
3012 case TARGET_SO_OOBINLINE:
3013 optname = SO_OOBINLINE;
3015 case TARGET_SO_NO_CHECK:
3016 optname = SO_NO_CHECK;
3018 case TARGET_SO_PRIORITY:
3019 optname = SO_PRIORITY;
3022 case TARGET_SO_BSDCOMPAT:
3023 optname = SO_BSDCOMPAT;
3026 case TARGET_SO_PASSCRED:
3027 optname = SO_PASSCRED;
3029 case TARGET_SO_TIMESTAMP:
3030 optname = SO_TIMESTAMP;
3032 case TARGET_SO_RCVLOWAT:
3033 optname = SO_RCVLOWAT;
3035 case TARGET_SO_ACCEPTCONN:
3036 optname = SO_ACCEPTCONN;
3043 /* TCP options all take an 'int' value. */
/* common tail for int-valued options */
3045 if (get_user_u32(len, optlen))
3046 return -TARGET_EFAULT;
3048 return -TARGET_EINVAL;
3050 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
/* SO_TYPE value must be translated back to the target's encoding */
3053 if (optname == SO_TYPE) {
3054 val = host_to_target_sock_type(val);
3059 if (put_user_u32(val, optval_addr))
3060 return -TARGET_EFAULT;
3062 if (put_user_u8(val, optval_addr))
3063 return -TARGET_EFAULT;
3065 if (put_user_u32(len, optlen))
3066 return -TARGET_EFAULT;
/* IP-level options that may be read back as a byte or a u32 */
3073 case IP_ROUTER_ALERT:
3077 case IP_MTU_DISCOVER:
3083 case IP_MULTICAST_TTL:
3084 case IP_MULTICAST_LOOP:
3085 if (get_user_u32(len, optlen))
3086 return -TARGET_EFAULT;
3088 return -TARGET_EINVAL;
3090 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
/* caller asked for < sizeof(int): deliver a single byte if it fits */
3093 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
3095 if (put_user_u32(len, optlen)
3096 || put_user_u8(val, optval_addr))
3097 return -TARGET_EFAULT;
3099 if (len > sizeof(int))
3101 if (put_user_u32(len, optlen)
3102 || put_user_u32(val, optval_addr))
3103 return -TARGET_EFAULT;
3107 ret = -TARGET_ENOPROTOOPT;
3113 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
3115 ret = -TARGET_EOPNOTSUPP;
/*
 * Build a host struct iovec array from a guest target_iovec array,
 * locking each guest buffer into host memory. On success the caller
 * must release with unlock_iovec(); on failure previously-locked
 * buffers are unwound (unwind path partially visible below).
 */
3121 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
3122 abi_ulong count, int copy)
3124 struct target_iovec *target_vec;
3126 abi_ulong total_len, max_len;
3129 bool bad_address = false;
3135 if (count > IOV_MAX) {
3140 vec = g_try_new0(struct iovec, count);
3146 target_vec = lock_user(VERIFY_READ, target_addr,
3147 count * sizeof(struct target_iovec), 1);
3148 if (target_vec == NULL) {
3153 /* ??? If host page size > target page size, this will result in a
3154 value larger than what we can actually support. */
3155 max_len = 0x7fffffff & TARGET_PAGE_MASK;
3158 for (i = 0; i < count; i++) {
3159 abi_ulong base = tswapal(target_vec[i].iov_base);
3160 abi_long len = tswapal(target_vec[i].iov_len);
3165 } else if (len == 0) {
3166 /* Zero length pointer is ignored. */
3167 vec[i].iov_base = 0;
3169 vec[i].iov_base = lock_user(type, base, len, copy);
3170 /* If the first buffer pointer is bad, this is a fault. But
3171 * subsequent bad buffers will result in a partial write; this
3172 * is realized by filling the vector with null pointers and
3174 if (!vec[i].iov_base) {
/* clamp so the summed length never exceeds max_len */
3185 if (len > max_len - total_len) {
3186 len = max_len - total_len;
3189 vec[i].iov_len = len;
3193 unlock_user(target_vec, target_addr, 0);
/* failure unwind: release every buffer locked so far */
3198 if (tswapal(target_vec[i].iov_len) > 0) {
3199 unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3202 unlock_user(target_vec, target_addr, 0);
/*
 * Release an iovec previously built by lock_iovec(). When 'copy' is
 * nonzero the locked data is written back to guest memory (read-style
 * operations); otherwise buffers are simply unlocked.
 */
3209 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3210 abi_ulong count, int copy)
3212 struct target_iovec *target_vec;
3215 target_vec = lock_user(VERIFY_READ, target_addr,
3216 count * sizeof(struct target_iovec), 1);
3218 for (i = 0; i < count; i++) {
3219 abi_ulong base = tswapal(target_vec[i].iov_base);
3220 abi_long len = tswapal(target_vec[i].iov_len);
3224 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3226 unlock_user(target_vec, target_addr, 0);
/*
 * Translate a target socket type (base type plus TARGET_SOCK_CLOEXEC /
 * TARGET_SOCK_NONBLOCK flags) into the host encoding, rewriting *type
 * in place. Returns -TARGET_EINVAL when a requested flag cannot be
 * represented on this host.
 */
3232 static inline int target_to_host_sock_type(int *type)
3235 int target_type = *type;
3237 switch (target_type & TARGET_SOCK_TYPE_MASK) {
3238 case TARGET_SOCK_DGRAM:
3239 host_type = SOCK_DGRAM;
3241 case TARGET_SOCK_STREAM:
3242 host_type = SOCK_STREAM;
/* other base types assumed to share the host's numbering */
3245 host_type = target_type & TARGET_SOCK_TYPE_MASK;
3248 if (target_type & TARGET_SOCK_CLOEXEC) {
3249 #if defined(SOCK_CLOEXEC)
3250 host_type |= SOCK_CLOEXEC;
3252 return -TARGET_EINVAL;
3255 if (target_type & TARGET_SOCK_NONBLOCK) {
3256 #if defined(SOCK_NONBLOCK)
3257 host_type |= SOCK_NONBLOCK;
/* without SOCK_NONBLOCK, O_NONBLOCK is applied later in sock_flags_fixup() */
3258 #elif !defined(O_NONBLOCK)
3259 return -TARGET_EINVAL;
3266 /* Try to emulate socket type flags after socket creation. */
/* Fallback for hosts lacking SOCK_NONBLOCK: set O_NONBLOCK via fcntl. */
3267 static int sock_flags_fixup(int fd, int target_type)
3269 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3270 if (target_type & TARGET_SOCK_NONBLOCK) {
3271 int flags = fcntl(fd, F_GETFL);
3272 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3274 return -TARGET_EINVAL;
/*
 * Address translator for AF_PACKET/SOCK_PACKET sockets: copy the raw
 * sockaddr bytes and fix only the family field's byte order (the
 * spkt_protocol field stays big-endian on both sides, per the comment
 * retained below).
 */
3281 static abi_long packet_target_to_host_sockaddr(void *host_addr,
3282 abi_ulong target_addr,
3285 struct sockaddr *addr = host_addr;
3286 struct target_sockaddr *target_saddr;
3288 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
3289 if (!target_saddr) {
3290 return -TARGET_EFAULT;
3293 memcpy(addr, target_saddr, len);
3294 addr->sa_family = tswap16(target_saddr->sa_family);
3295 /* spkt_protocol is big-endian */
3297 unlock_user(target_saddr, target_addr, 0);
/* fd translation table registered for SOCK_PACKET fds in do_socket() */
3301 static TargetFdTrans target_packet_trans = {
3302 .target_to_host_addr = packet_target_to_host_sockaddr,
3305 #ifdef CONFIG_RTNETLINK
/* fd-translator hooks: byte-swap NETLINK_ROUTE traffic in both directions */
3306 static abi_long netlink_route_target_to_host(void *buf, size_t len)
3310 ret = target_to_host_nlmsg_route(buf, len);
3318 static abi_long netlink_route_host_to_target(void *buf, size_t len)
3322 ret = host_to_target_nlmsg_route(buf, len);
/* registered for NETLINK_ROUTE sockets in do_socket() */
3330 static TargetFdTrans target_netlink_route_trans = {
3331 .target_to_host_data = netlink_route_target_to_host,
3332 .host_to_target_data = netlink_route_host_to_target,
3334 #endif /* CONFIG_RTNETLINK */
/* fd-translator hooks: byte-swap NETLINK_AUDIT traffic in both directions */
3336 static abi_long netlink_audit_target_to_host(void *buf, size_t len)
3340 ret = target_to_host_nlmsg_audit(buf, len);
3348 static abi_long netlink_audit_host_to_target(void *buf, size_t len)
3352 ret = host_to_target_nlmsg_audit(buf, len);
/* registered for NETLINK_AUDIT sockets in do_socket() */
3360 static TargetFdTrans target_netlink_audit_trans = {
3361 .target_to_host_data = netlink_audit_target_to_host,
3362 .host_to_target_data = netlink_audit_host_to_target,
3365 /* do_socket() Must return target values and target errnos. */
/*
 * Emulate socket(2): translate the type flags, restrict PF_NETLINK to
 * the protocols we can translate, create the host socket, then register
 * the fd translator appropriate for the protocol.
 */
3366 static abi_long do_socket(int domain, int type, int protocol)
3368 int target_type = type;
3371 ret = target_to_host_sock_type(&type);
/* only ROUTE (if built), KOBJECT_UEVENT and AUDIT netlink families supported */
3376 if (domain == PF_NETLINK && !(
3377 #ifdef CONFIG_RTNETLINK
3378 protocol == NETLINK_ROUTE ||
3380 protocol == NETLINK_KOBJECT_UEVENT ||
3381 protocol == NETLINK_AUDIT)) {
3382 return -EPFNOSUPPORT;
/* packet sockets carry a big-endian protocol number */
3385 if (domain == AF_PACKET ||
3386 (domain == AF_INET && type == SOCK_PACKET)) {
3387 protocol = tswap16(protocol);
3390 ret = get_errno(socket(domain, type, protocol));
3392 ret = sock_flags_fixup(ret, target_type);
3393 if (type == SOCK_PACKET) {
3394 /* Manage an obsolete case :
3395 * if socket type is SOCK_PACKET, bind by name
3397 fd_trans_register(ret, &target_packet_trans);
3398 } else if (domain == PF_NETLINK) {
3400 #ifdef CONFIG_RTNETLINK
3402 fd_trans_register(ret, &target_netlink_route_trans);
3405 case NETLINK_KOBJECT_UEVENT:
3406 /* nothing to do: messages are strings */
3409 fd_trans_register(ret, &target_netlink_audit_trans);
/* any other protocol was rejected above */
3412 g_assert_not_reached();
3419 /* do_bind() Must return target values and target errnos. */
/* Emulate bind(2): convert the guest sockaddr, then call the host bind. */
3420 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3426 if ((int)addrlen < 0) {
3427 return -TARGET_EINVAL;
/* +1 leaves room for NUL termination of abstract/FS socket paths */
3430 addr = alloca(addrlen+1);
3432 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3436 return get_errno(bind(sockfd, addr, addrlen));
3439 /* do_connect() Must return target values and target errnos. */
/* Emulate connect(2); uses the signal-safe safe_connect wrapper. */
3440 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3446 if ((int)addrlen < 0) {
3447 return -TARGET_EINVAL;
3450 addr = alloca(addrlen+1);
3452 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3456 return get_errno(safe_connect(sockfd, addr, addrlen));
3459 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
/*
 * Core of sendmsg/recvmsg emulation, operating on an already-locked
 * target_msghdr. Converts name, control data and iovec, applies any
 * per-fd data translator, and performs the host sendmsg/recvmsg.
 * NOTE(review): several lines are missing from this extract.
 */
3460 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3461 int flags, int send)
3467 abi_ulong target_vec;
3469 if (msgp->msg_name) {
3470 msg.msg_namelen = tswap32(msgp->msg_namelen);
3471 msg.msg_name = alloca(msg.msg_namelen+1);
3472 ret = target_to_host_sockaddr(fd, msg.msg_name,
3473 tswapal(msgp->msg_name),
3479 msg.msg_name = NULL;
3480 msg.msg_namelen = 0;
/* host cmsg layout can be larger than the target's, hence the 2x headroom */
3482 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3483 msg.msg_control = alloca(msg.msg_controllen);
3484 msg.msg_flags = tswap32(msgp->msg_flags);
3486 count = tswapal(msgp->msg_iovlen);
3487 target_vec = tswapal(msgp->msg_iov);
3489 if (count > IOV_MAX) {
3490 /* sendrcvmsg returns a different errno for this condition than
3491 * readv/writev, so we must catch it here before lock_iovec() does.
3493 ret = -TARGET_EMSGSIZE;
3497 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3498 target_vec, count, send);
3500 ret = -host_to_target_errno(errno);
3503 msg.msg_iovlen = count;
/* send path: run the fd's data translator on a private copy first */
3507 if (fd_trans_target_to_host_data(fd)) {
3510 host_msg = g_malloc(msg.msg_iov->iov_len);
3511 memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3512 ret = fd_trans_target_to_host_data(fd)(host_msg,
3513 msg.msg_iov->iov_len);
3515 msg.msg_iov->iov_base = host_msg;
3516 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3520 ret = target_to_host_cmsg(&msg, msgp);
3522 ret = get_errno(safe_sendmsg(fd, &msg, flags));
/* receive path: translate data and control back to target format */
3526 ret = get_errno(safe_recvmsg(fd, &msg, flags));
3527 if (!is_error(ret)) {
3529 if (fd_trans_host_to_target_data(fd)) {
3530 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3533 ret = host_to_target_cmsg(msgp, &msg);
3535 if (!is_error(ret)) {
3536 msgp->msg_namelen = tswap32(msg.msg_namelen);
3537 if (msg.msg_name != NULL) {
3538 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3539 msg.msg_name, msg.msg_namelen);
/* copy-back only for the receive direction (!send) */
3551 unlock_iovec(vec, target_vec, count, !send);
/*
 * sendmsg/recvmsg entry point: lock the guest msghdr, delegate to
 * do_sendrecvmsg_locked(), and copy the header back on the receive path.
 */
3556 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3557 int flags, int send)
3560 struct target_msghdr *msgp;
3562 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3566 return -TARGET_EFAULT;
3568 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3569 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3573 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3574 * so it might not have this *mmsg-specific flag either.
3576 #ifndef MSG_WAITFORONE
3577 #define MSG_WAITFORONE 0x10000
/*
 * Emulate sendmmsg/recvmmsg by looping do_sendrecvmsg_locked() over the
 * vector, storing each per-message byte count in msg_len. Partial
 * success returns the count of messages processed so far.
 */
3580 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3581 unsigned int vlen, unsigned int flags,
3584 struct target_mmsghdr *mmsgp;
3588 if (vlen > UIO_MAXIOV) {
3592 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3594 return -TARGET_EFAULT;
3597 for (i = 0; i < vlen; i++) {
3598 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3599 if (is_error(ret)) {
3602 mmsgp[i].msg_len = tswap32(ret);
3603 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3604 if (flags & MSG_WAITFORONE) {
3605 flags |= MSG_DONTWAIT;
/* only the i entries actually written are copied back */
3609 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3611 /* Return number of datagrams sent if we sent any at all;
3612 * otherwise return the error.
3620 /* do_accept4() Must return target values and target errnos. */
/*
 * Emulate accept4(2) (and plain accept via flags == 0): translate the
 * flag bits, call the signal-safe host accept4, and write the peer
 * address back to guest memory when one was requested.
 */
3621 static abi_long do_accept4(int fd, abi_ulong target_addr,
3622 abi_ulong target_addrlen_addr, int flags)
3629 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
/* NULL guest address: the caller doesn't want the peer address */
3631 if (target_addr == 0) {
3632 return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3635 /* linux returns EINVAL if addrlen pointer is invalid */
3636 if (get_user_u32(addrlen, target_addrlen_addr))
3637 return -TARGET_EINVAL;
3639 if ((int)addrlen < 0) {
3640 return -TARGET_EINVAL;
3643 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3644 return -TARGET_EINVAL;
3646 addr = alloca(addrlen);
3648 ret = get_errno(safe_accept4(fd, addr, &addrlen, host_flags));
3649 if (!is_error(ret)) {
3650 host_to_target_sockaddr(target_addr, addr, addrlen);
3651 if (put_user_u32(addrlen, target_addrlen_addr))
3652 ret = -TARGET_EFAULT;
3657 /* do_getpeername() Must return target values and target errnos. */
/*
 * Emulate getpeername(2): validate the guest length/buffer, call the
 * host syscall into a stack buffer, then convert the sockaddr and
 * updated length back to guest memory.
 */
3658 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3659 abi_ulong target_addrlen_addr)
3665 if (get_user_u32(addrlen, target_addrlen_addr))
3666 return -TARGET_EFAULT;
3668 if ((int)addrlen < 0) {
3669 return -TARGET_EINVAL;
3672 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3673 return -TARGET_EFAULT;
3675 addr = alloca(addrlen);
3677 ret = get_errno(getpeername(fd, addr, &addrlen));
3678 if (!is_error(ret)) {
3679 host_to_target_sockaddr(target_addr, addr, addrlen);
3680 if (put_user_u32(addrlen, target_addrlen_addr))
3681 ret = -TARGET_EFAULT;
3686 /* do_getsockname() Must return target values and target errnos. */
/* Emulate getsockname(2); identical structure to do_getpeername(). */
3687 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3688 abi_ulong target_addrlen_addr)
3694 if (get_user_u32(addrlen, target_addrlen_addr))
3695 return -TARGET_EFAULT;
3697 if ((int)addrlen < 0) {
3698 return -TARGET_EINVAL;
3701 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3702 return -TARGET_EFAULT;
3704 addr = alloca(addrlen);
3706 ret = get_errno(getsockname(fd, addr, &addrlen));
3707 if (!is_error(ret)) {
3708 host_to_target_sockaddr(target_addr, addr, addrlen);
3709 if (put_user_u32(addrlen, target_addrlen_addr))
3710 ret = -TARGET_EFAULT;
3715 /* do_socketpair() Must return target values and target errnos. */
/*
 * Emulate socketpair(2): translate the type flags, create the pair, and
 * store both fds into the guest's two-element array.
 */
3716 static abi_long do_socketpair(int domain, int type, int protocol,
3717 abi_ulong target_tab_addr)
3722 target_to_host_sock_type(&type);
3724 ret = get_errno(socketpair(domain, type, protocol, tab));
3725 if (!is_error(ret)) {
3726 if (put_user_s32(tab[0], target_tab_addr)
3727 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3728 ret = -TARGET_EFAULT;
3733 /* do_sendto() Must return target values and target errnos. */
/*
 * Emulate sendto(2) (and send(2) when target_addr == 0): lock the guest
 * payload, run any per-fd data translator on a private copy, optionally
 * convert the destination sockaddr, and perform the host sendto.
 */
3734 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3735 abi_ulong target_addr, socklen_t addrlen)
3739 void *copy_msg = NULL;
3742 if ((int)addrlen < 0) {
3743 return -TARGET_EINVAL;
3746 host_msg = lock_user(VERIFY_READ, msg, len, 1);
3748 return -TARGET_EFAULT;
/* translator may rewrite the payload; keep the original for unlock */
3749 if (fd_trans_target_to_host_data(fd)) {
3750 copy_msg = host_msg;
3751 host_msg = g_malloc(len);
3752 memcpy(host_msg, copy_msg, len);
3753 ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3759 addr = alloca(addrlen+1);
3760 ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3764 ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3766 ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
/* restore the locked pointer before unlocking guest memory */
3771 host_msg = copy_msg;
3773 unlock_user(host_msg, msg, 0);
3777 /* do_recvfrom() Must return target values and target errnos. */
/*
 * Emulate recvfrom(2) (and recv(2) when target_addr == 0): lock the
 * guest buffer for writing, receive into it, run any per-fd data
 * translator, and write back the source address if one was requested.
 */
3778 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3779 abi_ulong target_addr,
3780 abi_ulong target_addrlen)
3787 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3789 return -TARGET_EFAULT;
3791 if (get_user_u32(addrlen, target_addrlen)) {
3792 ret = -TARGET_EFAULT;
3795 if ((int)addrlen < 0) {
3796 ret = -TARGET_EINVAL;
3799 addr = alloca(addrlen);
3800 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3803 addr = NULL; /* To keep compiler quiet. */
3804 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3806 if (!is_error(ret)) {
3807 if (fd_trans_host_to_target_data(fd)) {
3808 ret = fd_trans_host_to_target_data(fd)(host_msg, ret);
3811 host_to_target_sockaddr(target_addr, addr, addrlen);
3812 if (put_user_u32(addrlen, target_addrlen)) {
3813 ret = -TARGET_EFAULT;
/* success: copy back 'len' bytes; failure: unlock without copy-back */
3817 unlock_user(host_msg, msg, len);
3820 unlock_user(host_msg, msg, 0);
3825 #ifdef TARGET_NR_socketcall
3826 /* do_socketcall() Must return target values and target errnos. */
/*
 * Emulate the multiplexed socketcall(2) entry point: read the per-call
 * argument count from ac[], fetch the arguments from guest memory at
 * vptr, and dispatch to the corresponding do_* helper.
 */
3827 static abi_long do_socketcall(int num, abi_ulong vptr)
3829 static const unsigned ac[] = { /* number of arguments per call */
3830 [SOCKOP_socket] = 3, /* domain, type, protocol */
3831 [SOCKOP_bind] = 3, /* sockfd, addr, addrlen */
3832 [SOCKOP_connect] = 3, /* sockfd, addr, addrlen */
3833 [SOCKOP_listen] = 2, /* sockfd, backlog */
3834 [SOCKOP_accept] = 3, /* sockfd, addr, addrlen */
3835 [SOCKOP_accept4] = 4, /* sockfd, addr, addrlen, flags */
3836 [SOCKOP_getsockname] = 3, /* sockfd, addr, addrlen */
3837 [SOCKOP_getpeername] = 3, /* sockfd, addr, addrlen */
3838 [SOCKOP_socketpair] = 4, /* domain, type, protocol, tab */
3839 [SOCKOP_send] = 4, /* sockfd, msg, len, flags */
3840 [SOCKOP_recv] = 4, /* sockfd, msg, len, flags */
3841 [SOCKOP_sendto] = 6, /* sockfd, msg, len, flags, addr, addrlen */
3842 [SOCKOP_recvfrom] = 6, /* sockfd, msg, len, flags, addr, addrlen */
3843 [SOCKOP_shutdown] = 2, /* sockfd, how */
3844 [SOCKOP_sendmsg] = 3, /* sockfd, msg, flags */
3845 [SOCKOP_recvmsg] = 3, /* sockfd, msg, flags */
3846 [SOCKOP_sendmmsg] = 4, /* sockfd, msgvec, vlen, flags */
3847 [SOCKOP_recvmmsg] = 4, /* sockfd, msgvec, vlen, flags */
3848 [SOCKOP_setsockopt] = 5, /* sockfd, level, optname, optval, optlen */
3849 [SOCKOP_getsockopt] = 5, /* sockfd, level, optname, optval, optlen */
3851 abi_long a[6]; /* max 6 args */
3853 /* first, collect the arguments in a[] according to ac[] */
3854 if (num >= 0 && num < ARRAY_SIZE(ac)) {
3856 assert(ARRAY_SIZE(a) >= ac[num]); /* ensure we have space for args */
3857 for (i = 0; i < ac[num]; ++i) {
3858 if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3859 return -TARGET_EFAULT;
3864 /* now when we have the args, actually handle the call */
3866 case SOCKOP_socket: /* domain, type, protocol */
3867 return do_socket(a[0], a[1], a[2]);
3868 case SOCKOP_bind: /* sockfd, addr, addrlen */
3869 return do_bind(a[0], a[1], a[2]);
3870 case SOCKOP_connect: /* sockfd, addr, addrlen */
3871 return do_connect(a[0], a[1], a[2]);
3872 case SOCKOP_listen: /* sockfd, backlog */
3873 return get_errno(listen(a[0], a[1]));
3874 case SOCKOP_accept: /* sockfd, addr, addrlen */
3875 return do_accept4(a[0], a[1], a[2], 0);
3876 case SOCKOP_accept4: /* sockfd, addr, addrlen, flags */
3877 return do_accept4(a[0], a[1], a[2], a[3]);
3878 case SOCKOP_getsockname: /* sockfd, addr, addrlen */
3879 return do_getsockname(a[0], a[1], a[2]);
3880 case SOCKOP_getpeername: /* sockfd, addr, addrlen */
3881 return do_getpeername(a[0], a[1], a[2]);
3882 case SOCKOP_socketpair: /* domain, type, protocol, tab */
3883 return do_socketpair(a[0], a[1], a[2], a[3]);
3884 case SOCKOP_send: /* sockfd, msg, len, flags */
3885 return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3886 case SOCKOP_recv: /* sockfd, msg, len, flags */
3887 return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3888 case SOCKOP_sendto: /* sockfd, msg, len, flags, addr, addrlen */
3889 return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3890 case SOCKOP_recvfrom: /* sockfd, msg, len, flags, addr, addrlen */
3891 return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3892 case SOCKOP_shutdown: /* sockfd, how */
3893 return get_errno(shutdown(a[0], a[1]));
3894 case SOCKOP_sendmsg: /* sockfd, msg, flags */
3895 return do_sendrecvmsg(a[0], a[1], a[2], 1);
3896 case SOCKOP_recvmsg: /* sockfd, msg, flags */
3897 return do_sendrecvmsg(a[0], a[1], a[2], 0);
3898 case SOCKOP_sendmmsg: /* sockfd, msgvec, vlen, flags */
3899 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3900 case SOCKOP_recvmmsg: /* sockfd, msgvec, vlen, flags */
3901 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3902 case SOCKOP_setsockopt: /* sockfd, level, optname, optval, optlen */
3903 return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3904 case SOCKOP_getsockopt: /* sockfd, level, optname, optval, optlen */
3905 return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3907 gemu_log("Unsupported socketcall: %d\n", num);
3908 return -TARGET_ENOSYS;
/* Fixed-size table of guest shared-memory mappings used by shmat/shmdt. */
3913 #define N_SHM_REGIONS 32
3915 static struct shm_region {
3919 } shm_regions[N_SHM_REGIONS];
3921 #ifndef TARGET_SEMID64_DS
3922 /* asm-generic version of this struct */
/*
 * Target layout of semid64_ds for architectures that don't provide
 * their own definition. On 32-bit ABIs the time fields are padded to
 * 64 bits with explicit __unused slots.
 */
3923 struct target_semid64_ds
3925 struct target_ipc_perm sem_perm;
3926 abi_ulong sem_otime;
3927 #if TARGET_ABI_BITS == 32
3928 abi_ulong __unused1;
3930 abi_ulong sem_ctime;
3931 #if TARGET_ABI_BITS == 32
3932 abi_ulong __unused2;
3934 abi_ulong sem_nsems;
3935 abi_ulong __unused3;
3936 abi_ulong __unused4;
/*
 * Copy the ipc_perm embedded in a guest semid64_ds into a host
 * struct ipc_perm, byte-swapping each field. The mode and __seq fields
 * are 32-bit on Alpha/MIPS/PPC targets and 16-bit elsewhere.
 */
3940 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3941 abi_ulong target_addr)
3943 struct target_ipc_perm *target_ip;
3944 struct target_semid64_ds *target_sd;
3946 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3947 return -TARGET_EFAULT;
3948 target_ip = &(target_sd->sem_perm);
3949 host_ip->__key = tswap32(target_ip->__key);
3950 host_ip->uid = tswap32(target_ip->uid);
3951 host_ip->gid = tswap32(target_ip->gid);
3952 host_ip->cuid = tswap32(target_ip->cuid);
3953 host_ip->cgid = tswap32(target_ip->cgid);
3954 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3955 host_ip->mode = tswap32(target_ip->mode);
3957 host_ip->mode = tswap16(target_ip->mode);
3959 #if defined(TARGET_PPC)
3960 host_ip->__seq = tswap32(target_ip->__seq);
3962 host_ip->__seq = tswap16(target_ip->__seq);
3964 unlock_user_struct(target_sd, target_addr, 0);
/* Mirror of target_to_host_ipc_perm(): write *host_ip back into the
 * ipc_perm at the head of the guest semid64_ds at target_addr, swapping
 * each field into guest byte order.  Same per-target mode/__seq width
 * rules.  Returns -TARGET_EFAULT if the guest address cannot be mapped
 * for writing; success return elided in this dump. */
3968 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3969 struct ipc_perm *host_ip)
3971 struct target_ipc_perm *target_ip;
3972 struct target_semid64_ds *target_sd;
3974 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3975 return -TARGET_EFAULT;
3976 target_ip = &(target_sd->sem_perm);
3977 target_ip->__key = tswap32(host_ip->__key);
3978 target_ip->uid = tswap32(host_ip->uid);
3979 target_ip->gid = tswap32(host_ip->gid);
3980 target_ip->cuid = tswap32(host_ip->cuid);
3981 target_ip->cgid = tswap32(host_ip->cgid);
3982 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3983 target_ip->mode = tswap32(host_ip->mode);
3985 target_ip->mode = tswap16(host_ip->mode);
3987 #if defined(TARGET_PPC)
3988 target_ip->__seq = tswap32(host_ip->__seq);
3990 target_ip->__seq = tswap16(host_ip->__seq);
3992 unlock_user_struct(target_sd, target_addr, 1);
/* Convert a guest semid64_ds at target_addr into a host semid_ds:
 * perms via target_to_host_ipc_perm(), then the counters/time fields
 * with abi_long-width swaps.
 * NOTE(review): the second early return leaves target_sd locked;
 * confirm against the full source whether that is intentional (it is
 * harmless when lock_user is an identity mapping). */
3996 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3997 abi_ulong target_addr)
3999 struct target_semid64_ds *target_sd;
4001 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4002 return -TARGET_EFAULT;
4003 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
4004 return -TARGET_EFAULT;
4005 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
4006 host_sd->sem_otime = tswapal(target_sd->sem_otime);
4007 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
4008 unlock_user_struct(target_sd, target_addr, 0);
/* Reverse of target_to_host_semid_ds(): write a host semid_ds back to
 * the guest structure at target_addr.
 * NOTE(review): as above, the second early return leaves target_sd
 * locked — confirm against the full source. */
4012 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
4013 struct semid_ds *host_sd)
4015 struct target_semid64_ds *target_sd;
4017 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4018 return -TARGET_EFAULT;
4019 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
4020 return -TARGET_EFAULT;
4021 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
4022 target_sd->sem_otime = tswapal(host_sd->sem_otime);
4023 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
4024 unlock_user_struct(target_sd, target_addr, 1);
/* Guest-ABI seminfo layout (field list elided in this dump), followed
 * by the converter that copies a host struct seminfo into the guest
 * structure at target_addr, swapping each field via __put_user.
 * Returns -TARGET_EFAULT if the guest address is unmappable; success
 * return elided. */
4028 struct target_seminfo {
4041 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
4042 struct seminfo *host_seminfo)
4044 struct target_seminfo *target_seminfo;
4045 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
4046 return -TARGET_EFAULT;
4047 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
4048 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
4049 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
4050 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
4051 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
4052 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
4053 __put_user(host_seminfo->semume, &target_seminfo->semume);
4054 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
4055 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
4056 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
4057 unlock_user_struct(target_seminfo, target_addr, 1);
/* Members of the host-side `union semun` argument for semctl() (the
 * union's opening line is elided in this dump), followed by the start
 * of its guest-ABI counterpart, whose members are likewise elided. */
4063 struct semid_ds *buf;
4064 unsigned short *array;
4065 struct seminfo *__buf;
4068 union target_semun {
/* Build a host copy of the GETALL/SETALL semaphore-value array: query
 * nsems via IPC_STAT on the semaphore set, allocate *host_array, and
 * copy the guest values in (byte-swapped by __get_user).  On success
 * the caller owns *host_array and must free it (host_to_target_semarray
 * frees it on the write-back path).  Error paths return -TARGET_ENOMEM
 * or -TARGET_EFAULT; several braces/returns are elided in this dump. */
4075 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
4076 abi_ulong target_addr)
4079 unsigned short *array;
4081 struct semid_ds semid_ds;
4084 semun.buf = &semid_ds;
4086 ret = semctl(semid, 0, IPC_STAT, semun);
4088 return get_errno(ret);
4090 nsems = semid_ds.sem_nsems;
4092 *host_array = g_try_new(unsigned short, nsems);
4094 return -TARGET_ENOMEM;
4096 array = lock_user(VERIFY_READ, target_addr,
4097 nsems*sizeof(unsigned short), 1);
4099 g_free(*host_array); /* don't leak the host copy if the guest buffer is bad */
4100 return -TARGET_EFAULT;
4103 for(i=0; i<nsems; i++) {
4104 __get_user((*host_array)[i], &array[i]);
4106 unlock_user(array, target_addr, 0);
/* Write a host semaphore-value array back to the guest buffer and free
 * the host copy allocated by target_to_host_semarray().  nsems is again
 * obtained via IPC_STAT so the write-back length matches the set. */
4111 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
4112 unsigned short **host_array)
4115 unsigned short *array;
4117 struct semid_ds semid_ds;
4120 semun.buf = &semid_ds;
4122 ret = semctl(semid, 0, IPC_STAT, semun);
4124 return get_errno(ret);
4126 nsems = semid_ds.sem_nsems;
4128 array = lock_user(VERIFY_WRITE, target_addr,
4129 nsems*sizeof(unsigned short), 0);
4131 return -TARGET_EFAULT;
4133 for(i=0; i<nsems; i++) {
4134 __put_user((*host_array)[i], &array[i]);
4136 g_free(*host_array); /* ownership taken from target_to_host_semarray() */
4137 unlock_user(array, target_addr, 1);
/* Emulate semctl() for the guest.  The final argument is the guest's
 * `union semun` passed by value; each cmd class converts the relevant
 * union member (val / array / semid_ds buf / seminfo) to host form,
 * performs the host semctl, and converts results back.  The case labels
 * and error checks between the visible lines are elided in this dump. */
4142 static inline abi_long do_semctl(int semid, int semnum, int cmd,
4143 abi_ulong target_arg)
4145 union target_semun target_su = { .buf = target_arg };
4147 struct semid_ds dsarg;
4148 unsigned short *array = NULL;
4149 struct seminfo seminfo;
4150 abi_long ret = -TARGET_EINVAL;
4157 /* In 64 bit cross-endian situations, we will erroneously pick up
4158 * the wrong half of the union for the "val" element. To rectify
4159 * this, the entire 8-byte structure is byteswapped, followed by
4160 * a swap of the 4 byte val field. In other cases, the data is
4161 * already in proper host byte order. */
4162 if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4163 target_su.buf = tswapal(target_su.buf);
4164 arg.val = tswap32(target_su.val);
4166 arg.val = target_su.val;
4168 ret = get_errno(semctl(semid, semnum, cmd, arg));
4172 err = target_to_host_semarray(semid, &array, target_su.array);
4176 ret = get_errno(semctl(semid, semnum, cmd, arg));
4177 err = host_to_target_semarray(semid, target_su.array, &array);
4184 err = target_to_host_semid_ds(&dsarg, target_su.buf);
4188 ret = get_errno(semctl(semid, semnum, cmd, arg));
4189 err = host_to_target_semid_ds(target_su.buf, &dsarg);
4195 arg.__buf = &seminfo;
4196 ret = get_errno(semctl(semid, semnum, cmd, arg));
4197 err = host_to_target_seminfo(target_su.__buf, &seminfo);
4205 ret = get_errno(semctl(semid, semnum, cmd, NULL));
/* Guest-ABI layout of struct sembuf (semop operation descriptor);
 * sem_op/sem_flg members are elided in this dump. */
4212 struct target_sembuf {
4213 unsigned short sem_num;
/* Copy an array of nsops guest sembuf entries at target_addr into
 * host_sembuf, byte-swapping each field.  Returns -TARGET_EFAULT if
 * the guest buffer cannot be mapped; success return elided. */
4218 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4219 abi_ulong target_addr,
4222 struct target_sembuf *target_sembuf;
4225 target_sembuf = lock_user(VERIFY_READ, target_addr,
4226 nsops*sizeof(struct target_sembuf), 1);
4228 return -TARGET_EFAULT;
4230 for(i=0; i<nsops; i++) {
4231 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4232 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4233 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4236 unlock_user(target_sembuf, target_addr, 0);
/* Emulate semop(): convert the guest sembuf array and issue the
 * (signal-safe) host semtimedop with no timeout.
 * NOTE(review): `sops` is a VLA sized by guest-controlled nsops — a
 * large value risks stack exhaustion; consider bounding/heap-allocating. */
4241 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
4243 struct sembuf sops[nsops];
4245 if (target_to_host_sembuf(sops, ptr, nsops))
4246 return -TARGET_EFAULT;
4248 return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
/* Guest-ABI layout of msqid_ds.  As with target_semid64_ds, 32-bit
 * ABIs carry a padding word after each time field; the braces and
 * #endif lines are elided in this dump. */
4251 struct target_msqid_ds
4253 struct target_ipc_perm msg_perm;
4254 abi_ulong msg_stime;
4255 #if TARGET_ABI_BITS == 32
4256 abi_ulong __unused1; /* pad after msg_stime on 32-bit ABIs */
4258 abi_ulong msg_rtime;
4259 #if TARGET_ABI_BITS == 32
4260 abi_ulong __unused2; /* pad after msg_rtime on 32-bit ABIs */
4262 abi_ulong msg_ctime;
4263 #if TARGET_ABI_BITS == 32
4264 abi_ulong __unused3; /* pad after msg_ctime on 32-bit ABIs */
4266 abi_ulong __msg_cbytes;
4268 abi_ulong msg_qbytes;
4269 abi_ulong msg_lspid;
4270 abi_ulong msg_lrpid;
4271 abi_ulong __unused4;
4272 abi_ulong __unused5;
/* Convert a guest msqid_ds at target_addr into host form: perms via
 * target_to_host_ipc_perm(), then the time/count fields with
 * abi_long-width swaps.  NOTE(review): as in the semid_ds converter,
 * the second early return leaves target_md locked — confirm. */
4275 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4276 abi_ulong target_addr)
4278 struct target_msqid_ds *target_md;
4280 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4281 return -TARGET_EFAULT;
4282 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4283 return -TARGET_EFAULT;
4284 host_md->msg_stime = tswapal(target_md->msg_stime);
4285 host_md->msg_rtime = tswapal(target_md->msg_rtime);
4286 host_md->msg_ctime = tswapal(target_md->msg_ctime);
4287 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4288 host_md->msg_qnum = tswapal(target_md->msg_qnum);
4289 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4290 host_md->msg_lspid = tswapal(target_md->msg_lspid);
4291 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4292 unlock_user_struct(target_md, target_addr, 0);
/* Reverse of target_to_host_msqid_ds(): write a host msqid_ds back to
 * the guest structure at target_addr in guest byte order. */
4296 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4297 struct msqid_ds *host_md)
4299 struct target_msqid_ds *target_md;
4301 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4302 return -TARGET_EFAULT;
4303 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4304 return -TARGET_EFAULT;
4305 target_md->msg_stime = tswapal(host_md->msg_stime);
4306 target_md->msg_rtime = tswapal(host_md->msg_rtime);
4307 target_md->msg_ctime = tswapal(host_md->msg_ctime);
4308 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4309 target_md->msg_qnum = tswapal(host_md->msg_qnum);
4310 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4311 target_md->msg_lspid = tswapal(host_md->msg_lspid);
4312 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4313 unlock_user_struct(target_md, target_addr, 1);
/* Guest-ABI layout of struct msginfo (msgctl IPC_INFO/MSG_INFO result);
 * the int members before msgseg are elided in this dump. */
4317 struct target_msginfo {
4325 unsigned short int msgseg;
/* Copy a host struct msginfo into the guest structure at target_addr,
 * field by field via __put_user (which handles the byte swap). */
4328 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4329 struct msginfo *host_msginfo)
4331 struct target_msginfo *target_msginfo;
4332 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4333 return -TARGET_EFAULT;
4334 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4335 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4336 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4337 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4338 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4339 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4340 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4341 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4342 unlock_user_struct(target_msginfo, target_addr, 1);
/* Emulate msgctl(): convert the command-specific payload (msqid_ds for
 * stat/set, msginfo for the info commands) between guest and host.
 * The case labels between the visible lines are elided in this dump. */
4346 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4348 struct msqid_ds dsarg;
4349 struct msginfo msginfo;
4350 abi_long ret = -TARGET_EINVAL;
4358 if (target_to_host_msqid_ds(&dsarg,ptr))
4359 return -TARGET_EFAULT;
4360 ret = get_errno(msgctl(msgid, cmd, &dsarg));
4361 if (host_to_target_msqid_ds(ptr,&dsarg))
4362 return -TARGET_EFAULT;
4365 ret = get_errno(msgctl(msgid, cmd, NULL));
4369 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4370 if (host_to_target_msginfo(ptr, &msginfo))
4371 return -TARGET_EFAULT;
/* Guest-ABI layout of struct msgbuf (mtype + flexible mtext); members
 * elided in this dump. */
4378 struct target_msgbuf {
/* Emulate msgsnd(): copy the guest message (mtype swapped, mtext raw)
 * into a freshly allocated host msgbuf and hand it to the signal-safe
 * host msgsnd.  Rejects negative msgsz with -TARGET_EINVAL.
 * NOTE(review): the g_free(host_mb) after the syscall appears to be
 * among the lines elided in this dump — confirm no leak in full source. */
4383 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4384 ssize_t msgsz, int msgflg)
4386 struct target_msgbuf *target_mb;
4387 struct msgbuf *host_mb;
4391 return -TARGET_EINVAL;
4394 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4395 return -TARGET_EFAULT;
4396 host_mb = g_try_malloc(msgsz + sizeof(long));
4398 unlock_user_struct(target_mb, msgp, 0);
4399 return -TARGET_ENOMEM;
4401 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4402 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4403 ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4405 unlock_user_struct(target_mb, msgp, 0);
/* Emulate msgrcv(): receive into a host msgbuf, then copy the received
 * mtext into the guest buffer (located just past the guest mtype word)
 * and write back the swapped mtype.  ret holds the received byte count
 * on success.  Cleanup labels/braces are elided in this dump. */
4410 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4411 ssize_t msgsz, abi_long msgtyp,
4414 struct target_msgbuf *target_mb;
4416 struct msgbuf *host_mb;
4420 return -TARGET_EINVAL;
4423 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4424 return -TARGET_EFAULT;
4426 host_mb = g_try_malloc(msgsz + sizeof(long));
4428 ret = -TARGET_ENOMEM;
4431 ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4434 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4435 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4436 if (!target_mtext) {
4437 ret = -TARGET_EFAULT;
4440 memcpy(target_mb->mtext, host_mb->mtext, ret);
4441 unlock_user(target_mtext, target_mtext_addr, ret);
4444 target_mb->mtype = tswapal(host_mb->mtype);
4448 unlock_user_struct(target_mb, msgp, 1);
/* Convert a guest shmid_ds at target_addr into host form: perms via
 * target_to_host_ipc_perm(), remaining fields via __get_user. */
4453 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4454 abi_ulong target_addr)
4456 struct target_shmid_ds *target_sd;
4458 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4459 return -TARGET_EFAULT;
4460 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4461 return -TARGET_EFAULT;
4462 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4463 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4464 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4465 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4466 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4467 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4468 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4469 unlock_user_struct(target_sd, target_addr, 0);
/* Reverse of target_to_host_shmid_ds(): write a host shmid_ds back to
 * the guest structure at target_addr via __put_user. */
4473 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4474 struct shmid_ds *host_sd)
4476 struct target_shmid_ds *target_sd;
4478 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4479 return -TARGET_EFAULT;
4480 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4481 return -TARGET_EFAULT;
4482 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4483 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4484 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4485 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4486 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4487 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4488 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4489 unlock_user_struct(target_sd, target_addr, 1);
/* Guest-ABI shminfo layout (members elided in this dump) and the
 * converter that copies a host struct shminfo into the guest structure
 * at target_addr for shmctl IPC_INFO. */
4493 struct target_shminfo {
4501 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4502 struct shminfo *host_shminfo)
4504 struct target_shminfo *target_shminfo;
4505 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4506 return -TARGET_EFAULT;
4507 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4508 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4509 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4510 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4511 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4512 unlock_user_struct(target_shminfo, target_addr, 1);
/* Guest-ABI shm_info layout (leading members elided in this dump) and
 * the converter copying a host struct shm_info to the guest for
 * shmctl SHM_INFO. */
4516 struct target_shm_info {
4521 abi_ulong swap_attempts;
4522 abi_ulong swap_successes;
4525 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4526 struct shm_info *host_shm_info)
4528 struct target_shm_info *target_shm_info;
4529 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4530 return -TARGET_EFAULT;
4531 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4532 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4533 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4534 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4535 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4536 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4537 unlock_user_struct(target_shm_info, target_addr, 1);
/* Emulate shmctl(): convert the per-command payload (shmid_ds for
 * stat/set, shminfo for IPC_INFO, shm_info for SHM_INFO) between guest
 * and host.  Case labels between visible lines are elided in this dump. */
4541 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4543 struct shmid_ds dsarg;
4544 struct shminfo shminfo;
4545 struct shm_info shm_info;
4546 abi_long ret = -TARGET_EINVAL;
4554 if (target_to_host_shmid_ds(&dsarg, buf))
4555 return -TARGET_EFAULT;
4556 ret = get_errno(shmctl(shmid, cmd, &dsarg));
4557 if (host_to_target_shmid_ds(buf, &dsarg))
4558 return -TARGET_EFAULT;
4561 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4562 if (host_to_target_shminfo(buf, &shminfo))
4563 return -TARGET_EFAULT;
4566 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4567 if (host_to_target_shm_info(buf, &shm_info))
4568 return -TARGET_EFAULT;
4573 ret = get_errno(shmctl(shmid, cmd, NULL));
/* Emulate shmat(): attach a host shm segment at the guest-requested
 * address (translated via g2h), or — when the guest passed no address —
 * at a free guest VMA found with mmap_find_vma(), using SHM_REMAP so
 * the host mapping replaces the placeholder.  On success the attached
 * range gets guest page flags and is recorded in shm_regions[] so
 * do_shmdt() can undo it.  Branch/brace lines are elided in this dump. */
4580 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
4584 struct shmid_ds shm_info;
4587 /* find out the length of the shared memory segment */
4588 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4589 if (is_error(ret)) {
4590 /* can't get length, bail out */
4597 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
4599 abi_ulong mmap_start;
4601 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
4603 if (mmap_start == -1) {
4605 host_raddr = (void *)-1; /* no free guest VMA: report failure */
4607 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
4610 if (host_raddr == (void *)-1) {
4612 return get_errno((long)host_raddr);
4614 raddr=h2g((unsigned long)host_raddr);
4616 page_set_flags(raddr, raddr + shm_info.shm_segsz,
4617 PAGE_VALID | PAGE_READ |
4618 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
4620 for (i = 0; i < N_SHM_REGIONS; i++) {
4621 if (!shm_regions[i].in_use) {
4622 shm_regions[i].in_use = true;
4623 shm_regions[i].start = raddr;
4624 shm_regions[i].size = shm_info.shm_segsz;
/* Emulate shmdt(): release the shm_regions[] slot recorded by
 * do_shmat(), clear the guest page flags for the range, and detach the
 * host mapping. */
4634 static inline abi_long do_shmdt(abi_ulong shmaddr)
4638 for (i = 0; i < N_SHM_REGIONS; ++i) {
4639 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4640 shm_regions[i].in_use = false;
4641 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4646 return get_errno(shmdt(g2h(shmaddr)));
4649 #ifdef TARGET_NR_ipc
4650 /* ??? This only works with linear mappings. */
4651 /* do_ipc() must return target values and target errnos. */
/* Multiplexer for the legacy ipc(2) syscall: the low 16 bits of `call`
 * select the operation (sem*/msg*/shm*), the high 16 bits carry a
 * version used by the old msgrcv calling convention, which passes a
 * kludge struct {msgp; msgtyp} through `ptr`.  Case labels between the
 * visible lines are elided in this dump. */
4652 static abi_long do_ipc(unsigned int call, abi_long first,
4653 abi_long second, abi_long third,
4654 abi_long ptr, abi_long fifth)
4659 version = call >> 16;
4664 ret = do_semop(first, ptr, second);
4668 ret = get_errno(semget(first, second, third));
4671 case IPCOP_semctl: {
4672 /* The semun argument to semctl is passed by value, so dereference the
4675 get_user_ual(atptr, ptr);
4676 ret = do_semctl(first, second, third, atptr);
4681 ret = get_errno(msgget(first, second));
4685 ret = do_msgsnd(first, ptr, second, third);
4689 ret = do_msgctl(first, second, ptr);
4696 struct target_ipc_kludge {
4701 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4702 ret = -TARGET_EFAULT;
4706 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4708 unlock_user_struct(tmp, ptr, 0);
4712 ret = do_msgrcv(first, ptr, second, fifth, third);
/* shmat: the attach address is returned through the guest pointer in
 * `third` rather than as the syscall result. */
4721 raddr = do_shmat(first, ptr, second);
4722 if (is_error(raddr))
4723 return get_errno(raddr);
4724 if (put_user_ual(raddr, third))
4725 return -TARGET_EFAULT;
4729 ret = -TARGET_EINVAL;
4734 ret = do_shmdt(ptr);
4738 /* IPC_* flag values are the same on all linux platforms */
4739 ret = get_errno(shmget(first, second, third));
4742 /* IPC_* and SHM_* command values are the same on all linux platforms */
4744 ret = do_shmctl(first, second, ptr);
4747 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
4748 ret = -TARGET_ENOSYS;
4755 /* kernel structure types definitions */
/* First pass over syscall_types.h: generate a STRUCT_<name> enum value
 * per kernel structure; second pass: generate an argtype descriptor
 * array struct_<name>_def for each one (SPECIAL structs get hand-written
 * descriptors elsewhere).  #undef lines are partially elided in this dump. */
4757 #define STRUCT(name, ...) STRUCT_ ## name,
4758 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4760 #include "syscall_types.h"
4764 #undef STRUCT_SPECIAL
4766 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4767 #define STRUCT_SPECIAL(name)
4768 #include "syscall_types.h"
4770 #undef STRUCT_SPECIAL
/* One table entry per supported ioctl: guest/host command numbers, an
 * access mode (IOC_R/W/RW), an optional special-case handler, and a
 * thunk argtype descriptor for the argument struct.  Some member lines
 * are elided in this dump.  MAX_STRUCT_SIZE bounds the on-stack
 * conversion buffer used by do_ioctl(). */
4772 typedef struct IOCTLEntry IOCTLEntry;
4774 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
4775 int fd, int cmd, abi_long arg);
4779 unsigned int host_cmd;
4782 do_ioctl_fn *do_ioctl; /* non-NULL for ioctls needing custom conversion */
4783 const argtype arg_type[5];
4786 #define IOC_R 0x0001
4787 #define IOC_W 0x0002
4788 #define IOC_RW (IOC_R | IOC_W)
4790 #define MAX_STRUCT_SIZE 4096
4792 #ifdef CONFIG_FIEMAP
4793 /* So fiemap access checks don't overflow on 32 bit systems.
4794 * This is very slightly smaller than the limit imposed by
4795 * the underlying kernel.
4797 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4798 / sizeof(struct fiemap_extent))
/* Special-case handler for FS_IOC_FIEMAP: the argument is a struct
 * fiemap followed by fm_extent_count fiemap_extents filled in by the
 * kernel.  Converts the header in, sizes an output buffer (heap when
 * larger than MAX_STRUCT_SIZE), issues the ioctl, then converts the
 * header and each returned extent back to guest layout.  Cleanup and
 * some braces are elided in this dump. */
4800 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4801 int fd, int cmd, abi_long arg)
4803 /* The parameter for this ioctl is a struct fiemap followed
4804 * by an array of struct fiemap_extent whose size is set
4805 * in fiemap->fm_extent_count. The array is filled in by the
4808 int target_size_in, target_size_out;
4810 const argtype *arg_type = ie->arg_type;
4811 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4814 int i, extent_size = thunk_type_size(extent_arg_type, 0);
4818 assert(arg_type[0] == TYPE_PTR);
4819 assert(ie->access == IOC_RW);
4821 target_size_in = thunk_type_size(arg_type, 0);
4822 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4824 return -TARGET_EFAULT;
4826 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4827 unlock_user(argptr, arg, 0);
4828 fm = (struct fiemap *)buf_temp;
4829 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4830 return -TARGET_EINVAL; /* guest-supplied count would overflow sizing below */
4833 outbufsz = sizeof (*fm) +
4834 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4836 if (outbufsz > MAX_STRUCT_SIZE) {
4837 /* We can't fit all the extents into the fixed size buffer.
4838 * Allocate one that is large enough and use it instead.
4840 fm = g_try_malloc(outbufsz);
4842 return -TARGET_ENOMEM;
4844 memcpy(fm, buf_temp, sizeof(struct fiemap));
4847 ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4848 if (!is_error(ret)) {
4849 target_size_out = target_size_in;
4850 /* An extent_count of 0 means we were only counting the extents
4851 * so there are no structs to copy
4853 if (fm->fm_extent_count != 0) {
4854 target_size_out += fm->fm_mapped_extents * extent_size;
4856 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4858 ret = -TARGET_EFAULT;
4860 /* Convert the struct fiemap */
4861 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4862 if (fm->fm_extent_count != 0) {
4863 p = argptr + target_size_in;
4864 /* ...and then all the struct fiemap_extents */
4865 for (i = 0; i < fm->fm_mapped_extents; i++) {
4866 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4871 unlock_user(argptr, arg, target_size_out);
/* Special-case handler for SIOCGIFCONF: struct ifconf embeds a pointer
 * to an ifreq array, so the generic thunk can't handle it.  Converts
 * the ifconf header in, builds a host-side ifreq buffer (heap when too
 * large for buf_temp), runs the ioctl, then scales ifc_len back to
 * guest ifreq size and copies header and array out separately.
 * NOTE(review): the oversized path uses bare malloc() with no visible
 * NULL-check indentation or free() in this dump — confirm against the
 * full source (upstream later reworked this).  Braces elided. */
4881 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4882 int fd, int cmd, abi_long arg)
4884 const argtype *arg_type = ie->arg_type;
4888 struct ifconf *host_ifconf;
4890 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4891 int target_ifreq_size;
4896 abi_long target_ifc_buf;
4900 assert(arg_type[0] == TYPE_PTR);
4901 assert(ie->access == IOC_RW);
4904 target_size = thunk_type_size(arg_type, 0);
4906 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4908 return -TARGET_EFAULT;
4909 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4910 unlock_user(argptr, arg, 0);
4912 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4913 target_ifc_len = host_ifconf->ifc_len;
4914 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4916 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4917 nb_ifreq = target_ifc_len / target_ifreq_size;
4918 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4920 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4921 if (outbufsz > MAX_STRUCT_SIZE) {
4922 /* We can't fit all the extents into the fixed size buffer.
4923 * Allocate one that is large enough and use it instead.
4925 host_ifconf = malloc(outbufsz);
4927 return -TARGET_ENOMEM;
4929 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4932 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
4934 host_ifconf->ifc_len = host_ifc_len;
4935 host_ifconf->ifc_buf = host_ifc_buf;
4937 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4938 if (!is_error(ret)) {
4939 /* convert host ifc_len to target ifc_len */
4941 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4942 target_ifc_len = nb_ifreq * target_ifreq_size;
4943 host_ifconf->ifc_len = target_ifc_len;
4945 /* restore target ifc_buf */
4947 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4949 /* copy struct ifconf to target user */
4951 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4953 return -TARGET_EFAULT;
4954 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4955 unlock_user(argptr, arg, target_size);
4957 /* copy ifreq[] to target user */
4959 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4960 for (i = 0; i < nb_ifreq ; i++) {
4961 thunk_convert(argptr + i * target_ifreq_size,
4962 host_ifc_buf + i * sizeof(struct ifreq),
4963 ifreq_arg_type, THUNK_TARGET);
4965 unlock_user(argptr, target_ifc_buf, target_ifc_len);
/* Special-case handler for device-mapper ioctls (DM_*): struct dm_ioctl
 * carries a variable-length payload at data_start whose layout depends
 * on the command, so each direction needs per-command conversion.
 * Inbound: convert the fixed header, allocate big_buf large enough for
 * the guest-declared data_size, and convert/copy the payload.
 * Outbound: walk the command-specific result lists (name lists, target
 * specs, versions), re-linking the `next` offsets for guest string
 * lengths and setting DM_BUFFER_FULL_FLAG when the guest buffer is too
 * small.  Many braces, declarations and cleanup lines are elided in
 * this dump — treat the flow comments here as a guide, not gospel. */
4975 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4976 int cmd, abi_long arg)
4979 struct dm_ioctl *host_dm;
4980 abi_long guest_data;
4981 uint32_t guest_data_size;
4983 const argtype *arg_type = ie->arg_type;
4985 void *big_buf = NULL;
4989 target_size = thunk_type_size(arg_type, 0);
4990 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4992 ret = -TARGET_EFAULT;
4995 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4996 unlock_user(argptr, arg, 0);
4998 /* buf_temp is too small, so fetch things into a bigger buffer */
4999 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5000 memcpy(big_buf, buf_temp, target_size);
5004 guest_data = arg + host_dm->data_start;
5005 if ((guest_data - arg) < 0) {
5009 guest_data_size = host_dm->data_size - host_dm->data_start;
5010 host_data = (char*)host_dm + host_dm->data_start;
/* --- inbound payload conversion, keyed on the host command --- */
5012 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5013 switch (ie->host_cmd) {
5015 case DM_LIST_DEVICES:
5018 case DM_DEV_SUSPEND:
5021 case DM_TABLE_STATUS:
5022 case DM_TABLE_CLEAR:
5024 case DM_LIST_VERSIONS:
5028 case DM_DEV_SET_GEOMETRY:
5029 /* data contains only strings */
5030 memcpy(host_data, argptr, guest_data_size);
5033 memcpy(host_data, argptr, guest_data_size);
5034 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5038 void *gspec = argptr;
5039 void *cur_data = host_data;
5040 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5041 int spec_size = thunk_type_size(arg_type, 0);
5044 for (i = 0; i < host_dm->target_count; i++) {
5045 struct dm_target_spec *spec = cur_data;
5049 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5050 slen = strlen((char*)gspec + spec_size) + 1;
5052 spec->next = sizeof(*spec) + slen;
5053 strcpy((char*)&spec[1], gspec + spec_size);
5055 cur_data += spec->next;
5060 ret = -TARGET_EINVAL;
5061 unlock_user(argptr, guest_data, 0);
5064 unlock_user(argptr, guest_data, 0);
5066 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
/* --- outbound result conversion --- */
5067 if (!is_error(ret)) {
5068 guest_data = arg + host_dm->data_start;
5069 guest_data_size = host_dm->data_size - host_dm->data_start;
5070 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5071 switch (ie->host_cmd) {
5076 case DM_DEV_SUSPEND:
5079 case DM_TABLE_CLEAR:
5081 case DM_DEV_SET_GEOMETRY:
5082 /* no return data */
5084 case DM_LIST_DEVICES:
5086 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5087 uint32_t remaining_data = guest_data_size;
5088 void *cur_data = argptr;
5089 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5090 int nl_size = 12; /* can't use thunk_size due to alignment */
5093 uint32_t next = nl->next;
5095 nl->next = nl_size + (strlen(nl->name) + 1);
5097 if (remaining_data < nl->next) {
5098 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5101 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5102 strcpy(cur_data + nl_size, nl->name);
5103 cur_data += nl->next;
5104 remaining_data -= nl->next;
5108 nl = (void*)nl + next;
5113 case DM_TABLE_STATUS:
5115 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5116 void *cur_data = argptr;
5117 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5118 int spec_size = thunk_type_size(arg_type, 0);
5121 for (i = 0; i < host_dm->target_count; i++) {
5122 uint32_t next = spec->next;
5123 int slen = strlen((char*)&spec[1]) + 1;
5124 spec->next = (cur_data - argptr) + spec_size + slen;
5125 if (guest_data_size < spec->next) {
5126 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5129 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5130 strcpy(cur_data + spec_size, (char*)&spec[1]);
5131 cur_data = argptr + spec->next;
5132 spec = (void*)host_dm + host_dm->data_start + next;
5138 void *hdata = (void*)host_dm + host_dm->data_start;
5139 int count = *(uint32_t*)hdata;
5140 uint64_t *hdev = hdata + 8;
5141 uint64_t *gdev = argptr + 8;
5144 *(uint32_t*)argptr = tswap32(count);
5145 for (i = 0; i < count; i++) {
5146 *gdev = tswap64(*hdev);
5152 case DM_LIST_VERSIONS:
5154 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5155 uint32_t remaining_data = guest_data_size;
5156 void *cur_data = argptr;
5157 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5158 int vers_size = thunk_type_size(arg_type, 0);
5161 uint32_t next = vers->next;
5163 vers->next = vers_size + (strlen(vers->name) + 1);
5165 if (remaining_data < vers->next) {
5166 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5169 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5170 strcpy(cur_data + vers_size, vers->name);
5171 cur_data += vers->next;
5172 remaining_data -= vers->next;
5176 vers = (void*)vers + next;
5181 unlock_user(argptr, guest_data, 0);
5182 ret = -TARGET_EINVAL;
5185 unlock_user(argptr, guest_data, guest_data_size);
5187 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5189 ret = -TARGET_EFAULT;
5192 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5193 unlock_user(argptr, arg, target_size);
/* Special-case handler for BLKPG: struct blkpg_ioctl_arg embeds a
 * pointer to a struct blkpg_partition payload.  Convert the outer
 * struct, validate the opcode, convert the pointed-to partition into a
 * local copy, swizzle the data pointer to it, and issue the ioctl. */
5200 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5201 int cmd, abi_long arg)
5205 const argtype *arg_type = ie->arg_type;
5206 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5209 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5210 struct blkpg_partition host_part;
5212 /* Read and convert blkpg */
5214 target_size = thunk_type_size(arg_type, 0);
5215 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5217 ret = -TARGET_EFAULT;
5220 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5221 unlock_user(argptr, arg, 0);
5223 switch (host_blkpg->op) {
5224 case BLKPG_ADD_PARTITION:
5225 case BLKPG_DEL_PARTITION:
5226 /* payload is struct blkpg_partition */
5229 /* Unknown opcode */
5230 ret = -TARGET_EINVAL;
5234 /* Read and convert blkpg->data */
5235 arg = (abi_long)(uintptr_t)host_blkpg->data;
5236 target_size = thunk_type_size(part_arg_type, 0);
5237 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5239 ret = -TARGET_EFAULT;
5242 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5243 unlock_user(argptr, arg, 0);
5245 /* Swizzle the data pointer to our local copy and call! */
5246 host_blkpg->data = &host_part;
5247 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
/* Special-case handler for routing-table ioctls (struct rtentry): the
 * struct contains an rt_dev char* that the generic thunk would treat
 * as an opaque pointer.  Convert the struct field by field using the
 * thunk's offset tables, intercepting rt_dev to lock the guest device
 * string into host memory; unlock it again after the ioctl. */
5253 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5254 int fd, int cmd, abi_long arg)
5256 const argtype *arg_type = ie->arg_type;
5257 const StructEntry *se;
5258 const argtype *field_types;
5259 const int *dst_offsets, *src_offsets;
5262 abi_ulong *target_rt_dev_ptr;
5263 unsigned long *host_rt_dev_ptr;
5267 assert(ie->access == IOC_W);
5268 assert(*arg_type == TYPE_PTR);
5270 assert(*arg_type == TYPE_STRUCT);
5271 target_size = thunk_type_size(arg_type, 0);
5272 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5274 return -TARGET_EFAULT;
5277 assert(*arg_type == (int)STRUCT_rtentry);
5278 se = struct_entries + *arg_type++;
5279 assert(se->convert[0] == NULL);
5280 /* convert struct here to be able to catch rt_dev string */
5281 field_types = se->field_types;
5282 dst_offsets = se->field_offsets[THUNK_HOST];
5283 src_offsets = se->field_offsets[THUNK_TARGET];
5284 for (i = 0; i < se->nb_fields; i++) {
5285 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5286 assert(*field_types == TYPE_PTRVOID);
5287 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5288 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5289 if (*target_rt_dev_ptr != 0) {
5290 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5291 tswapal(*target_rt_dev_ptr));
5292 if (!*host_rt_dev_ptr) {
5293 unlock_user(argptr, arg, 0);
5294 return -TARGET_EFAULT;
5297 *host_rt_dev_ptr = 0; /* guest passed NULL device name */
5302 field_types = thunk_convert(buf_temp + dst_offsets[i],
5303 argptr + src_offsets[i],
5304 field_types, THUNK_HOST);
5306 unlock_user(argptr, arg, 0);
5308 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5309 if (*host_rt_dev_ptr != 0) {
5310 unlock_user((void *)*host_rt_dev_ptr,
5311 *target_rt_dev_ptr, 0);
/*
 * KDSIGACCEPT carries a signal number in 'arg'; translate it from the
 * target's signal numbering to the host's before issuing the ioctl.
 */
5316 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5317 int fd, int cmd, abi_long arg)
5319 int sig = target_to_host_signal(arg);
5320 return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
/*
 * Table of all ioctls the emulator knows how to translate, terminated by
 * a zero target_cmd (see do_ioctl and syscall_init).  IOCTL_SPECIAL
 * entries supply a custom do_ioctl_* handler via 'dofn'; plain IOCTL
 * entries use the generic thunk-based conversion.
 * NOTE(review): the entry list itself is elided here -- presumably
 * generated by expanding these macros over an included header; confirm
 * against the full file.
 */
5323 static IOCTLEntry ioctl_entries[] = {
5324 #define IOCTL(cmd, access, ...) \
5325 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5326 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5327 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5332 /* ??? Implement proper locking for ioctls. */
5333 /* do_ioctl() Must return target values and target errnos. */
/*
 * Central ioctl dispatcher: look the command up in ioctl_entries, then
 * either delegate to a special handler (ie->do_ioctl) or convert the
 * argument generically according to the entry's argtype/access flags.
 * NOTE(review): elided extract -- the lookup loop header, case labels
 * and some braces between the numbered statements are not shown.
 */
5334 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5336 const IOCTLEntry *ie;
5337 const argtype *arg_type;
/* scratch buffer that receives the host-converted struct argument */
5339 uint8_t buf_temp[MAX_STRUCT_SIZE];
/* linear search; a zero target_cmd terminates the table */
5345 if (ie->target_cmd == 0) {
5346 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5347 return -TARGET_ENOSYS;
5349 if (ie->target_cmd == cmd)
5353 arg_type = ie->arg_type;
5355 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
/* entries registered via IOCTL_SPECIAL get a dedicated handler */
5358 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5361 switch(arg_type[0]) {
/* no-argument ioctl (case label elided) */
5364 ret = get_errno(safe_ioctl(fd, ie->host_cmd));
/* scalar argument passed through unchanged (case label elided) */
5368 ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
/* pointer-to-struct argument: convert per ie->access direction */
5372 target_size = thunk_type_size(arg_type, 0);
5373 switch(ie->access) {
/* IOC_R: kernel writes; convert result back to target layout */
5375 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5376 if (!is_error(ret)) {
5377 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5379 return -TARGET_EFAULT;
5380 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5381 unlock_user(argptr, arg, target_size);
/* IOC_W: kernel reads; convert the guest struct to host layout first */
5385 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5387 return -TARGET_EFAULT;
5388 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5389 unlock_user(argptr, arg, 0);
5390 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
/* IOC_RW: convert in, call, then convert the result back out */
5394 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5396 return -TARGET_EFAULT;
5397 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5398 unlock_user(argptr, arg, 0);
5399 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5400 if (!is_error(ret)) {
5401 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5403 return -TARGET_EFAULT;
5404 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5405 unlock_user(argptr, arg, target_size);
/* unknown argtype: report and fail */
5411 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5412 (long)cmd, arg_type[0]);
5413 ret = -TARGET_ENOSYS;
/*
 * termios c_iflag translation table (target <-> host), used by the
 * target_to_host_bitmask/host_to_target_bitmask helpers below.
 * Entry layout: { target_mask, target_bits, host_mask, host_bits }.
 */
5419 static const bitmask_transtbl iflag_tbl[] = {
5420 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5421 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5422 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5423 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5424 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5425 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5426 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5427 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5428 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5429 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5430 { TARGET_IXON, TARGET_IXON, IXON, IXON },
5431 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5432 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5433 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
/*
 * termios c_oflag translation table (target <-> host).  Multi-bit delay
 * fields (NLDLY, CRDLY, TABDLY, ...) get one entry per possible value,
 * all sharing the same mask.
 */
5437 static const bitmask_transtbl oflag_tbl[] = {
5438 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5439 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5440 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5441 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5442 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5443 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5444 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5445 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5446 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5447 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5448 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5449 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5450 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5451 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5452 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5453 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5454 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5455 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5456 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5457 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5458 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5459 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5460 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5461 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
/*
 * termios c_cflag translation table (target <-> host).  The baud-rate
 * field (CBAUD) and character-size field (CSIZE) are multi-bit, so each
 * possible value gets its own entry under the shared mask.
 */
5465 static const bitmask_transtbl cflag_tbl[] = {
5466 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5467 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5468 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5469 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5470 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5471 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5472 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5473 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5474 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5475 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5476 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5477 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5478 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5479 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5480 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5481 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5482 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5483 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5484 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5485 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5486 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5487 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5488 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5489 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5490 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5491 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5492 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5493 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5494 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5495 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5496 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
/* termios c_lflag translation table (target <-> host); all single-bit flags. */
5500 static const bitmask_transtbl lflag_tbl[] = {
5501 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5502 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5503 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5504 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5505 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5506 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5507 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5508 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5509 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5510 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5511 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5512 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5513 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5514 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5515 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
/*
 * Convert a guest struct target_termios to the host's layout: translate
 * each flag word through its bitmask table (byte-swapping the guest
 * value first) and remap the control-character array index by index.
 * c_cc slots with no target equivalent are left zero by the memset.
 */
5519 static void target_to_host_termios (void *dst, const void *src)
5521 struct host_termios *host = dst;
5522 const struct target_termios *target = src;
5525 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5527 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5529 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5531 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
/* line discipline is a plain byte; no swap needed */
5532 host->c_line = target->c_line;
5534 memset(host->c_cc, 0, sizeof(host->c_cc));
5535 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5536 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5537 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5538 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5539 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5540 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5541 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5542 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5543 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5544 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5545 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5546 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5547 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5548 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5549 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5550 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5551 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
/*
 * Inverse of target_to_host_termios: translate each host flag word to
 * target bits (then byte-swap into guest order) and remap the
 * control-character array back to the target's indices.
 */
5554 static void host_to_target_termios (void *dst, const void *src)
5556 struct target_termios *target = dst;
5557 const struct host_termios *host = src;
5560 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5562 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5564 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5566 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5567 target->c_line = host->c_line;
5569 memset(target->c_cc, 0, sizeof(target->c_cc));
5570 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5571 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5572 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5573 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5574 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5575 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5576 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5577 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5578 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5579 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5580 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5581 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5582 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5583 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5584 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5585 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5586 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
/*
 * Thunk descriptor for struct termios: registers the two custom
 * converters above so the generic ioctl path can translate TCGETS/
 * TCSETS-style arguments.  Index 0 is the host->target direction,
 * index 1 target->host, matching the convert[] usage elsewhere.
 */
5589 static const StructEntry struct_termios_def = {
5590 .convert = { host_to_target_termios, target_to_host_termios },
5591 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5592 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
/*
 * mmap(2) flags translation table (target <-> host), same entry layout
 * as the termios tables above.
 * NOTE(review): the table's trailing entries/terminator are elided here.
 */
5595 static bitmask_transtbl mmap_flags_tbl[] = {
5596 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5597 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5598 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5599 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
5600 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
5601 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
5602 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
5603 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5604 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE, MAP_NORESERVE,
5609 #if defined(TARGET_I386)
5611 /* NOTE: there is really one LDT for all the threads */
5612 static uint8_t *ldt_table;
/*
 * modify_ldt(2) read path: copy the emulated LDT (capped at
 * min(full table size, bytecount)) out to guest memory at 'ptr'.
 */
5614 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5621 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5622 if (size > bytecount)
5624 p = lock_user(VERIFY_WRITE, ptr, size, 0);
5626 return -TARGET_EFAULT;
5627 /* ??? Should this by byteswapped? */
5628 memcpy(p, ldt_table, size);
5629 unlock_user(p, ptr, size);
5633 /* XXX: add locking support */
/*
 * modify_ldt(2) write path: validate a guest target_modify_ldt_ldt_s,
 * lazily allocate the emulated LDT on first use, and encode the request
 * into the two 32-bit descriptor words of the selected LDT slot.
 * 'oldmode' selects the legacy (func==1) semantics.
 * NOTE(review): elided extract -- some braces/else-arms between the
 * numbered statements are not shown.
 */
5634 static abi_long write_ldt(CPUX86State *env,
5635 abi_ulong ptr, unsigned long bytecount, int oldmode)
5637 struct target_modify_ldt_ldt_s ldt_info;
5638 struct target_modify_ldt_ldt_s *target_ldt_info;
5639 int seg_32bit, contents, read_exec_only, limit_in_pages;
5640 int seg_not_present, useable, lm;
5641 uint32_t *lp, entry_1, entry_2;
5643 if (bytecount != sizeof(ldt_info))
5644 return -TARGET_EINVAL;
5645 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5646 return -TARGET_EFAULT;
5647 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5648 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5649 ldt_info.limit = tswap32(target_ldt_info->limit);
5650 ldt_info.flags = tswap32(target_ldt_info->flags);
5651 unlock_user_struct(target_ldt_info, ptr, 0);
5653 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5654 return -TARGET_EINVAL;
/* unpack the packed 'flags' word into individual descriptor attributes */
5655 seg_32bit = ldt_info.flags & 1;
5656 contents = (ldt_info.flags >> 1) & 3;
5657 read_exec_only = (ldt_info.flags >> 3) & 1;
5658 limit_in_pages = (ldt_info.flags >> 4) & 1;
5659 seg_not_present = (ldt_info.flags >> 5) & 1;
5660 useable = (ldt_info.flags >> 6) & 1;
5664 lm = (ldt_info.flags >> 7) & 1;
/* contents == 3 would be a task/conforming-code style entry; only allow
 * it when marked not-present */
5666 if (contents == 3) {
5668 return -TARGET_EINVAL;
5669 if (seg_not_present == 0)
5670 return -TARGET_EINVAL;
5672 /* allocate the LDT */
/* lazy allocation: the emulated LDT lives in guest memory via target_mmap */
5674 env->ldt.base = target_mmap(0,
5675 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
5676 PROT_READ|PROT_WRITE,
5677 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
5678 if (env->ldt.base == -1)
5679 return -TARGET_ENOMEM;
5680 memset(g2h(env->ldt.base), 0,
5681 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
5682 env->ldt.limit = 0xffff;
5683 ldt_table = g2h(env->ldt.base);
5686 /* NOTE: same code as Linux kernel */
5687 /* Allow LDTs to be cleared by the user. */
5688 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5691 read_exec_only == 1 &&
5693 limit_in_pages == 0 &&
5694 seg_not_present == 1 &&
/* build the two descriptor words, mirroring the kernel's encoding */
5702 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5703 (ldt_info.limit & 0x0ffff);
5704 entry_2 = (ldt_info.base_addr & 0xff000000) |
5705 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5706 (ldt_info.limit & 0xf0000) |
5707 ((read_exec_only ^ 1) << 9) |
5709 ((seg_not_present ^ 1) << 15) |
5711 (limit_in_pages << 23) |
/* 'useable' bit only applies in non-oldmode (condition line elided) */
5715 entry_2 |= (useable << 20);
5717 /* Install the new entry ... */
5719 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
5720 lp[0] = tswap32(entry_1);
5721 lp[1] = tswap32(entry_2);
5725 /* specific and weird i386 syscalls */
/*
 * modify_ldt(2) entry point: dispatch on 'func' to the read path or one
 * of the two write modes (case labels elided in this extract; the two
 * write_ldt calls differ only in the 'oldmode' flag).
 */
5726 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5727 unsigned long bytecount)
5733 ret = read_ldt(ptr, bytecount);
5736 ret = write_ldt(env, ptr, bytecount, 1);
5739 ret = write_ldt(env, ptr, bytecount, 0);
5742 ret = -TARGET_ENOSYS;
5748 #if defined(TARGET_I386) && defined(TARGET_ABI32)
/*
 * set_thread_area(2) for 32-bit i386 guests: install a TLS descriptor
 * into the emulated GDT.  entry_number == -1 asks for the first free
 * slot in the TLS range, which is written back to the guest struct.
 * Shares the descriptor-encoding logic with write_ldt above.
 * NOTE(review): elided extract -- some braces/else-arms not shown.
 */
5749 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
5751 uint64_t *gdt_table = g2h(env->gdt.base);
5752 struct target_modify_ldt_ldt_s ldt_info;
5753 struct target_modify_ldt_ldt_s *target_ldt_info;
5754 int seg_32bit, contents, read_exec_only, limit_in_pages;
5755 int seg_not_present, useable, lm;
5756 uint32_t *lp, entry_1, entry_2;
5759 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5760 if (!target_ldt_info)
5761 return -TARGET_EFAULT;
5762 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5763 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5764 ldt_info.limit = tswap32(target_ldt_info->limit);
5765 ldt_info.flags = tswap32(target_ldt_info->flags);
/* -1 means "pick a free TLS slot for me and report it back" */
5766 if (ldt_info.entry_number == -1) {
5767 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
5768 if (gdt_table[i] == 0) {
5769 ldt_info.entry_number = i;
5770 target_ldt_info->entry_number = tswap32(i);
5775 unlock_user_struct(target_ldt_info, ptr, 1);
5777 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
5778 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
5779 return -TARGET_EINVAL;
/* unpack the packed 'flags' word into descriptor attributes */
5780 seg_32bit = ldt_info.flags & 1;
5781 contents = (ldt_info.flags >> 1) & 3;
5782 read_exec_only = (ldt_info.flags >> 3) & 1;
5783 limit_in_pages = (ldt_info.flags >> 4) & 1;
5784 seg_not_present = (ldt_info.flags >> 5) & 1;
5785 useable = (ldt_info.flags >> 6) & 1;
5789 lm = (ldt_info.flags >> 7) & 1;
5792 if (contents == 3) {
5793 if (seg_not_present == 0)
5794 return -TARGET_EINVAL;
5797 /* NOTE: same code as Linux kernel */
5798 /* Allow LDTs to be cleared by the user. */
5799 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5800 if ((contents == 0 &&
5801 read_exec_only == 1 &&
5803 limit_in_pages == 0 &&
5804 seg_not_present == 1 &&
/* build the two descriptor words, mirroring the kernel's encoding */
5812 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5813 (ldt_info.limit & 0x0ffff);
5814 entry_2 = (ldt_info.base_addr & 0xff000000) |
5815 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5816 (ldt_info.limit & 0xf0000) |
5817 ((read_exec_only ^ 1) << 9) |
5819 ((seg_not_present ^ 1) << 15) |
5821 (limit_in_pages << 23) |
5826 /* Install the new entry ... */
5828 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
5829 lp[0] = tswap32(entry_1);
5830 lp[1] = tswap32(entry_2);
/*
 * get_thread_area(2): decode the requested GDT TLS descriptor back into
 * a target_modify_ldt_ldt_s (inverse of the encoding in
 * do_set_thread_area) and write it to the guest struct at 'ptr'.
 */
5834 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
5836 struct target_modify_ldt_ldt_s *target_ldt_info;
5837 uint64_t *gdt_table = g2h(env->gdt.base);
5838 uint32_t base_addr, limit, flags;
5839 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
5840 int seg_not_present, useable, lm;
5841 uint32_t *lp, entry_1, entry_2;
5843 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5844 if (!target_ldt_info)
5845 return -TARGET_EFAULT;
5846 idx = tswap32(target_ldt_info->entry_number);
5847 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
5848 idx > TARGET_GDT_ENTRY_TLS_MAX) {
5849 unlock_user_struct(target_ldt_info, ptr, 1);
5850 return -TARGET_EINVAL;
5852 lp = (uint32_t *)(gdt_table + idx);
5853 entry_1 = tswap32(lp[0]);
5854 entry_2 = tswap32(lp[1]);
/* extract the individual attribute bits from descriptor word 2 */
5856 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
5857 contents = (entry_2 >> 10) & 3;
5858 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
5859 seg_32bit = (entry_2 >> 22) & 1;
5860 limit_in_pages = (entry_2 >> 23) & 1;
5861 useable = (entry_2 >> 20) & 1;
5865 lm = (entry_2 >> 21) & 1;
/* repack into the same 'flags' layout that write_ldt consumes */
5867 flags = (seg_32bit << 0) | (contents << 1) |
5868 (read_exec_only << 3) | (limit_in_pages << 4) |
5869 (seg_not_present << 5) | (useable << 6) | (lm << 7);
5870 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
5871 base_addr = (entry_1 >> 16) |
5872 (entry_2 & 0xff000000) |
5873 ((entry_2 & 0xff) << 16);
5874 target_ldt_info->base_addr = tswapal(base_addr);
5875 target_ldt_info->limit = tswap32(limit);
5876 target_ldt_info->flags = tswap32(flags);
5877 unlock_user_struct(target_ldt_info, ptr, 1);
5880 #endif /* TARGET_I386 && TARGET_ABI32 */
5882 #ifndef TARGET_ABI32
/*
 * arch_prctl(2) for 64-bit x86 guests: set or query the FS/GS segment
 * base.  The code that picks 'idx' (presumably R_GS vs R_FS) between
 * the visible lines is elided in this extract -- confirm in full file.
 */
5883 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5890 case TARGET_ARCH_SET_GS:
5891 case TARGET_ARCH_SET_FS:
5892 if (code == TARGET_ARCH_SET_GS)
5896 cpu_x86_load_seg(env, idx, 0);
5897 env->segs[idx].base = addr;
5899 case TARGET_ARCH_GET_GS:
5900 case TARGET_ARCH_GET_FS:
5901 if (code == TARGET_ARCH_GET_GS)
5905 val = env->segs[idx].base;
5906 if (put_user(val, addr, abi_ulong))
5907 ret = -TARGET_EFAULT;
5910 ret = -TARGET_EINVAL;
5919 #define NEW_STACK_SIZE 0x40000
/* serializes thread creation so TLS setup in do_fork appears atomic */
5922 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
/* handshake state shared between do_fork (parent) and clone_func (child) */
5925 pthread_mutex_t mutex;
5926 pthread_cond_t cond;
5929 abi_ulong child_tidptr;
5930 abi_ulong parent_tidptr;
/*
 * Thread entry point for CLONE_VM clones: publish the new tid where the
 * clone flags requested, restore the signal mask, signal readiness to
 * the parent, then wait until the parent releases clone_lock (i.e. has
 * finished TLS setup) before running guest code.
 */
5934 static void *clone_func(void *arg)
5936 new_thread_info *info = arg;
5941 rcu_register_thread();
5943 cpu = ENV_GET_CPU(env);
5945 ts = (TaskState *)cpu->opaque;
5946 info->tid = gettid();
5947 cpu->host_tid = info->tid;
5949 if (info->child_tidptr)
5950 put_user_u32(info->tid, info->child_tidptr);
5951 if (info->parent_tidptr)
5952 put_user_u32(info->tid, info->parent_tidptr);
5953 /* Enable signals. */
5954 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
5955 /* Signal to the parent that we're ready. */
5956 pthread_mutex_lock(&info->mutex);
5957 pthread_cond_broadcast(&info->cond);
5958 pthread_mutex_unlock(&info->mutex);
5959 /* Wait until the parent has finshed initializing the tls state. */
5960 pthread_mutex_lock(&clone_lock);
5961 pthread_mutex_unlock(&clone_lock);
5967 /* do_fork() Must return host values and target errnos (unlike most
5968 do_*() functions). */
/*
 * Emulate clone(2)/fork(2)/vfork(2).  CLONE_VM requests become a host
 * pthread sharing this process; anything else becomes a host fork().
 * vfork is downgraded to plain fork (CLONE_VFORK|CLONE_VM stripped).
 * NOTE(review): elided extract -- some braces, the fork() call itself
 * and a few intervening lines are not shown.
 */
5969 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
5970 abi_ulong parent_tidptr, target_ulong newtls,
5971 abi_ulong child_tidptr)
5973 CPUState *cpu = ENV_GET_CPU(env);
5977 CPUArchState *new_env;
5978 unsigned int nptl_flags;
5981 /* Emulate vfork() with fork() */
5982 if (flags & CLONE_VFORK)
5983 flags &= ~(CLONE_VFORK | CLONE_VM);
5985 if (flags & CLONE_VM) {
5986 TaskState *parent_ts = (TaskState *)cpu->opaque;
5987 new_thread_info info;
5988 pthread_attr_t attr;
5990 ts = g_new0(TaskState, 1);
5991 init_task_state(ts);
5992 /* we create a new CPU instance. */
5993 new_env = cpu_copy(env);
5994 /* Init regs that differ from the parent. */
5995 cpu_clone_regs(new_env, newsp);
5996 new_cpu = ENV_GET_CPU(new_env);
5997 new_cpu->opaque = ts;
5998 ts->bprm = parent_ts->bprm;
5999 ts->info = parent_ts->info;
6000 ts->signal_mask = parent_ts->signal_mask;
/* NPTL-related bits were saved in nptl_flags (assignment elided);
 * strip them from 'flags' before further use */
6002 flags &= ~CLONE_NPTL_FLAGS2;
6004 if (nptl_flags & CLONE_CHILD_CLEARTID) {
6005 ts->child_tidptr = child_tidptr;
6008 if (nptl_flags & CLONE_SETTLS)
6009 cpu_set_tls (new_env, newtls);
6011 /* Grab a mutex so that thread setup appears atomic. */
6012 pthread_mutex_lock(&clone_lock);
6014 memset(&info, 0, sizeof(info));
6015 pthread_mutex_init(&info.mutex, NULL);
6016 pthread_mutex_lock(&info.mutex);
6017 pthread_cond_init(&info.cond, NULL);
6019 if (nptl_flags & CLONE_CHILD_SETTID)
6020 info.child_tidptr = child_tidptr;
6021 if (nptl_flags & CLONE_PARENT_SETTID)
6022 info.parent_tidptr = parent_tidptr;
6024 ret = pthread_attr_init(&attr);
6025 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6026 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6027 /* It is not safe to deliver signals until the child has finished
6028 initializing, so temporarily block all signals. */
6029 sigfillset(&sigmask);
6030 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6032 ret = pthread_create(&info.thread, &attr, clone_func, &info);
6033 /* TODO: Free new CPU state if thread creation failed. */
6035 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6036 pthread_attr_destroy(&attr);
6038 /* Wait for the child to initialize. */
6039 pthread_cond_wait(&info.cond, &info.mutex);
/* child has published its tid into info by now (set in clone_func) */
6041 if (flags & CLONE_PARENT_SETTID)
6042 put_user_u32(ret, parent_tidptr)
6046 pthread_mutex_unlock(&info.mutex);
6047 pthread_cond_destroy(&info.cond);
6048 pthread_mutex_destroy(&info.mutex);
6049 pthread_mutex_unlock(&clone_lock);
6051 /* if no CLONE_VM, we consider it is a fork */
/* fork path accepts only exit-signal + NPTL bits; anything else -> EINVAL */
6052 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0) {
6053 return -TARGET_EINVAL;
6056 if (block_signals()) {
6057 return -TARGET_ERESTARTSYS;
6063 /* Child Process. */
6065 cpu_clone_regs(env, newsp);
6067 /* There is a race condition here. The parent process could
6068 theoretically read the TID in the child process before the child
6069 tid is set. This would require using either ptrace
6070 (not implemented) or having *_tidptr to point at a shared memory
6071 mapping. We can't repeat the spinlock hack used above because
6072 the child process gets its own copy of the lock. */
6073 if (flags & CLONE_CHILD_SETTID)
6074 put_user_u32(gettid(), child_tidptr);
6075 if (flags & CLONE_PARENT_SETTID)
6076 put_user_u32(gettid(), parent_tidptr);
6077 ts = (TaskState *)cpu->opaque;
6078 if (flags & CLONE_SETTLS)
6079 cpu_set_tls (env, newtls);
6080 if (flags & CLONE_CHILD_CLEARTID)
6081 ts->child_tidptr = child_tidptr;
6089 /* warning : doesn't handle linux specific flags... */
/*
 * Map a target fcntl(2) command number to the host's.  Returns
 * -TARGET_EINVAL for commands we do not support.  Most 'return F_*;'
 * lines are elided in this extract; the fall-through case groups below
 * share a return.  On 32-bit ABIs the *LK64 commands map to the host's
 * large-file locking commands.
 */
6090 static int target_to_host_fcntl_cmd(int cmd)
6093 case TARGET_F_DUPFD:
6094 case TARGET_F_GETFD:
6095 case TARGET_F_SETFD:
6096 case TARGET_F_GETFL:
6097 case TARGET_F_SETFL:
6099 case TARGET_F_GETLK:
6101 case TARGET_F_SETLK:
6103 case TARGET_F_SETLKW:
6105 case TARGET_F_GETOWN:
6107 case TARGET_F_SETOWN:
6109 case TARGET_F_GETSIG:
6111 case TARGET_F_SETSIG:
6113 #if TARGET_ABI_BITS == 32
6114 case TARGET_F_GETLK64:
6116 case TARGET_F_SETLK64:
6118 case TARGET_F_SETLKW64:
6121 case TARGET_F_SETLEASE:
6123 case TARGET_F_GETLEASE:
6125 #ifdef F_DUPFD_CLOEXEC
6126 case TARGET_F_DUPFD_CLOEXEC:
6127 return F_DUPFD_CLOEXEC;
6129 case TARGET_F_NOTIFY:
6132 case TARGET_F_GETOWN_EX:
6136 case TARGET_F_SETOWN_EX:
6140 case TARGET_F_SETPIPE_SZ:
6141 return F_SETPIPE_SZ;
6142 case TARGET_F_GETPIPE_SZ:
6143 return F_GETPIPE_SZ;
6146 return -TARGET_EINVAL;
6148 return -TARGET_EINVAL;
/*
 * Translation table for the flock l_type field.  TRANSTBL_CONVERT uses
 * -1 masks so every bit participates in the match, i.e. the whole value
 * is compared/translated rather than individual flag bits.
 */
6151 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
6152 static const bitmask_transtbl flock_tbl[] = {
6153 TRANSTBL_CONVERT(F_RDLCK),
6154 TRANSTBL_CONVERT(F_WRLCK),
6155 TRANSTBL_CONVERT(F_UNLCK),
6156 TRANSTBL_CONVERT(F_EXLCK),
6157 TRANSTBL_CONVERT(F_SHLCK),
/*
 * Read a guest struct target_flock into a host struct flock64,
 * translating l_type through flock_tbl.  Returns -TARGET_EFAULT on a
 * bad guest pointer.
 */
6161 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6162 abi_ulong target_flock_addr)
6164 struct target_flock *target_fl;
6167 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6168 return -TARGET_EFAULT;
6171 __get_user(l_type, &target_fl->l_type);
6172 fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
6173 __get_user(fl->l_whence, &target_fl->l_whence);
6174 __get_user(fl->l_start, &target_fl->l_start);
6175 __get_user(fl->l_len, &target_fl->l_len);
6176 __get_user(fl->l_pid, &target_fl->l_pid);
6177 unlock_user_struct(target_fl, target_flock_addr, 0);
/*
 * Write a host struct flock64 back to a guest struct target_flock,
 * translating l_type through flock_tbl.  Returns -TARGET_EFAULT on a
 * bad guest pointer.
 */
6181 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6182 const struct flock64 *fl)
6184 struct target_flock *target_fl;
6187 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6188 return -TARGET_EFAULT;
6191 l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
6192 __put_user(l_type, &target_fl->l_type);
6193 __put_user(fl->l_whence, &target_fl->l_whence);
6194 __put_user(fl->l_start, &target_fl->l_start);
6195 __put_user(fl->l_len, &target_fl->l_len);
6196 __put_user(fl->l_pid, &target_fl->l_pid);
6197 unlock_user_struct(target_fl, target_flock_addr, 1);
/* Function-pointer types so callers can select the right flock64
 * converter for the target ABI at run time. */
6201 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6202 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6204 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
/*
 * 32-bit ARM EABI variant: identical logic to copy_from_user_flock64
 * below, but reads the EABI layout (struct target_eabi_flock64), whose
 * alignment differs from the generic target_flock64.
 */
6205 static inline abi_long copy_from_user_eabi_flock64(struct flock64 *fl,
6206 abi_ulong target_flock_addr)
6208 struct target_eabi_flock64 *target_fl;
6211 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6212 return -TARGET_EFAULT;
6215 __get_user(l_type, &target_fl->l_type);
6216 fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
6217 __get_user(fl->l_whence, &target_fl->l_whence);
6218 __get_user(fl->l_start, &target_fl->l_start);
6219 __get_user(fl->l_len, &target_fl->l_len);
6220 __get_user(fl->l_pid, &target_fl->l_pid);
6221 unlock_user_struct(target_fl, target_flock_addr, 0);
/*
 * 32-bit ARM EABI variant of copy_to_user_flock64: writes the EABI
 * layout (struct target_eabi_flock64) back to guest memory.
 */
6225 static inline abi_long copy_to_user_eabi_flock64(abi_ulong target_flock_addr,
6226 const struct flock64 *fl)
6228 struct target_eabi_flock64 *target_fl;
6231 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6232 return -TARGET_EFAULT;
6235 l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
6236 __put_user(l_type, &target_fl->l_type);
6237 __put_user(fl->l_whence, &target_fl->l_whence);
6238 __put_user(fl->l_start, &target_fl->l_start);
6239 __put_user(fl->l_len, &target_fl->l_len);
6240 __put_user(fl->l_pid, &target_fl->l_pid);
6241 unlock_user_struct(target_fl, target_flock_addr, 1);
/*
 * Generic 64-bit flock converter: read a guest struct target_flock64
 * into a host struct flock64, translating l_type through flock_tbl.
 */
6246 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6247 abi_ulong target_flock_addr)
6249 struct target_flock64 *target_fl;
6252 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6253 return -TARGET_EFAULT;
6256 __get_user(l_type, &target_fl->l_type);
6257 fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
6258 __get_user(fl->l_whence, &target_fl->l_whence);
6259 __get_user(fl->l_start, &target_fl->l_start);
6260 __get_user(fl->l_len, &target_fl->l_len);
6261 __get_user(fl->l_pid, &target_fl->l_pid);
6262 unlock_user_struct(target_fl, target_flock_addr, 0);
/*
 * Generic 64-bit flock converter: write a host struct flock64 back to a
 * guest struct target_flock64, translating l_type through flock_tbl.
 */
6266 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6267 const struct flock64 *fl)
6269 struct target_flock64 *target_fl;
6272 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6273 return -TARGET_EFAULT;
6276 l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
6277 __put_user(l_type, &target_fl->l_type);
6278 __put_user(fl->l_whence, &target_fl->l_whence);
6279 __put_user(fl->l_start, &target_fl->l_start);
6280 __put_user(fl->l_len, &target_fl->l_len);
6281 __put_user(fl->l_pid, &target_fl->l_pid);
6282 unlock_user_struct(target_fl, target_flock_addr, 1);
/*
 * Emulate fcntl(2): translate the command, convert pointer arguments
 * (flock / f_owner_ex) between guest and host layouts, and translate
 * flag bitmasks for GETFL/SETFL.  Unrecognized commands fall through to
 * the host fcntl with 'arg' passed unchanged.
 * NOTE(review): elided extract -- error-check and brace lines between
 * the numbered statements are not shown.
 */
6286 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6288 struct flock64 fl64;
6290 struct f_owner_ex fox;
6291 struct target_f_owner_ex *target_fox;
6294 int host_cmd = target_to_host_fcntl_cmd(cmd);
6296 if (host_cmd == -TARGET_EINVAL)
6300 case TARGET_F_GETLK:
6301 ret = copy_from_user_flock(&fl64, arg);
6305 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6307 ret = copy_to_user_flock(arg, &fl64);
6311 case TARGET_F_SETLK:
6312 case TARGET_F_SETLKW:
6313 ret = copy_from_user_flock(&fl64, arg);
6317 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6320 case TARGET_F_GETLK64:
6321 ret = copy_from_user_flock64(&fl64, arg);
6325 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6327 ret = copy_to_user_flock64(arg, &fl64);
6330 case TARGET_F_SETLK64:
6331 case TARGET_F_SETLKW64:
6332 ret = copy_from_user_flock64(&fl64, arg);
6336 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6339 case TARGET_F_GETFL:
6340 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
/* map the host's O_* flag bits back to the target's encoding */
6342 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6346 case TARGET_F_SETFL:
6347 ret = get_errno(safe_fcntl(fd, host_cmd,
6348 target_to_host_bitmask(arg,
6353 case TARGET_F_GETOWN_EX:
6354 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6356 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6357 return -TARGET_EFAULT;
6358 target_fox->type = tswap32(fox.type);
6359 target_fox->pid = tswap32(fox.pid);
6360 unlock_user_struct(target_fox, arg, 1);
6366 case TARGET_F_SETOWN_EX:
6367 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6368 return -TARGET_EFAULT;
6369 fox.type = tswap32(target_fox->type);
6370 fox.pid = tswap32(target_fox->pid);
6371 unlock_user_struct(target_fox, arg, 0);
6372 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
/* commands whose integer argument needs no conversion */
6376 case TARGET_F_SETOWN:
6377 case TARGET_F_GETOWN:
6378 case TARGET_F_SETSIG:
6379 case TARGET_F_GETSIG:
6380 case TARGET_F_SETLEASE:
6381 case TARGET_F_GETLEASE:
6382 case TARGET_F_SETPIPE_SZ:
6383 case TARGET_F_GETPIPE_SZ:
6384 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
/* default: pass the original cmd straight through to the host */
6388 ret = get_errno(safe_fcntl(fd, cmd, arg));
/*
 * 16-bit <-> 32-bit uid/gid conversion helpers for targets whose legacy
 * syscalls use 16-bit IDs (USE_UID16) versus those that don't.  Most
 * return statements and the surrounding #if USE_UID16 guards are elided
 * in this extract; only the -1 ("no change") special-casing is visible.
 */
6396 static inline int high2lowuid(int uid)
6404 static inline int high2lowgid(int gid)
6412 static inline int low2highuid(int uid)
/* 16-bit -1 sentinel must widen to 32-bit -1, not 0xffff */
6414 if ((int16_t)uid == -1)
6420 static inline int low2highgid(int gid)
6422 if ((int16_t)gid == -1)
6427 static inline int tswapid(int id)
6432 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6434 #else /* !USE_UID16 */
/* 32-bit-ID targets: all conversions are identity */
6435 static inline int high2lowuid(int uid)
6439 static inline int high2lowgid(int gid)
6443 static inline int low2highuid(int uid)
6447 static inline int low2highgid(int gid)
6451 static inline int tswapid(int id)
6456 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6458 #endif /* USE_UID16 */
6460 /* We must do direct syscalls for setting UID/GID, because we want to
6461 * implement the Linux system call semantics of "change only for this thread",
6462 * not the libc/POSIX semantics of "change for all threads in process".
6463 * (See http://ewontfix.com/17/ for more details.)
6464 * We use the 32-bit version of the syscalls if present; if it is not
6465 * then either the host architecture supports 32-bit UIDs natively with
6466 * the standard syscall, or the 16-bit UID is the best we can do.
6467 */
6468 #ifdef __NR_setuid32
6469 #define __NR_sys_setuid __NR_setuid32
6471 #define __NR_sys_setuid __NR_setuid
6473 #ifdef __NR_setgid32
6474 #define __NR_sys_setgid __NR_setgid32
6476 #define __NR_sys_setgid __NR_setgid
6478 #ifdef __NR_setresuid32
6479 #define __NR_sys_setresuid __NR_setresuid32
6481 #define __NR_sys_setresuid __NR_setresuid
6483 #ifdef __NR_setresgid32
6484 #define __NR_sys_setresgid __NR_setresgid32
6486 #define __NR_sys_setresgid __NR_setresgid
/* raw per-thread syscall wrappers (bypass glibc's setxid broadcast) */
6489 _syscall1(int, sys_setuid, uid_t, uid)
6490 _syscall1(int, sys_setgid, gid_t, gid)
6491 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6492 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
/*
 * One-time emulation setup: registers the thunk descriptions for all
 * marshalled structure types, builds the target->host errno mapping as the
 * inverse of host_to_target_errno_table[], and rewrites the size field of
 * any ioctl whose target_cmd was declared with all size bits set, using the
 * thunk-computed size of its pointed-to argument type.
 */
6494 void syscall_init(void)
6497 const argtype *arg_type;
6501 thunk_init(STRUCT_MAX);
6503 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6504 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6505 #include "syscall_types.h"
6507 #undef STRUCT_SPECIAL
6509 /* Build target_to_host_errno_table[] table from
6510 * host_to_target_errno_table[]. */
6511 for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6512 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6515 /* we patch the ioctl size if necessary. We rely on the fact that
6516 no ioctl has all the bits at '1' in the size field */
6518 while (ie->target_cmd != 0) {
6519 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6520 TARGET_IOC_SIZEMASK) {
6521 arg_type = ie->arg_type;
/* Size patching only makes sense for pointer arguments. */
6522 if (arg_type[0] != TYPE_PTR) {
6523 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6528 size = thunk_type_size(arg_type, 0);
6529 ie->target_cmd = (ie->target_cmd &
6530 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6531 (size << TARGET_IOC_SIZESHIFT);
6534 /* automatic consistency check if same arch */
6535 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6536 (defined(__x86_64__) && defined(TARGET_X86_64))
/* When host and target ABIs match, the ioctl numbers must be identical. */
6537 if (unlikely(ie->target_cmd != ie->host_cmd)) {
6538 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6539 ie->name, ie->target_cmd, ie->host_cmd);
6546 #if TARGET_ABI_BITS == 32
/*
 * Combine the two 32-bit register halves of a 64-bit file offset into a
 * uint64_t, honouring the target's endianness (which half is the high word
 * depends on TARGET_WORDS_BIGENDIAN).
 */
6547 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
6549 #ifdef TARGET_WORDS_BIGENDIAN
6550 return ((uint64_t)word0 << 32) | word1;
6552 return ((uint64_t)word1 << 32) | word0;
6555 #else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABI: the offset arrives in a single register; word1 is unused. */
6556 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
6560 #endif /* TARGET_ABI_BITS != 32 */
6562 #ifdef TARGET_NR_truncate64
/*
 * truncate64: reassemble the 64-bit length from two guest registers
 * (shifting the register pair when the ABI requires alignment) and call the
 * host truncate64.  arg1 is the already-locked host pathname.
 */
6563 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6568 if (regpairs_aligned(cpu_env)) {
6572 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6576 #ifdef TARGET_NR_ftruncate64
/*
 * ftruncate64: same register-pair reassembly as target_truncate64, but
 * operating on an already-open host fd (arg1).
 */
6577 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6582 if (regpairs_aligned(cpu_env)) {
6586 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
/*
 * Copy a struct timespec from guest memory at target_addr into *host_ts,
 * byte-swapping each field as needed.  Returns -TARGET_EFAULT if the guest
 * address cannot be locked.
 */
6590 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
6591 abi_ulong target_addr)
6593 struct target_timespec *target_ts;
6595 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
6596 return -TARGET_EFAULT;
6597 __get_user(host_ts->tv_sec, &target_ts->tv_sec);
6598 __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6599 unlock_user_struct(target_ts, target_addr, 0);
/*
 * Copy *host_ts out to the guest struct timespec at target_addr,
 * byte-swapping each field.  Returns -TARGET_EFAULT on a bad guest address.
 */
6603 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
6604 struct timespec *host_ts)
6606 struct target_timespec *target_ts;
6608 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
6609 return -TARGET_EFAULT;
6610 __put_user(host_ts->tv_sec, &target_ts->tv_sec);
6611 __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6612 unlock_user_struct(target_ts, target_addr, 1);
/*
 * Read a guest struct itimerspec at target_addr into *host_itspec,
 * swapping the four tv_sec/tv_nsec fields via tswapal().
 * Returns -TARGET_EFAULT on a bad guest address.
 */
6616 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6617 abi_ulong target_addr)
6619 struct target_itimerspec *target_itspec;
6621 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6622 return -TARGET_EFAULT;
6625 host_itspec->it_interval.tv_sec =
6626 tswapal(target_itspec->it_interval.tv_sec);
6627 host_itspec->it_interval.tv_nsec =
6628 tswapal(target_itspec->it_interval.tv_nsec);
6629 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6630 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
/* NOTE(review): copy-back flag 1 on a VERIFY_READ lock writes the
 * (unmodified) struct back to the guest; 0 would be the usual flag for a
 * read-only access -- confirm intended. */
6632 unlock_user_struct(target_itspec, target_addr, 1);
/*
 * Write *host_its out to the guest struct itimerspec at target_addr,
 * swapping each field via tswapal().  Returns -TARGET_EFAULT on a bad
 * guest address.
 * NOTE(review): the final unlock uses copy-back flag 0 even though the
 * lock was VERIFY_WRITE -- confirm the data still reaches the guest via
 * the lock_user_struct mapping semantics.
 */
6636 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6637 struct itimerspec *host_its)
6639 struct target_itimerspec *target_itspec;
6641 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6642 return -TARGET_EFAULT;
6645 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6646 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6648 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6649 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6651 unlock_user_struct(target_itspec, target_addr, 0);
/*
 * Convert a guest struct sigevent at target_addr into *host_sevp:
 * swaps sival_ptr, translates the signal number to the host numbering,
 * and copies the notify method and tid.  Returns -TARGET_EFAULT on a
 * bad guest address.
 */
6655 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6656 abi_ulong target_addr)
6658 struct target_sigevent *target_sevp;
6660 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6661 return -TARGET_EFAULT;
6664 /* This union is awkward on 64 bit systems because it has a 32 bit
6665 * integer and a pointer in it; we follow the conversion approach
6666 * used for handling sigval types in signal.c so the guest should get
6667 * the correct value back even if we did a 64 bit byteswap and it's
6668 * using the 32 bit integer.
6670 host_sevp->sigev_value.sival_ptr =
6671 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6672 host_sevp->sigev_signo =
6673 target_to_host_signal(tswap32(target_sevp->sigev_signo));
6674 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6675 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6677 unlock_user_struct(target_sevp, target_addr, 1);
6681 #if defined(TARGET_NR_mlockall)
/*
 * Translate the guest's mlockall() flag bits (MCL_CURRENT/MCL_FUTURE)
 * into the host's flag values.
 */
6682 static inline int target_to_host_mlockall_arg(int arg)
6686 if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
6687 result |= MCL_CURRENT;
6689 if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
6690 result |= MCL_FUTURE;
/*
 * Copy a host struct stat into the guest's 64-bit stat layout at
 * target_addr.  On 32-bit ARM targets the EABI variant uses its own
 * struct (target_eabi_stat64); otherwise the target's stat64 (or plain
 * stat where no stat64 exists) is filled.  Returns -TARGET_EFAULT on a
 * bad guest address.
 */
6696 static inline abi_long host_to_target_stat64(void *cpu_env,
6697 abi_ulong target_addr,
6698 struct stat *host_st)
6700 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6701 if (((CPUARMState *)cpu_env)->eabi) {
6702 struct target_eabi_stat64 *target_st;
6704 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6705 return -TARGET_EFAULT;
6706 memset(target_st, 0, sizeof(struct target_eabi_stat64));
6707 __put_user(host_st->st_dev, &target_st->st_dev);
6708 __put_user(host_st->st_ino, &target_st->st_ino);
6709 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
/* Some ABIs keep the real inode in a secondary __st_ino field. */
6710 __put_user(host_st->st_ino, &target_st->__st_ino);
6712 __put_user(host_st->st_mode, &target_st->st_mode);
6713 __put_user(host_st->st_nlink, &target_st->st_nlink);
6714 __put_user(host_st->st_uid, &target_st->st_uid);
6715 __put_user(host_st->st_gid, &target_st->st_gid);
6716 __put_user(host_st->st_rdev, &target_st->st_rdev);
6717 __put_user(host_st->st_size, &target_st->st_size);
6718 __put_user(host_st->st_blksize, &target_st->st_blksize);
6719 __put_user(host_st->st_blocks, &target_st->st_blocks);
6720 __put_user(host_st->st_atime, &target_st->target_st_atime);
6721 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6722 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6723 unlock_user_struct(target_st, target_addr, 1);
6727 #if defined(TARGET_HAS_STRUCT_STAT64)
6728 struct target_stat64 *target_st;
6730 struct target_stat *target_st;
6733 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6734 return -TARGET_EFAULT;
6735 memset(target_st, 0, sizeof(*target_st));
6736 __put_user(host_st->st_dev, &target_st->st_dev);
6737 __put_user(host_st->st_ino, &target_st->st_ino);
6738 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6739 __put_user(host_st->st_ino, &target_st->__st_ino);
6741 __put_user(host_st->st_mode, &target_st->st_mode);
6742 __put_user(host_st->st_nlink, &target_st->st_nlink);
6743 __put_user(host_st->st_uid, &target_st->st_uid);
6744 __put_user(host_st->st_gid, &target_st->st_gid);
6745 __put_user(host_st->st_rdev, &target_st->st_rdev);
6746 /* XXX: better use of kernel struct */
6747 __put_user(host_st->st_size, &target_st->st_size);
6748 __put_user(host_st->st_blksize, &target_st->st_blksize);
6749 __put_user(host_st->st_blocks, &target_st->st_blocks);
6750 __put_user(host_st->st_atime, &target_st->target_st_atime);
6751 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6752 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6753 unlock_user_struct(target_st, target_addr, 1);
6759 /* ??? Using host futex calls even when target atomic operations
6760 are not really atomic probably breaks things. However implementing
6761 futexes locally would make futexes shared between multiple processes
6762 tricky. However they're probably useless because guest atomic
6763 operations won't work either. */
/*
 * Emulate futex(2) by forwarding to the host futex on the g2h-translated
 * address.  WAIT-family ops convert the guest timespec and byteswap val
 * (it is compared against guest memory); REQUEUE/WAKE_OP ops smuggle the
 * uint32_t val2 through the timeout pointer argument, matching the
 * kernel's calling convention.  Unknown ops return -TARGET_ENOSYS.
 */
6764 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
6765 target_ulong uaddr2, int val3)
6767 struct timespec ts, *pts;
6770 /* ??? We assume FUTEX_* constants are the same on both host
6772 #ifdef FUTEX_CMD_MASK
6773 base_op = op & FUTEX_CMD_MASK;
6779 case FUTEX_WAIT_BITSET:
6782 target_to_host_timespec(pts, timeout);
6786 return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
6789 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6791 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6793 case FUTEX_CMP_REQUEUE:
6795 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6796 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6797 But the prototype takes a `struct timespec *'; insert casts
6798 to satisfy the compiler. We do not need to tswap TIMEOUT
6799 since it's not compared to guest memory. */
6800 pts = (struct timespec *)(uintptr_t) timeout;
6801 return get_errno(safe_futex(g2h(uaddr), op, val, pts,
6803 (base_op == FUTEX_CMP_REQUEUE
6807 return -TARGET_ENOSYS;
6810 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Emulate name_to_handle_at(2): read the guest's handle_bytes size, call
 * the host syscall into a temporary host file_handle, then copy the result
 * (with handle_bytes/handle_type byteswapped) back into the guest handle
 * buffer and store the mount id.  Returns -TARGET_EFAULT on any bad guest
 * pointer.
 */
6811 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
6812 abi_long handle, abi_long mount_id,
6815 struct file_handle *target_fh;
6816 struct file_handle *fh;
6820 unsigned int size, total_size;
6822 if (get_user_s32(size, handle)) {
6823 return -TARGET_EFAULT;
6826 name = lock_user_string(pathname);
6828 return -TARGET_EFAULT;
6831 total_size = sizeof(struct file_handle) + size;
6832 target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
6834 unlock_user(name, pathname, 0);
6835 return -TARGET_EFAULT;
6838 fh = g_malloc0(total_size);
6839 fh->handle_bytes = size;
6841 ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
6842 unlock_user(name, pathname, 0);
6844 /* man name_to_handle_at(2):
6845 * Other than the use of the handle_bytes field, the caller should treat
6846 * the file_handle structure as an opaque data type
/* Copy the opaque handle bytes verbatim, then fix up the two
 * integer header fields for the guest's byte order. */
6849 memcpy(target_fh, fh, total_size);
6850 target_fh->handle_bytes = tswap32(fh->handle_bytes);
6851 target_fh->handle_type = tswap32(fh->handle_type);
6853 unlock_user(target_fh, handle, total_size);
6855 if (put_user_s32(mid, mount_id)) {
6856 return -TARGET_EFAULT;
6864 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Emulate open_by_handle_at(2): duplicate the guest's file_handle into
 * host memory (byteswapping the header fields) and call the host syscall
 * with the open flags translated via fcntl_flags_tbl.
 */
6865 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
6868 struct file_handle *target_fh;
6869 struct file_handle *fh;
6870 unsigned int size, total_size;
6873 if (get_user_s32(size, handle)) {
6874 return -TARGET_EFAULT;
6877 total_size = sizeof(struct file_handle) + size;
6878 target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
6880 return -TARGET_EFAULT;
/* Work on a host-owned copy so we can fix up the header in place. */
6883 fh = g_memdup(target_fh, total_size);
6884 fh->handle_bytes = size;
6885 fh->handle_type = tswap32(target_fh->handle_type);
6887 ret = get_errno(open_by_handle_at(mount_fd, fh,
6888 target_to_host_bitmask(flags, fcntl_flags_tbl)));
6892 unlock_user(target_fh, handle, total_size);
6898 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
6900 /* signalfd siginfo conversion */
/*
 * Convert a host signalfd_siginfo to the guest's byte order and signal
 * numbering.  Called with tinfo aliasing info (in-place conversion from
 * host_to_target_data_signalfd), which is why some fields are read via
 * tinfo before being overwritten -- preserve the read/write order.
 */
6903 host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
6904 const struct signalfd_siginfo *info)
6906 int sig = host_to_target_signal(info->ssi_signo);
6908 /* linux/signalfd.h defines a ssi_addr_lsb
6909 * not defined in sys/signalfd.h but used by some kernels
6912 #ifdef BUS_MCEERR_AO
/* Memory-error SIGBUS carries an extra LSB field just past ssi_addr. */
6913 if (tinfo->ssi_signo == SIGBUS &&
6914 (tinfo->ssi_code == BUS_MCEERR_AR ||
6915 tinfo->ssi_code == BUS_MCEERR_AO)) {
6916 uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
6917 uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
6918 *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
6922 tinfo->ssi_signo = tswap32(sig);
/* NOTE(review): reads tinfo->ssi_errno rather than info->ssi_errno;
 * equivalent only because the two pointers alias -- confirm. */
6923 tinfo->ssi_errno = tswap32(tinfo->ssi_errno);
6924 tinfo->ssi_code = tswap32(info->ssi_code);
6925 tinfo->ssi_pid = tswap32(info->ssi_pid);
6926 tinfo->ssi_uid = tswap32(info->ssi_uid);
6927 tinfo->ssi_fd = tswap32(info->ssi_fd);
6928 tinfo->ssi_tid = tswap32(info->ssi_tid);
6929 tinfo->ssi_band = tswap32(info->ssi_band);
6930 tinfo->ssi_overrun = tswap32(info->ssi_overrun);
6931 tinfo->ssi_trapno = tswap32(info->ssi_trapno);
6932 tinfo->ssi_status = tswap32(info->ssi_status);
6933 tinfo->ssi_int = tswap32(info->ssi_int);
6934 tinfo->ssi_ptr = tswap64(info->ssi_ptr);
6935 tinfo->ssi_utime = tswap64(info->ssi_utime);
6936 tinfo->ssi_stime = tswap64(info->ssi_stime);
6937 tinfo->ssi_addr = tswap64(info->ssi_addr);
/*
 * fd-translation hook for signalfd reads: convert each signalfd_siginfo
 * record in buf (in place) from host to target representation.
 */
6940 static abi_long host_to_target_data_signalfd(void *buf, size_t len)
6944 for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) {
6945 host_to_target_signalfd_siginfo(buf + i, buf + i);
/* fd translator registered for signalfd descriptors: fixes up read data. */
6951 static TargetFdTrans target_signalfd_trans = {
6952 .host_to_target_data = host_to_target_data_signalfd,
/*
 * Emulate signalfd4(2): validate the flag bits, convert the guest sigset
 * and flags to host form, create the signalfd, and register the data
 * translator so reads are converted back to guest layout.
 */
6955 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
6958 target_sigset_t *target_mask;
6962 if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
6963 return -TARGET_EINVAL;
6965 if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
6966 return -TARGET_EFAULT;
6969 target_to_host_sigset(&host_mask, target_mask);
6971 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
6973 ret = get_errno(signalfd(fd, &host_mask, host_flags));
6975 fd_trans_register(ret, &target_signalfd_trans);
6978 unlock_user_struct(target_mask, mask, 0);
6984 /* Map host to target signal numbers for the wait family of syscalls.
6985 Assume all other status bits are the same. */
/*
 * Rewrite the signal number embedded in a wait()-family status word from
 * host to target numbering; all other status bits pass through unchanged.
 */
6986 int host_to_target_waitstatus(int status)
6988 if (WIFSIGNALED(status)) {
6989 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
6991 if (WIFSTOPPED(status)) {
6992 return (host_to_target_signal(WSTOPSIG(status)) << 8)
/*
 * Fake /proc/self/cmdline for the guest: stream the host file into fd,
 * but drop the first NUL-terminated string (the qemu binary path) so the
 * guest sees its own argv instead.
 */
6998 static int open_self_cmdline(void *cpu_env, int fd)
7001 bool word_skipped = false;
7003 fd_orig = open("/proc/self/cmdline", O_RDONLY);
7013 nb_read = read(fd_orig, buf, sizeof(buf));
7016 fd_orig = close(fd_orig);
7019 } else if (nb_read == 0) {
7023 if (!word_skipped) {
7024 /* Skip the first string, which is the path to qemu-*-static
7025 instead of the actual command. */
7026 cp_buf = memchr(buf, 0, nb_read);
7028 /* Null byte found, skip one string */
7030 nb_read -= cp_buf - buf;
7031 word_skipped = true;
7036 if (write(fd, cp_buf, nb_read) != nb_read) {
7045 return close(fd_orig);
/*
 * Fake /proc/self/maps for the guest: parse the host's maps file line by
 * line, keep only ranges that translate to valid guest addresses (h2g),
 * rewrite the addresses into guest terms, and label the guest stack.
 */
7048 static int open_self_maps(void *cpu_env, int fd)
7050 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7051 TaskState *ts = cpu->opaque;
7057 fp = fopen("/proc/self/maps", "r");
7062 while ((read = getline(&line, &len, fp)) != -1) {
7063 int fields, dev_maj, dev_min, inode;
7064 uint64_t min, max, offset;
7065 char flag_r, flag_w, flag_x, flag_p;
7066 char path[512] = "";
7067 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
7068 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
7069 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
/* 10 fields when the path is absent, 11 when present. */
7071 if ((fields < 10) || (fields > 11)) {
7074 if (h2g_valid(min)) {
7075 int flags = page_get_flags(h2g(min));
/* Clamp ranges that extend past the guest address space. */
7076 max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX);
7077 if (page_check_range(h2g(min), max - min, flags) == -1) {
7080 if (h2g(min) == ts->info->stack_limit) {
7081 pstrcpy(path, sizeof(path), " [stack]");
7083 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
7084 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
7085 h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
7086 flag_x, flag_p, offset, dev_maj, dev_min, inode,
7087 path[0] ? " " : "", path);
/*
 * Fake /proc/self/stat for the guest: emit the real pid (field 0), the
 * guest comm from bprm->argv[0] (field 1), and the guest stack start
 * (field 27); every other of the 44 fields is written as a literal 0.
 */
7097 static int open_self_stat(void *cpu_env, int fd)
7099 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7100 TaskState *ts = cpu->opaque;
7101 abi_ulong start_stack = ts->info->start_stack;
7104 for (i = 0; i < 44; i++) {
7112 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7113 } else if (i == 1) {
7115 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
7116 } else if (i == 27) {
7119 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7121 /* for the rest, there is MasterCard */
7122 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
7126 if (write(fd, buf, len) != len) {
/*
 * Fake /proc/self/auxv for the guest: copy the auxiliary vector that was
 * staged on the guest stack at exec time out to fd, then rewind fd so the
 * caller reads from the start.
 */
7134 static int open_self_auxv(void *cpu_env, int fd)
7136 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7137 TaskState *ts = cpu->opaque;
7138 abi_ulong auxv = ts->info->saved_auxv;
7139 abi_ulong len = ts->info->auxv_len;
7143 * Auxiliary vector is stored in target process stack.
7144 * read in whole auxv vector and copy it to file
7146 ptr = lock_user(VERIFY_READ, auxv, len, 0);
7150 r = write(fd, ptr, len);
7157 lseek(fd, 0, SEEK_SET);
7158 unlock_user(ptr, auxv, len);
/*
 * Return nonzero when filename refers to this process's own proc entry:
 * "/proc/self/<entry>" or "/proc/<our pid>/<entry>".
 */
7164 static int is_proc_myself(const char *filename, const char *entry)
7166 if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
7167 filename += strlen("/proc/");
7168 if (!strncmp(filename, "self/", strlen("self/"))) {
7169 filename += strlen("self/");
7170 } else if (*filename >= '1' && *filename <= '9') {
7172 snprintf(myself, sizeof(myself), "%d/", getpid());
7173 if (!strncmp(filename, myself, strlen(myself))) {
7174 filename += strlen(myself);
/* Whatever remains must match the requested entry name exactly. */
7181 if (!strcmp(filename, entry)) {
7188 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-path comparison used for the cross-endian /proc fakes below. */
7189 static int is_proc(const char *filename, const char *entry)
7191 return strcmp(filename, entry) == 0;
/*
 * Fake /proc/net/route when host and guest differ in endianness: pass the
 * header line through unchanged, then byteswap the dest/gateway/mask
 * address fields of each route entry before re-emitting it.
 */
7194 static int open_net_route(void *cpu_env, int fd)
7201 fp = fopen("/proc/net/route", "r");
/* First line is the column header -- copy verbatim. */
7208 read = getline(&line, &len, fp);
7209 dprintf(fd, "%s", line);
7213 while ((read = getline(&line, &len, fp)) != -1) {
7215 uint32_t dest, gw, mask;
7216 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7217 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7218 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7219 &mask, &mtu, &window, &irtt);
7220 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7221 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7222 metric, tswap32(mask), mtu, window, irtt);
/*
 * openat(2) front end with /proc interception: "/proc/self/exe" is
 * redirected to the exec fd (or the original executable path); the faked
 * proc entries in fakes[] are materialised into a mkstemp() temp file
 * filled by their generator callback; everything else falls through to
 * safe_openat on the (possibly rewritten) pathname.
 */
7232 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
7235 const char *filename;
7236 int (*fill)(void *cpu_env, int fd);
7237 int (*cmp)(const char *s1, const char *s2);
7239 const struct fake_open *fake_open;
7240 static const struct fake_open fakes[] = {
7241 { "maps", open_self_maps, is_proc_myself },
7242 { "stat", open_self_stat, is_proc_myself },
7243 { "auxv", open_self_auxv, is_proc_myself },
7244 { "cmdline", open_self_cmdline, is_proc_myself },
7245 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7246 { "/proc/net/route", open_net_route, is_proc },
7248 { NULL, NULL, NULL }
7251 if (is_proc_myself(pathname, "exe")) {
7252 int execfd = qemu_getauxval(AT_EXECFD);
7253 return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
7256 for (fake_open = fakes; fake_open->filename; fake_open++) {
7257 if (fake_open->cmp(pathname, fake_open->filename)) {
7262 if (fake_open->filename) {
7264 char filename[PATH_MAX];
7267 /* create temporary file to map stat to */
7268 tmpdir = getenv("TMPDIR");
7271 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
7272 fd = mkstemp(filename);
7278 if ((r = fake_open->fill(cpu_env, fd))) {
/* Rewind so the guest reads the generated content from offset 0. */
7284 lseek(fd, 0, SEEK_SET);
7289 return safe_openat(dirfd, path(pathname), flags, mode);
/* Guest-visible timer IDs are a 16-bit index tagged with TIMER_MAGIC in
 * the upper half, so stray integers are rejected as invalid IDs. */
7292 #define TIMER_MAGIC 0x0caf0000
7293 #define TIMER_MAGIC_MASK 0xffff0000
7295 /* Convert QEMU provided timer ID back to internal 16bit index format */
7296 static target_timer_t get_timer_id(abi_long arg)
7298 target_timer_t timerid = arg;
7300 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7301 return -TARGET_EINVAL;
/* After stripping the magic, the index must fit the timer table. */
7306 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7307 return -TARGET_EINVAL;
7313 /* do_syscall() should always have a single exit point at the end so
7314 that actions, such as logging of syscall results, can be performed.
7315 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
7316 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
7317 abi_long arg2, abi_long arg3, abi_long arg4,
7318 abi_long arg5, abi_long arg6, abi_long arg7,
7321 CPUState *cpu = ENV_GET_CPU(cpu_env);
7327 #if defined(DEBUG_ERESTARTSYS)
7328 /* Debug-only code for exercising the syscall-restart code paths
7329 * in the per-architecture cpu main loops: restart every syscall
7330 * the guest makes once before letting it through.
7337 return -TARGET_ERESTARTSYS;
7343 gemu_log("syscall %d", num);
7345 trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
7347 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
7350 case TARGET_NR_exit:
7351 /* In old applications this may be used to implement _exit(2).
7352 However in threaded applictions it is used for thread termination,
7353 and _exit_group is used for application termination.
7354 Do thread termination if we have more then one thread. */
7356 if (block_signals()) {
7357 ret = -TARGET_ERESTARTSYS;
7361 if (CPU_NEXT(first_cpu)) {
7365 /* Remove the CPU from the list. */
7366 QTAILQ_REMOVE(&cpus, cpu, node);
7369 if (ts->child_tidptr) {
7370 put_user_u32(0, ts->child_tidptr);
7371 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7375 object_unref(OBJECT(cpu));
7377 rcu_unregister_thread();
7383 gdb_exit(cpu_env, arg1);
7385 ret = 0; /* avoid warning */
7387 case TARGET_NR_read:
7391 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7393 ret = get_errno(safe_read(arg1, p, arg3));
7395 fd_trans_host_to_target_data(arg1)) {
7396 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7398 unlock_user(p, arg2, ret);
7401 case TARGET_NR_write:
7402 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7404 ret = get_errno(safe_write(arg1, p, arg3));
7405 unlock_user(p, arg2, 0);
7407 #ifdef TARGET_NR_open
7408 case TARGET_NR_open:
7409 if (!(p = lock_user_string(arg1)))
7411 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7412 target_to_host_bitmask(arg2, fcntl_flags_tbl),
7414 fd_trans_unregister(ret);
7415 unlock_user(p, arg1, 0);
7418 case TARGET_NR_openat:
7419 if (!(p = lock_user_string(arg2)))
7421 ret = get_errno(do_openat(cpu_env, arg1, p,
7422 target_to_host_bitmask(arg3, fcntl_flags_tbl),
7424 fd_trans_unregister(ret);
7425 unlock_user(p, arg2, 0);
7427 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7428 case TARGET_NR_name_to_handle_at:
7429 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7432 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7433 case TARGET_NR_open_by_handle_at:
7434 ret = do_open_by_handle_at(arg1, arg2, arg3);
7435 fd_trans_unregister(ret);
7438 case TARGET_NR_close:
7439 fd_trans_unregister(arg1);
7440 ret = get_errno(close(arg1));
7445 #ifdef TARGET_NR_fork
7446 case TARGET_NR_fork:
7447 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
7450 #ifdef TARGET_NR_waitpid
7451 case TARGET_NR_waitpid:
7454 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7455 if (!is_error(ret) && arg2 && ret
7456 && put_user_s32(host_to_target_waitstatus(status), arg2))
7461 #ifdef TARGET_NR_waitid
7462 case TARGET_NR_waitid:
7466 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7467 if (!is_error(ret) && arg3 && info.si_pid != 0) {
7468 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7470 host_to_target_siginfo(p, &info);
7471 unlock_user(p, arg3, sizeof(target_siginfo_t));
7476 #ifdef TARGET_NR_creat /* not on alpha */
7477 case TARGET_NR_creat:
7478 if (!(p = lock_user_string(arg1)))
7480 ret = get_errno(creat(p, arg2));
7481 fd_trans_unregister(ret);
7482 unlock_user(p, arg1, 0);
7485 #ifdef TARGET_NR_link
7486 case TARGET_NR_link:
7489 p = lock_user_string(arg1);
7490 p2 = lock_user_string(arg2);
7492 ret = -TARGET_EFAULT;
7494 ret = get_errno(link(p, p2));
7495 unlock_user(p2, arg2, 0);
7496 unlock_user(p, arg1, 0);
7500 #if defined(TARGET_NR_linkat)
7501 case TARGET_NR_linkat:
7506 p = lock_user_string(arg2);
7507 p2 = lock_user_string(arg4);
7509 ret = -TARGET_EFAULT;
7511 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7512 unlock_user(p, arg2, 0);
7513 unlock_user(p2, arg4, 0);
7517 #ifdef TARGET_NR_unlink
7518 case TARGET_NR_unlink:
7519 if (!(p = lock_user_string(arg1)))
7521 ret = get_errno(unlink(p));
7522 unlock_user(p, arg1, 0);
7525 #if defined(TARGET_NR_unlinkat)
7526 case TARGET_NR_unlinkat:
7527 if (!(p = lock_user_string(arg2)))
7529 ret = get_errno(unlinkat(arg1, p, arg3));
7530 unlock_user(p, arg2, 0);
7533 case TARGET_NR_execve:
7535 char **argp, **envp;
7538 abi_ulong guest_argp;
7539 abi_ulong guest_envp;
7546 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7547 if (get_user_ual(addr, gp))
7555 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7556 if (get_user_ual(addr, gp))
7563 argp = alloca((argc + 1) * sizeof(void *));
7564 envp = alloca((envc + 1) * sizeof(void *));
7566 for (gp = guest_argp, q = argp; gp;
7567 gp += sizeof(abi_ulong), q++) {
7568 if (get_user_ual(addr, gp))
7572 if (!(*q = lock_user_string(addr)))
7574 total_size += strlen(*q) + 1;
7578 for (gp = guest_envp, q = envp; gp;
7579 gp += sizeof(abi_ulong), q++) {
7580 if (get_user_ual(addr, gp))
7584 if (!(*q = lock_user_string(addr)))
7586 total_size += strlen(*q) + 1;
7590 if (!(p = lock_user_string(arg1)))
7592 /* Although execve() is not an interruptible syscall it is
7593 * a special case where we must use the safe_syscall wrapper:
7594 * if we allow a signal to happen before we make the host
7595 * syscall then we will 'lose' it, because at the point of
7596 * execve the process leaves QEMU's control. So we use the
7597 * safe syscall wrapper to ensure that we either take the
7598 * signal as a guest signal, or else it does not happen
7599 * before the execve completes and makes it the other
7600 * program's problem.
7602 ret = get_errno(safe_execve(p, argp, envp));
7603 unlock_user(p, arg1, 0);
7608 ret = -TARGET_EFAULT;
7611 for (gp = guest_argp, q = argp; *q;
7612 gp += sizeof(abi_ulong), q++) {
7613 if (get_user_ual(addr, gp)
7616 unlock_user(*q, addr, 0);
7618 for (gp = guest_envp, q = envp; *q;
7619 gp += sizeof(abi_ulong), q++) {
7620 if (get_user_ual(addr, gp)
7623 unlock_user(*q, addr, 0);
7627 case TARGET_NR_chdir:
7628 if (!(p = lock_user_string(arg1)))
7630 ret = get_errno(chdir(p));
7631 unlock_user(p, arg1, 0);
7633 #ifdef TARGET_NR_time
7634 case TARGET_NR_time:
7637 ret = get_errno(time(&host_time));
7640 && put_user_sal(host_time, arg1))
7645 #ifdef TARGET_NR_mknod
7646 case TARGET_NR_mknod:
7647 if (!(p = lock_user_string(arg1)))
7649 ret = get_errno(mknod(p, arg2, arg3));
7650 unlock_user(p, arg1, 0);
7653 #if defined(TARGET_NR_mknodat)
7654 case TARGET_NR_mknodat:
7655 if (!(p = lock_user_string(arg2)))
7657 ret = get_errno(mknodat(arg1, p, arg3, arg4));
7658 unlock_user(p, arg2, 0);
7661 #ifdef TARGET_NR_chmod
7662 case TARGET_NR_chmod:
7663 if (!(p = lock_user_string(arg1)))
7665 ret = get_errno(chmod(p, arg2));
7666 unlock_user(p, arg1, 0);
7669 #ifdef TARGET_NR_break
7670 case TARGET_NR_break:
7673 #ifdef TARGET_NR_oldstat
7674 case TARGET_NR_oldstat:
7677 case TARGET_NR_lseek:
7678 ret = get_errno(lseek(arg1, arg2, arg3));
7680 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7681 /* Alpha specific */
7682 case TARGET_NR_getxpid:
7683 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7684 ret = get_errno(getpid());
7687 #ifdef TARGET_NR_getpid
7688 case TARGET_NR_getpid:
7689 ret = get_errno(getpid());
7692 case TARGET_NR_mount:
7694 /* need to look at the data field */
7698 p = lock_user_string(arg1);
7706 p2 = lock_user_string(arg2);
7709 unlock_user(p, arg1, 0);
7715 p3 = lock_user_string(arg3);
7718 unlock_user(p, arg1, 0);
7720 unlock_user(p2, arg2, 0);
7727 /* FIXME - arg5 should be locked, but it isn't clear how to
7728 * do that since it's not guaranteed to be a NULL-terminated
7732 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7734 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7736 ret = get_errno(ret);
7739 unlock_user(p, arg1, 0);
7741 unlock_user(p2, arg2, 0);
7743 unlock_user(p3, arg3, 0);
7747 #ifdef TARGET_NR_umount
7748 case TARGET_NR_umount:
7749 if (!(p = lock_user_string(arg1)))
7751 ret = get_errno(umount(p));
7752 unlock_user(p, arg1, 0);
7755 #ifdef TARGET_NR_stime /* not on alpha */
7756 case TARGET_NR_stime:
7759 if (get_user_sal(host_time, arg1))
7761 ret = get_errno(stime(&host_time));
7765 case TARGET_NR_ptrace:
7767 #ifdef TARGET_NR_alarm /* not on alpha */
7768 case TARGET_NR_alarm:
7772 #ifdef TARGET_NR_oldfstat
7773 case TARGET_NR_oldfstat:
7776 #ifdef TARGET_NR_pause /* not on alpha */
7777 case TARGET_NR_pause:
7778 if (!block_signals()) {
7779 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7781 ret = -TARGET_EINTR;
7784 #ifdef TARGET_NR_utime
7785 case TARGET_NR_utime:
7787 struct utimbuf tbuf, *host_tbuf;
7788 struct target_utimbuf *target_tbuf;
7790 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
7792 tbuf.actime = tswapal(target_tbuf->actime);
7793 tbuf.modtime = tswapal(target_tbuf->modtime);
7794 unlock_user_struct(target_tbuf, arg2, 0);
7799 if (!(p = lock_user_string(arg1)))
7801 ret = get_errno(utime(p, host_tbuf));
7802 unlock_user(p, arg1, 0);
7806 #ifdef TARGET_NR_utimes
7807 case TARGET_NR_utimes:
7809 struct timeval *tvp, tv[2];
7811 if (copy_from_user_timeval(&tv[0], arg2)
7812 || copy_from_user_timeval(&tv[1],
7813 arg2 + sizeof(struct target_timeval)))
7819 if (!(p = lock_user_string(arg1)))
7821 ret = get_errno(utimes(p, tvp));
7822 unlock_user(p, arg1, 0);
7826 #if defined(TARGET_NR_futimesat)
7827 case TARGET_NR_futimesat:
7829 struct timeval *tvp, tv[2];
7831 if (copy_from_user_timeval(&tv[0], arg3)
7832 || copy_from_user_timeval(&tv[1],
7833 arg3 + sizeof(struct target_timeval)))
7839 if (!(p = lock_user_string(arg2)))
7841 ret = get_errno(futimesat(arg1, path(p), tvp));
7842 unlock_user(p, arg2, 0);
7846 #ifdef TARGET_NR_stty
7847 case TARGET_NR_stty:
7850 #ifdef TARGET_NR_gtty
7851 case TARGET_NR_gtty:
7854 #ifdef TARGET_NR_access
7855 case TARGET_NR_access:
7856 if (!(p = lock_user_string(arg1)))
7858 ret = get_errno(access(path(p), arg2));
7859 unlock_user(p, arg1, 0);
7862 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7863 case TARGET_NR_faccessat:
7864 if (!(p = lock_user_string(arg2)))
7866 ret = get_errno(faccessat(arg1, p, arg3, 0));
7867 unlock_user(p, arg2, 0);
7870 #ifdef TARGET_NR_nice /* not on alpha */
7871 case TARGET_NR_nice:
7872 ret = get_errno(nice(arg1));
7875 #ifdef TARGET_NR_ftime
7876 case TARGET_NR_ftime:
7879 case TARGET_NR_sync:
7883 case TARGET_NR_kill:
7884 ret = get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
7886 #ifdef TARGET_NR_rename
7887 case TARGET_NR_rename:
7890 p = lock_user_string(arg1);
7891 p2 = lock_user_string(arg2);
7893 ret = -TARGET_EFAULT;
7895 ret = get_errno(rename(p, p2));
7896 unlock_user(p2, arg2, 0);
7897 unlock_user(p, arg1, 0);
7901 #if defined(TARGET_NR_renameat)
7902 case TARGET_NR_renameat:
7905 p = lock_user_string(arg2);
7906 p2 = lock_user_string(arg4);
7908 ret = -TARGET_EFAULT;
7910 ret = get_errno(renameat(arg1, p, arg3, p2));
7911 unlock_user(p2, arg4, 0);
7912 unlock_user(p, arg2, 0);
7916 #ifdef TARGET_NR_mkdir
7917 case TARGET_NR_mkdir:
7918 if (!(p = lock_user_string(arg1)))
7920 ret = get_errno(mkdir(p, arg2));
7921 unlock_user(p, arg1, 0);
7924 #if defined(TARGET_NR_mkdirat)
7925 case TARGET_NR_mkdirat:
7926 if (!(p = lock_user_string(arg2)))
7928 ret = get_errno(mkdirat(arg1, p, arg3));
7929 unlock_user(p, arg2, 0);
7932 #ifdef TARGET_NR_rmdir
7933 case TARGET_NR_rmdir:
7934 if (!(p = lock_user_string(arg1)))
7936 ret = get_errno(rmdir(p));
7937 unlock_user(p, arg1, 0);
7941 ret = get_errno(dup(arg1));
7943 fd_trans_dup(arg1, ret);
7946 #ifdef TARGET_NR_pipe
7947 case TARGET_NR_pipe:
7948 ret = do_pipe(cpu_env, arg1, 0, 0);
7951 #ifdef TARGET_NR_pipe2
7952 case TARGET_NR_pipe2:
7953 ret = do_pipe(cpu_env, arg1,
7954 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
7957 case TARGET_NR_times:
7959 struct target_tms *tmsp;
7961 ret = get_errno(times(&tms));
7963 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
7966 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
7967 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
7968 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
7969 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
7972 ret = host_to_target_clock_t(ret);
7975 #ifdef TARGET_NR_prof
7976 case TARGET_NR_prof:
7979 #ifdef TARGET_NR_signal
7980 case TARGET_NR_signal:
7983 case TARGET_NR_acct:
7985 ret = get_errno(acct(NULL));
7987 if (!(p = lock_user_string(arg1)))
7989 ret = get_errno(acct(path(p)));
7990 unlock_user(p, arg1, 0);
7993 #ifdef TARGET_NR_umount2
7994 case TARGET_NR_umount2:
7995 if (!(p = lock_user_string(arg1)))
7997 ret = get_errno(umount2(p, arg2));
7998 unlock_user(p, arg1, 0);
8001 #ifdef TARGET_NR_lock
8002 case TARGET_NR_lock:
8005 case TARGET_NR_ioctl:
8006 ret = do_ioctl(arg1, arg2, arg3);
8008 case TARGET_NR_fcntl:
8009 ret = do_fcntl(arg1, arg2, arg3);
8011 #ifdef TARGET_NR_mpx
8015 case TARGET_NR_setpgid:
8016 ret = get_errno(setpgid(arg1, arg2));
8018 #ifdef TARGET_NR_ulimit
8019 case TARGET_NR_ulimit:
8022 #ifdef TARGET_NR_oldolduname
8023 case TARGET_NR_oldolduname:
8026 case TARGET_NR_umask:
8027 ret = get_errno(umask(arg1));
8029 case TARGET_NR_chroot:
8030 if (!(p = lock_user_string(arg1)))
8032 ret = get_errno(chroot(p));
8033 unlock_user(p, arg1, 0);
8035 #ifdef TARGET_NR_ustat
8036 case TARGET_NR_ustat:
8039 #ifdef TARGET_NR_dup2
8040 case TARGET_NR_dup2:
8041 ret = get_errno(dup2(arg1, arg2));
8043 fd_trans_dup(arg1, arg2);
8047 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8048 case TARGET_NR_dup3:
8049 ret = get_errno(dup3(arg1, arg2, arg3));
8051 fd_trans_dup(arg1, arg2);
8055 #ifdef TARGET_NR_getppid /* not on alpha */
8056 case TARGET_NR_getppid:
8057 ret = get_errno(getppid());
8060 #ifdef TARGET_NR_getpgrp
8061 case TARGET_NR_getpgrp:
8062 ret = get_errno(getpgrp());
8065 case TARGET_NR_setsid:
8066 ret = get_errno(setsid());
8068 #ifdef TARGET_NR_sigaction
8069 case TARGET_NR_sigaction:
8071 #if defined(TARGET_ALPHA)
8072 struct target_sigaction act, oact, *pact = 0;
8073 struct target_old_sigaction *old_act;
8075 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8077 act._sa_handler = old_act->_sa_handler;
8078 target_siginitset(&act.sa_mask, old_act->sa_mask);
8079 act.sa_flags = old_act->sa_flags;
8080 act.sa_restorer = 0;
8081 unlock_user_struct(old_act, arg2, 0);
8084 ret = get_errno(do_sigaction(arg1, pact, &oact));
8085 if (!is_error(ret) && arg3) {
8086 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8088 old_act->_sa_handler = oact._sa_handler;
8089 old_act->sa_mask = oact.sa_mask.sig[0];
8090 old_act->sa_flags = oact.sa_flags;
8091 unlock_user_struct(old_act, arg3, 1);
8093 #elif defined(TARGET_MIPS)
8094 struct target_sigaction act, oact, *pact, *old_act;
8097 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8099 act._sa_handler = old_act->_sa_handler;
8100 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8101 act.sa_flags = old_act->sa_flags;
8102 unlock_user_struct(old_act, arg2, 0);
8108 ret = get_errno(do_sigaction(arg1, pact, &oact));
8110 if (!is_error(ret) && arg3) {
8111 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8113 old_act->_sa_handler = oact._sa_handler;
8114 old_act->sa_flags = oact.sa_flags;
8115 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8116 old_act->sa_mask.sig[1] = 0;
8117 old_act->sa_mask.sig[2] = 0;
8118 old_act->sa_mask.sig[3] = 0;
8119 unlock_user_struct(old_act, arg3, 1);
8122 struct target_old_sigaction *old_act;
8123 struct target_sigaction act, oact, *pact;
8125 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8127 act._sa_handler = old_act->_sa_handler;
8128 target_siginitset(&act.sa_mask, old_act->sa_mask);
8129 act.sa_flags = old_act->sa_flags;
8130 act.sa_restorer = old_act->sa_restorer;
8131 unlock_user_struct(old_act, arg2, 0);
8136 ret = get_errno(do_sigaction(arg1, pact, &oact));
8137 if (!is_error(ret) && arg3) {
8138 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8140 old_act->_sa_handler = oact._sa_handler;
8141 old_act->sa_mask = oact.sa_mask.sig[0];
8142 old_act->sa_flags = oact.sa_flags;
8143 old_act->sa_restorer = oact.sa_restorer;
8144 unlock_user_struct(old_act, arg3, 1);
8150 case TARGET_NR_rt_sigaction:
8152 #if defined(TARGET_ALPHA)
8153 struct target_sigaction act, oact, *pact = 0;
8154 struct target_rt_sigaction *rt_act;
8156 if (arg4 != sizeof(target_sigset_t)) {
8157 ret = -TARGET_EINVAL;
8161 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8163 act._sa_handler = rt_act->_sa_handler;
8164 act.sa_mask = rt_act->sa_mask;
8165 act.sa_flags = rt_act->sa_flags;
8166 act.sa_restorer = arg5;
8167 unlock_user_struct(rt_act, arg2, 0);
8170 ret = get_errno(do_sigaction(arg1, pact, &oact));
8171 if (!is_error(ret) && arg3) {
8172 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8174 rt_act->_sa_handler = oact._sa_handler;
8175 rt_act->sa_mask = oact.sa_mask;
8176 rt_act->sa_flags = oact.sa_flags;
8177 unlock_user_struct(rt_act, arg3, 1);
8180 struct target_sigaction *act;
8181 struct target_sigaction *oact;
8183 if (arg4 != sizeof(target_sigset_t)) {
8184 ret = -TARGET_EINVAL;
8188 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
8193 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8194 ret = -TARGET_EFAULT;
8195 goto rt_sigaction_fail;
8199 ret = get_errno(do_sigaction(arg1, act, oact));
8202 unlock_user_struct(act, arg2, 0);
8204 unlock_user_struct(oact, arg3, 1);
8208 #ifdef TARGET_NR_sgetmask /* not on alpha */
8209 case TARGET_NR_sgetmask:
8212 abi_ulong target_set;
8213 ret = do_sigprocmask(0, NULL, &cur_set);
8215 host_to_target_old_sigset(&target_set, &cur_set);
8221 #ifdef TARGET_NR_ssetmask /* not on alpha */
8222 case TARGET_NR_ssetmask:
8224 sigset_t set, oset, cur_set;
8225 abi_ulong target_set = arg1;
8226 /* We only have one word of the new mask so we must read
8227 * the rest of it with do_sigprocmask() and OR in this word.
8228 * We are guaranteed that a do_sigprocmask() that only queries
8229 * the signal mask will not fail.
8231 ret = do_sigprocmask(0, NULL, &cur_set);
8233 target_to_host_old_sigset(&set, &target_set);
8234 sigorset(&set, &set, &cur_set);
8235 ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8237 host_to_target_old_sigset(&target_set, &oset);
8243 #ifdef TARGET_NR_sigprocmask
8244 case TARGET_NR_sigprocmask:
8246 #if defined(TARGET_ALPHA)
8247 sigset_t set, oldset;
8252 case TARGET_SIG_BLOCK:
8255 case TARGET_SIG_UNBLOCK:
8258 case TARGET_SIG_SETMASK:
8262 ret = -TARGET_EINVAL;
8266 target_to_host_old_sigset(&set, &mask);
8268 ret = do_sigprocmask(how, &set, &oldset);
8269 if (!is_error(ret)) {
8270 host_to_target_old_sigset(&mask, &oldset);
8272 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8275 sigset_t set, oldset, *set_ptr;
8280 case TARGET_SIG_BLOCK:
8283 case TARGET_SIG_UNBLOCK:
8286 case TARGET_SIG_SETMASK:
8290 ret = -TARGET_EINVAL;
8293 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8295 target_to_host_old_sigset(&set, p);
8296 unlock_user(p, arg2, 0);
8302 ret = do_sigprocmask(how, set_ptr, &oldset);
8303 if (!is_error(ret) && arg3) {
8304 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8306 host_to_target_old_sigset(p, &oldset);
8307 unlock_user(p, arg3, sizeof(target_sigset_t));
8313 case TARGET_NR_rt_sigprocmask:
8316 sigset_t set, oldset, *set_ptr;
8318 if (arg4 != sizeof(target_sigset_t)) {
8319 ret = -TARGET_EINVAL;
8325 case TARGET_SIG_BLOCK:
8328 case TARGET_SIG_UNBLOCK:
8331 case TARGET_SIG_SETMASK:
8335 ret = -TARGET_EINVAL;
8338 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8340 target_to_host_sigset(&set, p);
8341 unlock_user(p, arg2, 0);
8347 ret = do_sigprocmask(how, set_ptr, &oldset);
8348 if (!is_error(ret) && arg3) {
8349 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8351 host_to_target_sigset(p, &oldset);
8352 unlock_user(p, arg3, sizeof(target_sigset_t));
8356 #ifdef TARGET_NR_sigpending
8357 case TARGET_NR_sigpending:
8360 ret = get_errno(sigpending(&set));
8361 if (!is_error(ret)) {
8362 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8364 host_to_target_old_sigset(p, &set);
8365 unlock_user(p, arg1, sizeof(target_sigset_t));
8370 case TARGET_NR_rt_sigpending:
8374 /* Yes, this check is >, not != like most. We follow the kernel's
8375 * logic and it does it like this because it implements
8376 * NR_sigpending through the same code path, and in that case
8377 * the old_sigset_t is smaller in size.
8379 if (arg2 > sizeof(target_sigset_t)) {
8380 ret = -TARGET_EINVAL;
8384 ret = get_errno(sigpending(&set));
8385 if (!is_error(ret)) {
8386 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8388 host_to_target_sigset(p, &set);
8389 unlock_user(p, arg1, sizeof(target_sigset_t));
8393 #ifdef TARGET_NR_sigsuspend
8394 case TARGET_NR_sigsuspend:
8396 TaskState *ts = cpu->opaque;
8397 #if defined(TARGET_ALPHA)
8398 abi_ulong mask = arg1;
8399 target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8401 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8403 target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8404 unlock_user(p, arg1, 0);
8406 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8408 if (ret != -TARGET_ERESTARTSYS) {
8409 ts->in_sigsuspend = 1;
8414 case TARGET_NR_rt_sigsuspend:
8416 TaskState *ts = cpu->opaque;
8418 if (arg2 != sizeof(target_sigset_t)) {
8419 ret = -TARGET_EINVAL;
8422 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8424 target_to_host_sigset(&ts->sigsuspend_mask, p);
8425 unlock_user(p, arg1, 0);
8426 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8428 if (ret != -TARGET_ERESTARTSYS) {
8429 ts->in_sigsuspend = 1;
8433 case TARGET_NR_rt_sigtimedwait:
8436 struct timespec uts, *puts;
8439 if (arg4 != sizeof(target_sigset_t)) {
8440 ret = -TARGET_EINVAL;
8444 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8446 target_to_host_sigset(&set, p);
8447 unlock_user(p, arg1, 0);
8450 target_to_host_timespec(puts, arg3);
8454 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8456 if (!is_error(ret)) {
8458 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8463 host_to_target_siginfo(p, &uinfo);
8464 unlock_user(p, arg2, sizeof(target_siginfo_t));
8466 ret = host_to_target_signal(ret);
8470 case TARGET_NR_rt_sigqueueinfo:
8474 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8478 target_to_host_siginfo(&uinfo, p);
8479 unlock_user(p, arg1, 0);
8480 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8483 #ifdef TARGET_NR_sigreturn
8484 case TARGET_NR_sigreturn:
8485 if (block_signals()) {
8486 ret = -TARGET_ERESTARTSYS;
8488 ret = do_sigreturn(cpu_env);
8492 case TARGET_NR_rt_sigreturn:
8493 if (block_signals()) {
8494 ret = -TARGET_ERESTARTSYS;
8496 ret = do_rt_sigreturn(cpu_env);
8499 case TARGET_NR_sethostname:
8500 if (!(p = lock_user_string(arg1)))
8502 ret = get_errno(sethostname(p, arg2));
8503 unlock_user(p, arg1, 0);
8505 case TARGET_NR_setrlimit:
8507 int resource = target_to_host_resource(arg1);
8508 struct target_rlimit *target_rlim;
8510 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8512 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8513 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8514 unlock_user_struct(target_rlim, arg2, 0);
8515 ret = get_errno(setrlimit(resource, &rlim));
8518 case TARGET_NR_getrlimit:
8520 int resource = target_to_host_resource(arg1);
8521 struct target_rlimit *target_rlim;
8524 ret = get_errno(getrlimit(resource, &rlim));
8525 if (!is_error(ret)) {
8526 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8528 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8529 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8530 unlock_user_struct(target_rlim, arg2, 1);
8534 case TARGET_NR_getrusage:
8536 struct rusage rusage;
8537 ret = get_errno(getrusage(arg1, &rusage));
8538 if (!is_error(ret)) {
8539 ret = host_to_target_rusage(arg2, &rusage);
8543 case TARGET_NR_gettimeofday:
8546 ret = get_errno(gettimeofday(&tv, NULL));
8547 if (!is_error(ret)) {
8548 if (copy_to_user_timeval(arg1, &tv))
8553 case TARGET_NR_settimeofday:
8555 struct timeval tv, *ptv = NULL;
8556 struct timezone tz, *ptz = NULL;
8559 if (copy_from_user_timeval(&tv, arg1)) {
8566 if (copy_from_user_timezone(&tz, arg2)) {
8572 ret = get_errno(settimeofday(ptv, ptz));
8575 #if defined(TARGET_NR_select)
8576 case TARGET_NR_select:
8577 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
8578 ret = do_select(arg1, arg2, arg3, arg4, arg5);
8581 struct target_sel_arg_struct *sel;
8582 abi_ulong inp, outp, exp, tvp;
8585 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
8587 nsel = tswapal(sel->n);
8588 inp = tswapal(sel->inp);
8589 outp = tswapal(sel->outp);
8590 exp = tswapal(sel->exp);
8591 tvp = tswapal(sel->tvp);
8592 unlock_user_struct(sel, arg1, 0);
8593 ret = do_select(nsel, inp, outp, exp, tvp);
8598 #ifdef TARGET_NR_pselect6
8599 case TARGET_NR_pselect6:
8601 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
8602 fd_set rfds, wfds, efds;
8603 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
8604 struct timespec ts, *ts_ptr;
8607 * The 6th arg is actually two args smashed together,
8608 * so we cannot use the C library.
8616 abi_ulong arg_sigset, arg_sigsize, *arg7;
8617 target_sigset_t *target_sigset;
8625 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
8629 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
8633 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
8639 * This takes a timespec, and not a timeval, so we cannot
8640 * use the do_select() helper ...
8643 if (target_to_host_timespec(&ts, ts_addr)) {
8651 /* Extract the two packed args for the sigset */
8654 sig.size = SIGSET_T_SIZE;
8656 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
8660 arg_sigset = tswapal(arg7[0]);
8661 arg_sigsize = tswapal(arg7[1]);
8662 unlock_user(arg7, arg6, 0);
8666 if (arg_sigsize != sizeof(*target_sigset)) {
8667 /* Like the kernel, we enforce correct size sigsets */
8668 ret = -TARGET_EINVAL;
8671 target_sigset = lock_user(VERIFY_READ, arg_sigset,
8672 sizeof(*target_sigset), 1);
8673 if (!target_sigset) {
8676 target_to_host_sigset(&set, target_sigset);
8677 unlock_user(target_sigset, arg_sigset, 0);
8685 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
8688 if (!is_error(ret)) {
8689 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
8691 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
8693 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
8696 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
8702 #ifdef TARGET_NR_symlink
8703 case TARGET_NR_symlink:
8706 p = lock_user_string(arg1);
8707 p2 = lock_user_string(arg2);
8709 ret = -TARGET_EFAULT;
8711 ret = get_errno(symlink(p, p2));
8712 unlock_user(p2, arg2, 0);
8713 unlock_user(p, arg1, 0);
8717 #if defined(TARGET_NR_symlinkat)
8718 case TARGET_NR_symlinkat:
8721 p = lock_user_string(arg1);
8722 p2 = lock_user_string(arg3);
8724 ret = -TARGET_EFAULT;
8726 ret = get_errno(symlinkat(p, arg2, p2));
8727 unlock_user(p2, arg3, 0);
8728 unlock_user(p, arg1, 0);
8732 #ifdef TARGET_NR_oldlstat
8733 case TARGET_NR_oldlstat:
8736 #ifdef TARGET_NR_readlink
8737 case TARGET_NR_readlink:
8740 p = lock_user_string(arg1);
8741 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8743 ret = -TARGET_EFAULT;
8745 /* Short circuit this for the magic exe check. */
8746 ret = -TARGET_EINVAL;
8747 } else if (is_proc_myself((const char *)p, "exe")) {
8748 char real[PATH_MAX], *temp;
8749 temp = realpath(exec_path, real);
8750 /* Return value is # of bytes that we wrote to the buffer. */
8752 ret = get_errno(-1);
8754 /* Don't worry about sign mismatch as earlier mapping
8755 * logic would have thrown a bad address error. */
8756 ret = MIN(strlen(real), arg3);
8757 /* We cannot NUL terminate the string. */
8758 memcpy(p2, real, ret);
8761 ret = get_errno(readlink(path(p), p2, arg3));
8763 unlock_user(p2, arg2, ret);
8764 unlock_user(p, arg1, 0);
8768 #if defined(TARGET_NR_readlinkat)
8769 case TARGET_NR_readlinkat:
8772 p = lock_user_string(arg2);
8773 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8775 ret = -TARGET_EFAULT;
8776 } else if (is_proc_myself((const char *)p, "exe")) {
8777 char real[PATH_MAX], *temp;
8778 temp = realpath(exec_path, real);
8779 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
8780 snprintf((char *)p2, arg4, "%s", real);
8782 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
8784 unlock_user(p2, arg3, ret);
8785 unlock_user(p, arg2, 0);
8789 #ifdef TARGET_NR_uselib
8790 case TARGET_NR_uselib:
8793 #ifdef TARGET_NR_swapon
8794 case TARGET_NR_swapon:
8795 if (!(p = lock_user_string(arg1)))
8797 ret = get_errno(swapon(p, arg2));
8798 unlock_user(p, arg1, 0);
8801 case TARGET_NR_reboot:
8802 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
8803 /* arg4 must be ignored in all other cases */
8804 p = lock_user_string(arg4);
8808 ret = get_errno(reboot(arg1, arg2, arg3, p));
8809 unlock_user(p, arg4, 0);
8811 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
8814 #ifdef TARGET_NR_readdir
8815 case TARGET_NR_readdir:
8818 #ifdef TARGET_NR_mmap
8819 case TARGET_NR_mmap:
8820 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8821 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8822 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8823 || defined(TARGET_S390X)
8826 abi_ulong v1, v2, v3, v4, v5, v6;
8827 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
8835 unlock_user(v, arg1, 0);
8836 ret = get_errno(target_mmap(v1, v2, v3,
8837 target_to_host_bitmask(v4, mmap_flags_tbl),
8841 ret = get_errno(target_mmap(arg1, arg2, arg3,
8842 target_to_host_bitmask(arg4, mmap_flags_tbl),
8848 #ifdef TARGET_NR_mmap2
8849 case TARGET_NR_mmap2:
8851 #define MMAP_SHIFT 12
8853 ret = get_errno(target_mmap(arg1, arg2, arg3,
8854 target_to_host_bitmask(arg4, mmap_flags_tbl),
8856 arg6 << MMAP_SHIFT));
8859 case TARGET_NR_munmap:
8860 ret = get_errno(target_munmap(arg1, arg2));
8862 case TARGET_NR_mprotect:
8864 TaskState *ts = cpu->opaque;
8865 /* Special hack to detect libc making the stack executable. */
8866 if ((arg3 & PROT_GROWSDOWN)
8867 && arg1 >= ts->info->stack_limit
8868 && arg1 <= ts->info->start_stack) {
8869 arg3 &= ~PROT_GROWSDOWN;
8870 arg2 = arg2 + arg1 - ts->info->stack_limit;
8871 arg1 = ts->info->stack_limit;
8874 ret = get_errno(target_mprotect(arg1, arg2, arg3));
8876 #ifdef TARGET_NR_mremap
8877 case TARGET_NR_mremap:
8878 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
8881 /* ??? msync/mlock/munlock are broken for softmmu. */
8882 #ifdef TARGET_NR_msync
8883 case TARGET_NR_msync:
8884 ret = get_errno(msync(g2h(arg1), arg2, arg3));
8887 #ifdef TARGET_NR_mlock
8888 case TARGET_NR_mlock:
8889 ret = get_errno(mlock(g2h(arg1), arg2));
8892 #ifdef TARGET_NR_munlock
8893 case TARGET_NR_munlock:
8894 ret = get_errno(munlock(g2h(arg1), arg2));
8897 #ifdef TARGET_NR_mlockall
8898 case TARGET_NR_mlockall:
8899 ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
8902 #ifdef TARGET_NR_munlockall
8903 case TARGET_NR_munlockall:
8904 ret = get_errno(munlockall());
8907 case TARGET_NR_truncate:
8908 if (!(p = lock_user_string(arg1)))
8910 ret = get_errno(truncate(p, arg2));
8911 unlock_user(p, arg1, 0);
8913 case TARGET_NR_ftruncate:
8914 ret = get_errno(ftruncate(arg1, arg2));
8916 case TARGET_NR_fchmod:
8917 ret = get_errno(fchmod(arg1, arg2));
8919 #if defined(TARGET_NR_fchmodat)
8920 case TARGET_NR_fchmodat:
8921 if (!(p = lock_user_string(arg2)))
8923 ret = get_errno(fchmodat(arg1, p, arg3, 0));
8924 unlock_user(p, arg2, 0);
8927 case TARGET_NR_getpriority:
8928 /* Note that negative values are valid for getpriority, so we must
8929 differentiate based on errno settings. */
8931 ret = getpriority(arg1, arg2);
8932 if (ret == -1 && errno != 0) {
8933 ret = -host_to_target_errno(errno);
8937 /* Return value is the unbiased priority. Signal no error. */
8938 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
8940 /* Return value is a biased priority to avoid negative numbers. */
8944 case TARGET_NR_setpriority:
8945 ret = get_errno(setpriority(arg1, arg2, arg3));
8947 #ifdef TARGET_NR_profil
8948 case TARGET_NR_profil:
8951 case TARGET_NR_statfs:
8952 if (!(p = lock_user_string(arg1)))
8954 ret = get_errno(statfs(path(p), &stfs));
8955 unlock_user(p, arg1, 0);
8957 if (!is_error(ret)) {
8958 struct target_statfs *target_stfs;
8960 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
8962 __put_user(stfs.f_type, &target_stfs->f_type);
8963 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8964 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8965 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8966 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8967 __put_user(stfs.f_files, &target_stfs->f_files);
8968 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8969 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8970 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8971 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8972 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8973 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8974 unlock_user_struct(target_stfs, arg2, 1);
8977 case TARGET_NR_fstatfs:
8978 ret = get_errno(fstatfs(arg1, &stfs));
8979 goto convert_statfs;
8980 #ifdef TARGET_NR_statfs64
8981 case TARGET_NR_statfs64:
8982 if (!(p = lock_user_string(arg1)))
8984 ret = get_errno(statfs(path(p), &stfs));
8985 unlock_user(p, arg1, 0);
8987 if (!is_error(ret)) {
8988 struct target_statfs64 *target_stfs;
8990 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
8992 __put_user(stfs.f_type, &target_stfs->f_type);
8993 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8994 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8995 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8996 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8997 __put_user(stfs.f_files, &target_stfs->f_files);
8998 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8999 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9000 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9001 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9002 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9003 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9004 unlock_user_struct(target_stfs, arg3, 1);
9007 case TARGET_NR_fstatfs64:
9008 ret = get_errno(fstatfs(arg1, &stfs));
9009 goto convert_statfs64;
9011 #ifdef TARGET_NR_ioperm
9012 case TARGET_NR_ioperm:
9015 #ifdef TARGET_NR_socketcall
9016 case TARGET_NR_socketcall:
9017 ret = do_socketcall(arg1, arg2);
9020 #ifdef TARGET_NR_accept
9021 case TARGET_NR_accept:
9022 ret = do_accept4(arg1, arg2, arg3, 0);
9025 #ifdef TARGET_NR_accept4
9026 case TARGET_NR_accept4:
9027 ret = do_accept4(arg1, arg2, arg3, arg4);
9030 #ifdef TARGET_NR_bind
9031 case TARGET_NR_bind:
9032 ret = do_bind(arg1, arg2, arg3);
9035 #ifdef TARGET_NR_connect
9036 case TARGET_NR_connect:
9037 ret = do_connect(arg1, arg2, arg3);
9040 #ifdef TARGET_NR_getpeername
9041 case TARGET_NR_getpeername:
9042 ret = do_getpeername(arg1, arg2, arg3);
9045 #ifdef TARGET_NR_getsockname
9046 case TARGET_NR_getsockname:
9047 ret = do_getsockname(arg1, arg2, arg3);
9050 #ifdef TARGET_NR_getsockopt
9051 case TARGET_NR_getsockopt:
9052 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9055 #ifdef TARGET_NR_listen
9056 case TARGET_NR_listen:
9057 ret = get_errno(listen(arg1, arg2));
9060 #ifdef TARGET_NR_recv
9061 case TARGET_NR_recv:
9062 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9065 #ifdef TARGET_NR_recvfrom
9066 case TARGET_NR_recvfrom:
9067 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9070 #ifdef TARGET_NR_recvmsg
9071 case TARGET_NR_recvmsg:
9072 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
9075 #ifdef TARGET_NR_send
9076 case TARGET_NR_send:
9077 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9080 #ifdef TARGET_NR_sendmsg
9081 case TARGET_NR_sendmsg:
9082 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
9085 #ifdef TARGET_NR_sendmmsg
9086 case TARGET_NR_sendmmsg:
9087 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9089 case TARGET_NR_recvmmsg:
9090 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9093 #ifdef TARGET_NR_sendto
9094 case TARGET_NR_sendto:
9095 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9098 #ifdef TARGET_NR_shutdown
9099 case TARGET_NR_shutdown:
9100 ret = get_errno(shutdown(arg1, arg2));
9103 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9104 case TARGET_NR_getrandom:
9105 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9109 ret = get_errno(getrandom(p, arg2, arg3));
9110 unlock_user(p, arg1, ret);
9113 #ifdef TARGET_NR_socket
9114 case TARGET_NR_socket:
9115 ret = do_socket(arg1, arg2, arg3);
9116 fd_trans_unregister(ret);
9119 #ifdef TARGET_NR_socketpair
9120 case TARGET_NR_socketpair:
9121 ret = do_socketpair(arg1, arg2, arg3, arg4);
9124 #ifdef TARGET_NR_setsockopt
9125 case TARGET_NR_setsockopt:
9126 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9130 case TARGET_NR_syslog:
9131 if (!(p = lock_user_string(arg2)))
9133 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9134 unlock_user(p, arg2, 0);
9137 case TARGET_NR_setitimer:
9139 struct itimerval value, ovalue, *pvalue;
9143 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9144 || copy_from_user_timeval(&pvalue->it_value,
9145 arg2 + sizeof(struct target_timeval)))
9150 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9151 if (!is_error(ret) && arg3) {
9152 if (copy_to_user_timeval(arg3,
9153 &ovalue.it_interval)
9154 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9160 case TARGET_NR_getitimer:
9162 struct itimerval value;
9164 ret = get_errno(getitimer(arg1, &value));
9165 if (!is_error(ret) && arg2) {
9166 if (copy_to_user_timeval(arg2,
9168 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9174 #ifdef TARGET_NR_stat
9175 case TARGET_NR_stat:
9176 if (!(p = lock_user_string(arg1)))
9178 ret = get_errno(stat(path(p), &st));
9179 unlock_user(p, arg1, 0);
9182 #ifdef TARGET_NR_lstat
9183 case TARGET_NR_lstat:
9184 if (!(p = lock_user_string(arg1)))
9186 ret = get_errno(lstat(path(p), &st));
9187 unlock_user(p, arg1, 0);
9190 case TARGET_NR_fstat:
9192 ret = get_errno(fstat(arg1, &st));
9193 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9196 if (!is_error(ret)) {
9197 struct target_stat *target_st;
9199 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9201 memset(target_st, 0, sizeof(*target_st));
9202 __put_user(st.st_dev, &target_st->st_dev);
9203 __put_user(st.st_ino, &target_st->st_ino);
9204 __put_user(st.st_mode, &target_st->st_mode);
9205 __put_user(st.st_uid, &target_st->st_uid);
9206 __put_user(st.st_gid, &target_st->st_gid);
9207 __put_user(st.st_nlink, &target_st->st_nlink);
9208 __put_user(st.st_rdev, &target_st->st_rdev);
9209 __put_user(st.st_size, &target_st->st_size);
9210 __put_user(st.st_blksize, &target_st->st_blksize);
9211 __put_user(st.st_blocks, &target_st->st_blocks);
9212 __put_user(st.st_atime, &target_st->target_st_atime);
9213 __put_user(st.st_mtime, &target_st->target_st_mtime);
9214 __put_user(st.st_ctime, &target_st->target_st_ctime);
9215 unlock_user_struct(target_st, arg2, 1);
9219 #ifdef TARGET_NR_olduname
9220 case TARGET_NR_olduname:
9223 #ifdef TARGET_NR_iopl
9224 case TARGET_NR_iopl:
9227 case TARGET_NR_vhangup:
9228 ret = get_errno(vhangup());
9230 #ifdef TARGET_NR_idle
9231 case TARGET_NR_idle:
9234 #ifdef TARGET_NR_syscall
9235 case TARGET_NR_syscall:
9236 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9237 arg6, arg7, arg8, 0);
9240 case TARGET_NR_wait4:
9243 abi_long status_ptr = arg2;
9244 struct rusage rusage, *rusage_ptr;
9245 abi_ulong target_rusage = arg4;
9246 abi_long rusage_err;
9248 rusage_ptr = &rusage;
9251 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9252 if (!is_error(ret)) {
9253 if (status_ptr && ret) {
9254 status = host_to_target_waitstatus(status);
9255 if (put_user_s32(status, status_ptr))
9258 if (target_rusage) {
9259 rusage_err = host_to_target_rusage(target_rusage, &rusage);
9267 #ifdef TARGET_NR_swapoff
9268 case TARGET_NR_swapoff:
9269 if (!(p = lock_user_string(arg1)))
9271 ret = get_errno(swapoff(p));
9272 unlock_user(p, arg1, 0);
/* TARGET_NR_sysinfo: run host sysinfo() and copy each field into the
 * guest's target_sysinfo via __put_user, which handles the target's
 * endianness and field widths.
 * NOTE(review): this region is an elided paste — the embedded original
 * line numbers are non-contiguous, so braces/breaks/#endifs between the
 * numbered lines are missing from this view. */
9275 case TARGET_NR_sysinfo:
9277 struct target_sysinfo *target_value;
9278 struct sysinfo value;
9279 ret = get_errno(sysinfo(&value));
/* Only copy out when the call succeeded and the guest passed a buffer. */
9280 if (!is_error(ret) && arg1)
9282 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
9284 __put_user(value.uptime, &target_value->uptime);
9285 __put_user(value.loads[0], &target_value->loads[0]);
9286 __put_user(value.loads[1], &target_value->loads[1]);
9287 __put_user(value.loads[2], &target_value->loads[2]);
9288 __put_user(value.totalram, &target_value->totalram);
9289 __put_user(value.freeram, &target_value->freeram);
9290 __put_user(value.sharedram, &target_value->sharedram);
9291 __put_user(value.bufferram, &target_value->bufferram);
9292 __put_user(value.totalswap, &target_value->totalswap);
9293 __put_user(value.freeswap, &target_value->freeswap);
9294 __put_user(value.procs, &target_value->procs);
9295 __put_user(value.totalhigh, &target_value->totalhigh);
9296 __put_user(value.freehigh, &target_value->freehigh);
9297 __put_user(value.mem_unit, &target_value->mem_unit);
9298 unlock_user_struct(target_value, arg1, 1);
/* SysV IPC: some targets have the multiplexed ipc(2) entry point
 * (dispatched by do_ipc), others have the individual sem*/msg*/shm*
 * syscalls.  Calls with pointer arguments go through do_* conversion
 * helpers; pure-integer calls pass straight to the host syscall. */
9302 #ifdef TARGET_NR_ipc
9304 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
9307 #ifdef TARGET_NR_semget
9308 case TARGET_NR_semget:
9309 ret = get_errno(semget(arg1, arg2, arg3));
9312 #ifdef TARGET_NR_semop
9313 case TARGET_NR_semop:
9314 ret = do_semop(arg1, arg2, arg3);
9317 #ifdef TARGET_NR_semctl
9318 case TARGET_NR_semctl:
9319 ret = do_semctl(arg1, arg2, arg3, arg4);
9322 #ifdef TARGET_NR_msgctl
9323 case TARGET_NR_msgctl:
9324 ret = do_msgctl(arg1, arg2, arg3);
9327 #ifdef TARGET_NR_msgget
9328 case TARGET_NR_msgget:
9329 ret = get_errno(msgget(arg1, arg2));
9332 #ifdef TARGET_NR_msgrcv
9333 case TARGET_NR_msgrcv:
9334 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
9337 #ifdef TARGET_NR_msgsnd
9338 case TARGET_NR_msgsnd:
9339 ret = do_msgsnd(arg1, arg2, arg3, arg4);
9342 #ifdef TARGET_NR_shmget
9343 case TARGET_NR_shmget:
9344 ret = get_errno(shmget(arg1, arg2, arg3));
9347 #ifdef TARGET_NR_shmctl
9348 case TARGET_NR_shmctl:
9349 ret = do_shmctl(arg1, arg2, arg3);
9352 #ifdef TARGET_NR_shmat
9353 case TARGET_NR_shmat:
9354 ret = do_shmat(arg1, arg2, arg3);
9357 #ifdef TARGET_NR_shmdt
9358 case TARGET_NR_shmdt:
9359 ret = do_shmdt(arg1);
9362 case TARGET_NR_fsync:
9363 ret = get_errno(fsync(arg1));
/* clone(): argument order differs per target ABI; do_fork() takes a
 * canonical (flags, newsp, parent_tidptr, tls, child_tidptr) order and
 * the #if ladder below permutes arg3..arg6 to match. */
9365 case TARGET_NR_clone:
9366 /* Linux manages to have three different orderings for its
9367 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9368 * match the kernel's CONFIG_CLONE_* settings.
9369 * Microblaze is further special in that it uses a sixth
9370 * implicit argument to clone for the TLS pointer.
9372 #if defined(TARGET_MICROBLAZE)
9373 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
9374 #elif defined(TARGET_CLONE_BACKWARDS)
9375 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
9376 #elif defined(TARGET_CLONE_BACKWARDS2)
9377 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
9379 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
9382 #ifdef __NR_exit_group
9383 /* new thread calls */
/* exit_group: tell an attached gdb stub first, then let the host
 * terminate the whole thread group. */
9384 case TARGET_NR_exit_group:
9388 gdb_exit(cpu_env, arg1);
9389 ret = get_errno(exit_group(arg1));
9392 case TARGET_NR_setdomainname:
9393 if (!(p = lock_user_string(arg1)))
9395 ret = get_errno(setdomainname(p, arg2));
9396 unlock_user(p, arg1, 0);
/* uname: use the raw linux syscall so the struct layout matches, then
 * patch the machine field (and optionally the release) so the guest
 * sees the emulated CPU rather than the host. */
9398 case TARGET_NR_uname:
9399 /* no need to transcode because we use the linux syscall */
9401 struct new_utsname * buf;
9403 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
9405 ret = get_errno(sys_uname(buf));
9406 if (!is_error(ret)) {
9407 /* Overwrite the native machine name with whatever is being
9409 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
9410 /* Allow the user to override the reported release. */
9411 if (qemu_uname_release && *qemu_uname_release) {
9412 g_strlcpy(buf->release, qemu_uname_release,
9413 sizeof(buf->release));
9416 unlock_user_struct(buf, arg1, 1);
/* Assorted simple or unimplemented syscalls: LDT manipulation and vm86
 * (x86 only), module loading (not supported under user emulation),
 * quotactl, plus trivial pass-throughs. */
9420 case TARGET_NR_modify_ldt:
9421 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
9423 #if !defined(TARGET_X86_64)
9424 case TARGET_NR_vm86old:
9426 case TARGET_NR_vm86:
9427 ret = do_vm86(cpu_env, arg1, arg2);
9431 case TARGET_NR_adjtimex:
9433 #ifdef TARGET_NR_create_module
9434 case TARGET_NR_create_module:
9436 case TARGET_NR_init_module:
9437 case TARGET_NR_delete_module:
9438 #ifdef TARGET_NR_get_kernel_syms
9439 case TARGET_NR_get_kernel_syms:
9442 case TARGET_NR_quotactl:
9444 case TARGET_NR_getpgid:
9445 ret = get_errno(getpgid(arg1));
9447 case TARGET_NR_fchdir:
9448 ret = get_errno(fchdir(arg1));
9450 #ifdef TARGET_NR_bdflush /* not on x86_64 */
9451 case TARGET_NR_bdflush:
9454 #ifdef TARGET_NR_sysfs
9455 case TARGET_NR_sysfs:
9458 case TARGET_NR_personality:
9459 ret = get_errno(personality(arg1));
9461 #ifdef TARGET_NR_afs_syscall
9462 case TARGET_NR_afs_syscall:
/* _llseek: 64-bit seek assembled from two 32-bit halves (arg2 high,
 * arg3 low); the resulting offset is written back through arg4.  On
 * hosts without the raw syscall, plain lseek() is used instead. */
9465 #ifdef TARGET_NR__llseek /* Not on alpha */
9466 case TARGET_NR__llseek:
9469 #if !defined(__NR_llseek)
9470 res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
9472 ret = get_errno(res);
9477 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
9479 if ((ret == 0) && put_user_s64(res, arg4)) {
/* getdents / getdents64: directory-entry reads.  Three strategies:
 * 1) 32-bit target on 64-bit host: read host linux_dirent records into
 *    a scratch buffer, then repack each record into the (smaller)
 *    target_dirent layout in the guest buffer.
 * 2) Same-width target/host: byte-swap the records in place.
 * 3) No host getdents: synthesize getdents from getdents64, converting
 *    dirent64 records to target_dirent in place. */
9485 #ifdef TARGET_NR_getdents
9486 case TARGET_NR_getdents:
9487 #ifdef __NR_getdents
9488 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9490 struct target_dirent *target_dirp;
9491 struct linux_dirent *dirp;
9492 abi_long count = arg3;
9494 dirp = g_try_malloc(count);
9496 ret = -TARGET_ENOMEM;
9500 ret = get_errno(sys_getdents(arg1, dirp, count));
9501 if (!is_error(ret)) {
9502 struct linux_dirent *de;
9503 struct target_dirent *tde;
9505 int reclen, treclen;
9506 int count1, tnamelen;
9510 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
/* Repack loop: target records are never larger than host ones, so the
 * converted stream always fits in the guest's count-byte buffer. */
9514 reclen = de->d_reclen;
9515 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
9516 assert(tnamelen >= 0);
9517 treclen = tnamelen + offsetof(struct target_dirent, d_name);
9518 assert(count1 + treclen <= count);
9519 tde->d_reclen = tswap16(treclen);
9520 tde->d_ino = tswapal(de->d_ino);
9521 tde->d_off = tswapal(de->d_off);
9522 memcpy(tde->d_name, de->d_name, tnamelen);
9523 de = (struct linux_dirent *)((char *)de + reclen);
9525 tde = (struct target_dirent *)((char *)tde + treclen);
9529 unlock_user(target_dirp, arg2, ret);
/* Same-width case: swap reclen/ino/off of each record in place. */
9535 struct linux_dirent *dirp;
9536 abi_long count = arg3;
9538 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9540 ret = get_errno(sys_getdents(arg1, dirp, count));
9541 if (!is_error(ret)) {
9542 struct linux_dirent *de;
9547 reclen = de->d_reclen;
9550 de->d_reclen = tswap16(reclen);
9551 tswapls(&de->d_ino);
9552 tswapls(&de->d_off);
9553 de = (struct linux_dirent *)((char *)de + reclen);
9557 unlock_user(dirp, arg2, ret);
9561 /* Implement getdents in terms of getdents64 */
9563 struct linux_dirent64 *dirp;
9564 abi_long count = arg3;
9566 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
9570 ret = get_errno(sys_getdents64(arg1, dirp, count));
9571 if (!is_error(ret)) {
9572 /* Convert the dirent64 structs to target dirent. We do this
9573 * in-place, since we can guarantee that a target_dirent is no
9574 * larger than a dirent64; however this means we have to be
9575 * careful to read everything before writing in the new format.
9577 struct linux_dirent64 *de;
9578 struct target_dirent *tde;
9583 tde = (struct target_dirent *)dirp;
9585 int namelen, treclen;
9586 int reclen = de->d_reclen;
/* Snapshot all dirent64 fields before overwriting the buffer. */
9587 uint64_t ino = de->d_ino;
9588 int64_t off = de->d_off;
9589 uint8_t type = de->d_type;
9591 namelen = strlen(de->d_name);
9592 treclen = offsetof(struct target_dirent, d_name)
9594 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
9596 memmove(tde->d_name, de->d_name, namelen + 1);
9597 tde->d_ino = tswapal(ino);
9598 tde->d_off = tswapal(off);
9599 tde->d_reclen = tswap16(treclen);
9600 /* The target_dirent type is in what was formerly a padding
9601 * byte at the end of the structure:
9603 *(((char *)tde) + treclen - 1) = type;
9605 de = (struct linux_dirent64 *)((char *)de + reclen);
9606 tde = (struct target_dirent *)((char *)tde + treclen);
9612 unlock_user(dirp, arg2, ret);
9616 #endif /* TARGET_NR_getdents */
/* getdents64: layout matches the host's, so only a per-record byte
 * swap of reclen/ino/off is needed. */
9617 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9618 case TARGET_NR_getdents64:
9620 struct linux_dirent64 *dirp;
9621 abi_long count = arg3;
9622 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9624 ret = get_errno(sys_getdents64(arg1, dirp, count));
9625 if (!is_error(ret)) {
9626 struct linux_dirent64 *de;
9631 reclen = de->d_reclen;
9634 de->d_reclen = tswap16(reclen);
9635 tswap64s((uint64_t *)&de->d_ino);
9636 tswap64s((uint64_t *)&de->d_off);
9637 de = (struct linux_dirent64 *)((char *)de + reclen);
9641 unlock_user(dirp, arg2, ret);
9644 #endif /* TARGET_NR_getdents64 */
9645 #if defined(TARGET_NR__newselect)
9646 case TARGET_NR__newselect:
9647 ret = do_select(arg1, arg2, arg3, arg4, arg5);
/* poll/ppoll: both are funnelled through safe_ppoll().  The guest
 * pollfd array is locked and converted to host struct pollfd; ppoll
 * additionally converts the timespec and sigset arguments, while poll
 * converts its millisecond timeout into a timespec. */
9650 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9651 # ifdef TARGET_NR_poll
9652 case TARGET_NR_poll:
9654 # ifdef TARGET_NR_ppoll
9655 case TARGET_NR_ppoll:
9658 struct target_pollfd *target_pfd;
9659 unsigned int nfds = arg2;
9666 target_pfd = lock_user(VERIFY_WRITE, arg1,
9667 sizeof(struct target_pollfd) * nfds, 1);
9672 pfd = alloca(sizeof(struct pollfd) * nfds);
9673 for (i = 0; i < nfds; i++) {
9674 pfd[i].fd = tswap32(target_pfd[i].fd);
9675 pfd[i].events = tswap16(target_pfd[i].events);
9680 # ifdef TARGET_NR_ppoll
9681 case TARGET_NR_ppoll:
9683 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
9684 target_sigset_t *target_set;
9685 sigset_t _set, *set = &_set;
9688 if (target_to_host_timespec(timeout_ts, arg3)) {
9689 unlock_user(target_pfd, arg1, 0);
/* The kernel requires the sigset size argument to match exactly. */
9697 if (arg5 != sizeof(target_sigset_t)) {
9698 unlock_user(target_pfd, arg1, 0);
9699 ret = -TARGET_EINVAL;
9703 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
9705 unlock_user(target_pfd, arg1, 0);
9708 target_to_host_sigset(set, target_set);
9713 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
9714 set, SIGSET_T_SIZE));
/* ppoll updates the timeout with the time remaining; copy it back. */
9716 if (!is_error(ret) && arg3) {
9717 host_to_target_timespec(arg3, timeout_ts);
9720 unlock_user(target_set, arg4, 0);
9725 # ifdef TARGET_NR_poll
9726 case TARGET_NR_poll:
9728 struct timespec ts, *pts;
9731 /* Convert ms to secs, ns */
9732 ts.tv_sec = arg3 / 1000;
9733 ts.tv_nsec = (arg3 % 1000) * 1000000LL;
9736 /* -ve poll() timeout means "infinite" */
9739 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
9744 g_assert_not_reached();
/* Common tail: write the revents results back to the guest array. */
9747 if (!is_error(ret)) {
9748 for(i = 0; i < nfds; i++) {
9749 target_pfd[i].revents = tswap16(pfd[i].revents);
9752 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
9756 case TARGET_NR_flock:
9757 /* NOTE: the flock constant seems to be the same for every
9759 ret = get_errno(safe_flock(arg1, arg2));
/* readv/writev: lock_iovec converts the guest iovec array; a NULL
 * return means conversion failed and errno holds the reason. */
9761 case TARGET_NR_readv:
9763 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9765 ret = get_errno(safe_readv(arg1, vec, arg3));
9766 unlock_iovec(vec, arg2, arg3, 1);
9768 ret = -host_to_target_errno(errno);
9772 case TARGET_NR_writev:
9774 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9776 ret = get_errno(safe_writev(arg1, vec, arg3));
9777 unlock_iovec(vec, arg2, arg3, 0);
9779 ret = -host_to_target_errno(errno);
9783 case TARGET_NR_getsid:
9784 ret = get_errno(getsid(arg1));
9786 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9787 case TARGET_NR_fdatasync:
9788 ret = get_errno(fdatasync(arg1));
9791 #ifdef TARGET_NR__sysctl
9792 case TARGET_NR__sysctl:
9793 /* We don't implement this, but ENOTDIR is always a safe
9795 ret = -TARGET_ENOTDIR;
/* sched_getaffinity: guest mask size must be a multiple of abi_ulong;
 * the host buffer is rounded up to host-ulong granularity and excess
 * host data beyond the guest's buffer is tolerated only if no online
 * CPU actually needs it. */
9798 case TARGET_NR_sched_getaffinity:
9800 unsigned int mask_size;
9801 unsigned long *mask;
9804 * sched_getaffinity needs multiples of ulong, so need to take
9805 * care of mismatches between target ulong and host ulong sizes.
9807 if (arg2 & (sizeof(abi_ulong) - 1)) {
9808 ret = -TARGET_EINVAL;
9811 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9813 mask = alloca(mask_size);
9814 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
9816 if (!is_error(ret)) {
9818 /* More data returned than the caller's buffer will fit.
9819 * This only happens if sizeof(abi_long) < sizeof(long)
9820 * and the caller passed us a buffer holding an odd number
9821 * of abi_longs. If the host kernel is actually using the
9822 * extra 4 bytes then fail EINVAL; otherwise we can just
9823 * ignore them and only copy the interesting part.
9825 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
9826 if (numcpus > arg2 * 8) {
9827 ret = -TARGET_EINVAL;
9833 if (copy_to_user(arg3, mask, ret)) {
/* sched_setaffinity: mirror-image of the above for the write path. */
9839 case TARGET_NR_sched_setaffinity:
9841 unsigned int mask_size;
9842 unsigned long *mask;
9845 * sched_setaffinity needs multiples of ulong, so need to take
9846 * care of mismatches between target ulong and host ulong sizes.
9848 if (arg2 & (sizeof(abi_ulong) - 1)) {
9849 ret = -TARGET_EINVAL;
9852 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9854 mask = alloca(mask_size);
9855 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
9858 memcpy(mask, p, arg2);
9859 unlock_user_struct(p, arg2, 0);
9861 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
/* Scheduler parameter syscalls: only sched_priority needs byte-swap
 * conversion between target and host sched_param. */
9864 case TARGET_NR_sched_setparam:
9866 struct sched_param *target_schp;
9867 struct sched_param schp;
9870 return -TARGET_EINVAL;
9872 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
9874 schp.sched_priority = tswap32(target_schp->sched_priority);
9875 unlock_user_struct(target_schp, arg2, 0);
9876 ret = get_errno(sched_setparam(arg1, &schp));
9879 case TARGET_NR_sched_getparam:
9881 struct sched_param *target_schp;
9882 struct sched_param schp;
9885 return -TARGET_EINVAL;
9887 ret = get_errno(sched_getparam(arg1, &schp));
9888 if (!is_error(ret)) {
9889 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
9891 target_schp->sched_priority = tswap32(schp.sched_priority);
9892 unlock_user_struct(target_schp, arg2, 1);
9896 case TARGET_NR_sched_setscheduler:
9898 struct sched_param *target_schp;
9899 struct sched_param schp;
9901 return -TARGET_EINVAL;
9903 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
9905 schp.sched_priority = tswap32(target_schp->sched_priority);
9906 unlock_user_struct(target_schp, arg3, 0);
9907 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
9910 case TARGET_NR_sched_getscheduler:
9911 ret = get_errno(sched_getscheduler(arg1));
9913 case TARGET_NR_sched_yield:
9914 ret = get_errno(sched_yield());
9916 case TARGET_NR_sched_get_priority_max:
9917 ret = get_errno(sched_get_priority_max(arg1));
9919 case TARGET_NR_sched_get_priority_min:
9920 ret = get_errno(sched_get_priority_min(arg1));
9922 case TARGET_NR_sched_rr_get_interval:
9925 ret = get_errno(sched_rr_get_interval(arg1, &ts));
9926 if (!is_error(ret)) {
9927 ret = host_to_target_timespec(arg2, &ts);
/* nanosleep: on interruption the remaining time is reported back to
 * the guest through arg2. */
9931 case TARGET_NR_nanosleep:
9933 struct timespec req, rem;
9934 target_to_host_timespec(&req, arg1);
9935 ret = get_errno(safe_nanosleep(&req, &rem));
9936 if (is_error(ret) && arg2) {
9937 host_to_target_timespec(arg2, &rem);
9941 #ifdef TARGET_NR_query_module
9942 case TARGET_NR_query_module:
9945 #ifdef TARGET_NR_nfsservctl
9946 case TARGET_NR_nfsservctl:
/* prctl: options with pointer arguments (PR_GET_PDEATHSIG, the 16-byte
 * PR_GET/SET_NAME buffers) are converted explicitly; everything else
 * is passed through untouched. */
9949 case TARGET_NR_prctl:
9951 case PR_GET_PDEATHSIG:
9954 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
9955 if (!is_error(ret) && arg2
9956 && put_user_ual(deathsig, arg2)) {
9964 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
9968 ret = get_errno(prctl(arg1, (unsigned long)name,
9970 unlock_user(name, arg2, 16);
9975 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
9979 ret = get_errno(prctl(arg1, (unsigned long)name,
9981 unlock_user(name, arg2, 0);
9986 /* Most prctl options have no pointer arguments */
9987 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
9991 #ifdef TARGET_NR_arch_prctl
9992 case TARGET_NR_arch_prctl:
9993 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
9994 ret = do_arch_prctl(cpu_env, arg1, arg2);
/* pread64/pwrite64: the 64-bit offset arrives as a register pair;
 * regpairs_aligned() handles ABIs that skip a register for alignment. */
10000 #ifdef TARGET_NR_pread64
10001 case TARGET_NR_pread64:
10002 if (regpairs_aligned(cpu_env)) {
10006 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
10008 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10009 unlock_user(p, arg2, ret);
10011 case TARGET_NR_pwrite64:
10012 if (regpairs_aligned(cpu_env)) {
10016 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
10018 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10019 unlock_user(p, arg2, 0);
10022 case TARGET_NR_getcwd:
10023 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10025 ret = get_errno(sys_getcwd1(p, arg2));
10026 unlock_user(p, arg1, ret);
/* capget/capset share one implementation: convert the header, work out
 * how many __user_cap_data_struct items the version implies (v2+ uses
 * two), convert data in the appropriate direction around the host
 * call, and always write the (possibly kernel-updated) version back. */
10028 case TARGET_NR_capget:
10029 case TARGET_NR_capset:
10031 struct target_user_cap_header *target_header;
10032 struct target_user_cap_data *target_data = NULL;
10033 struct __user_cap_header_struct header;
10034 struct __user_cap_data_struct data[2];
10035 struct __user_cap_data_struct *dataptr = NULL;
10036 int i, target_datalen;
10037 int data_items = 1;
10039 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10042 header.version = tswap32(target_header->version);
10043 header.pid = tswap32(target_header->pid);
10045 if (header.version != _LINUX_CAPABILITY_VERSION) {
10046 /* Version 2 and up takes pointer to two user_data structs */
10050 target_datalen = sizeof(*target_data) * data_items;
10053 if (num == TARGET_NR_capget) {
10054 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10056 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10058 if (!target_data) {
10059 unlock_user_struct(target_header, arg1, 0);
10063 if (num == TARGET_NR_capset) {
10064 for (i = 0; i < data_items; i++) {
10065 data[i].effective = tswap32(target_data[i].effective);
10066 data[i].permitted = tswap32(target_data[i].permitted);
10067 data[i].inheritable = tswap32(target_data[i].inheritable);
10074 if (num == TARGET_NR_capget) {
10075 ret = get_errno(capget(&header, dataptr));
10077 ret = get_errno(capset(&header, dataptr));
10080 /* The kernel always updates version for both capget and capset */
10081 target_header->version = tswap32(header.version);
10082 unlock_user_struct(target_header, arg1, 1);
10085 if (num == TARGET_NR_capget) {
10086 for (i = 0; i < data_items; i++) {
10087 target_data[i].effective = tswap32(data[i].effective);
10088 target_data[i].permitted = tswap32(data[i].permitted);
10089 target_data[i].inheritable = tswap32(data[i].inheritable);
10091 unlock_user(target_data, arg2, target_datalen);
10093 unlock_user(target_data, arg2, 0);
10098 case TARGET_NR_sigaltstack:
10099 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
/* sendfile/sendfile64: read the guest's offset (abi_long for sendfile,
 * s64 for sendfile64) into a host off_t, call sendfile, then write the
 * updated offset back.  Without host CONFIG_SENDFILE both fall through
 * to unimplemented. */
10102 #ifdef CONFIG_SENDFILE
10103 case TARGET_NR_sendfile:
10105 off_t *offp = NULL;
10108 ret = get_user_sal(off, arg3);
10109 if (is_error(ret)) {
10114 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10115 if (!is_error(ret) && arg3) {
10116 abi_long ret2 = put_user_sal(off, arg3);
10117 if (is_error(ret2)) {
10123 #ifdef TARGET_NR_sendfile64
10124 case TARGET_NR_sendfile64:
10126 off_t *offp = NULL;
10129 ret = get_user_s64(off, arg3);
10130 if (is_error(ret)) {
10135 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10136 if (!is_error(ret) && arg3) {
10137 abi_long ret2 = put_user_s64(off, arg3);
10138 if (is_error(ret2)) {
10146 case TARGET_NR_sendfile:
10147 #ifdef TARGET_NR_sendfile64
10148 case TARGET_NR_sendfile64:
10150 goto unimplemented;
10153 #ifdef TARGET_NR_getpmsg
10154 case TARGET_NR_getpmsg:
10155 goto unimplemented;
10157 #ifdef TARGET_NR_putpmsg
10158 case TARGET_NR_putpmsg:
10159 goto unimplemented;
/* vfork is emulated as clone(CLONE_VFORK|CLONE_VM|SIGCHLD). */
10161 #ifdef TARGET_NR_vfork
10162 case TARGET_NR_vfork:
10163 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
10167 #ifdef TARGET_NR_ugetrlimit
10168 case TARGET_NR_ugetrlimit:
10170 struct rlimit rlim;
10171 int resource = target_to_host_resource(arg1);
10172 ret = get_errno(getrlimit(resource, &rlim));
10173 if (!is_error(ret)) {
10174 struct target_rlimit *target_rlim;
10175 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10177 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10178 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10179 unlock_user_struct(target_rlim, arg2, 1);
10184 #ifdef TARGET_NR_truncate64
10185 case TARGET_NR_truncate64:
10186 if (!(p = lock_user_string(arg1)))
10188 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
10189 unlock_user(p, arg1, 0);
10192 #ifdef TARGET_NR_ftruncate64
10193 case TARGET_NR_ftruncate64:
10194 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
/* stat64 family: call the host stat variant into a host struct stat,
 * then host_to_target_stat64() converts it to the target's 64-bit stat
 * layout.  path() applies QEMU's sysroot path redirection. */
10197 #ifdef TARGET_NR_stat64
10198 case TARGET_NR_stat64:
10199 if (!(p = lock_user_string(arg1)))
10201 ret = get_errno(stat(path(p), &st));
10202 unlock_user(p, arg1, 0);
10203 if (!is_error(ret))
10204 ret = host_to_target_stat64(cpu_env, arg2, &st);
10207 #ifdef TARGET_NR_lstat64
10208 case TARGET_NR_lstat64:
10209 if (!(p = lock_user_string(arg1)))
10211 ret = get_errno(lstat(path(p), &st));
10212 unlock_user(p, arg1, 0);
10213 if (!is_error(ret))
10214 ret = host_to_target_stat64(cpu_env, arg2, &st);
10217 #ifdef TARGET_NR_fstat64
10218 case TARGET_NR_fstat64:
10219 ret = get_errno(fstat(arg1, &st));
10220 if (!is_error(ret))
10221 ret = host_to_target_stat64(cpu_env, arg2, &st);
10224 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10225 #ifdef TARGET_NR_fstatat64
10226 case TARGET_NR_fstatat64:
10228 #ifdef TARGET_NR_newfstatat
10229 case TARGET_NR_newfstatat:
10231 if (!(p = lock_user_string(arg2)))
10233 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
10234 if (!is_error(ret))
10235 ret = host_to_target_stat64(cpu_env, arg3, &st);
/* Legacy 16-bit uid/gid syscalls: IDs are squeezed through
 * low2high{uid,gid}/high2low{uid,gid} so targets with 16-bit uid_t
 * interoperate with the host's 32-bit IDs. */
10238 #ifdef TARGET_NR_lchown
10239 case TARGET_NR_lchown:
10240 if (!(p = lock_user_string(arg1)))
10242 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10243 unlock_user(p, arg1, 0);
10246 #ifdef TARGET_NR_getuid
10247 case TARGET_NR_getuid:
10248 ret = get_errno(high2lowuid(getuid()));
10251 #ifdef TARGET_NR_getgid
10252 case TARGET_NR_getgid:
10253 ret = get_errno(high2lowgid(getgid()));
10256 #ifdef TARGET_NR_geteuid
10257 case TARGET_NR_geteuid:
10258 ret = get_errno(high2lowuid(geteuid()));
10261 #ifdef TARGET_NR_getegid
10262 case TARGET_NR_getegid:
10263 ret = get_errno(high2lowgid(getegid()));
10266 case TARGET_NR_setreuid:
10267 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
10269 case TARGET_NR_setregid:
10270 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
10272 case TARGET_NR_getgroups:
10274 int gidsetsize = arg1;
10275 target_id *target_grouplist;
10279 grouplist = alloca(gidsetsize * sizeof(gid_t));
10280 ret = get_errno(getgroups(gidsetsize, grouplist));
10281 if (gidsetsize == 0)
10283 if (!is_error(ret)) {
10284 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
10285 if (!target_grouplist)
10287 for(i = 0;i < ret; i++)
10288 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
10289 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
10293 case TARGET_NR_setgroups:
10295 int gidsetsize = arg1;
10296 target_id *target_grouplist;
10297 gid_t *grouplist = NULL;
10300 grouplist = alloca(gidsetsize * sizeof(gid_t));
10301 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
10302 if (!target_grouplist) {
10303 ret = -TARGET_EFAULT;
10306 for (i = 0; i < gidsetsize; i++) {
10307 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
10309 unlock_user(target_grouplist, arg2, 0);
10311 ret = get_errno(setgroups(gidsetsize, grouplist));
10314 case TARGET_NR_fchown:
10315 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
10317 #if defined(TARGET_NR_fchownat)
10318 case TARGET_NR_fchownat:
10319 if (!(p = lock_user_string(arg2)))
10321 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
10322 low2highgid(arg4), arg5));
10323 unlock_user(p, arg2, 0);
10326 #ifdef TARGET_NR_setresuid
10327 case TARGET_NR_setresuid:
10328 ret = get_errno(sys_setresuid(low2highuid(arg1),
10330 low2highuid(arg3)));
/* NOTE(review): this guard tests TARGET_NR_getresuid but the case is
 * setresuid — matches the historical upstream code; verify intent. */
10333 #ifdef TARGET_NR_getresuid
10334 case TARGET_NR_getresuid:
10336 uid_t ruid, euid, suid;
10337 ret = get_errno(getresuid(&ruid, &euid, &suid));
10338 if (!is_error(ret)) {
10339 if (put_user_id(high2lowuid(ruid), arg1)
10340 || put_user_id(high2lowuid(euid), arg2)
10341 || put_user_id(high2lowuid(suid), arg3))
10347 #ifdef TARGET_NR_getresgid
10348 case TARGET_NR_setresgid:
10349 ret = get_errno(sys_setresgid(low2highgid(arg1),
10351 low2highgid(arg3)));
10354 #ifdef TARGET_NR_getresgid
10355 case TARGET_NR_getresgid:
10357 gid_t rgid, egid, sgid;
10358 ret = get_errno(getresgid(&rgid, &egid, &sgid));
10359 if (!is_error(ret)) {
10360 if (put_user_id(high2lowgid(rgid), arg1)
10361 || put_user_id(high2lowgid(egid), arg2)
10362 || put_user_id(high2lowgid(sgid), arg3))
10368 #ifdef TARGET_NR_chown
10369 case TARGET_NR_chown:
10370 if (!(p = lock_user_string(arg1)))
10372 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
10373 unlock_user(p, arg1, 0);
10376 case TARGET_NR_setuid:
10377 ret = get_errno(sys_setuid(low2highuid(arg1)));
10379 case TARGET_NR_setgid:
10380 ret = get_errno(sys_setgid(low2highgid(arg1)));
10382 case TARGET_NR_setfsuid:
10383 ret = get_errno(setfsuid(arg1));
10385 case TARGET_NR_setfsgid:
10386 ret = get_errno(setfsgid(arg1));
/* 32-bit uid variants need no 16<->32 bit ID conversion. */
10389 #ifdef TARGET_NR_lchown32
10390 case TARGET_NR_lchown32:
10391 if (!(p = lock_user_string(arg1)))
10393 ret = get_errno(lchown(p, arg2, arg3));
10394 unlock_user(p, arg1, 0);
10397 #ifdef TARGET_NR_getuid32
10398 case TARGET_NR_getuid32:
10399 ret = get_errno(getuid());
/* Alpha getxuid/getxgid return the effective ID in register A4 as a
 * second result alongside the real ID in the normal return value. */
10403 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10404 /* Alpha specific */
10405 case TARGET_NR_getxuid:
10409 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
10411 ret = get_errno(getuid());
10414 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10415 /* Alpha specific */
10416 case TARGET_NR_getxgid:
10420 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
10422 ret = get_errno(getgid());
/* Alpha OSF sysinfo calls: translate between the guest-visible software
 * completion register (SWCR) format and the Alpha FPCR, mirroring the
 * kernel's ieee_fpcr_to_swcr / ieee_swcr_to_fpcr bit shuffles. */
10425 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10426 /* Alpha specific */
10427 case TARGET_NR_osf_getsysinfo:
10428 ret = -TARGET_EOPNOTSUPP;
10430 case TARGET_GSI_IEEE_FP_CONTROL:
10432 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
10434 /* Copied from linux ieee_fpcr_to_swcr. */
10435 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
10436 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
10437 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
10438 | SWCR_TRAP_ENABLE_DZE
10439 | SWCR_TRAP_ENABLE_OVF);
10440 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
10441 | SWCR_TRAP_ENABLE_INE);
10442 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
10443 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
10445 if (put_user_u64 (swcr, arg2))
10451 /* case GSI_IEEE_STATE_AT_SIGNAL:
10452 -- Not implemented in linux kernel.
10454 -- Retrieves current unaligned access state; not much used.
10455 case GSI_PROC_TYPE:
10456 -- Retrieves implver information; surely not used.
10457 case GSI_GET_HWRPB:
10458 -- Grabs a copy of the HWRPB; surely not used.
10463 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10464 /* Alpha specific */
10465 case TARGET_NR_osf_setsysinfo:
10466 ret = -TARGET_EOPNOTSUPP;
10468 case TARGET_SSI_IEEE_FP_CONTROL:
10470 uint64_t swcr, fpcr, orig_fpcr;
10472 if (get_user_u64 (swcr, arg2)) {
10475 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
10476 fpcr = orig_fpcr & FPCR_DYN_MASK;
10478 /* Copied from linux ieee_swcr_to_fpcr. */
10479 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
10480 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
10481 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
10482 | SWCR_TRAP_ENABLE_DZE
10483 | SWCR_TRAP_ENABLE_OVF)) << 48;
10484 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
10485 | SWCR_TRAP_ENABLE_INE)) << 57;
10486 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
10487 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
10489 cpu_alpha_store_fpcr(cpu_env, fpcr);
/* SSI_IEEE_RAISE_EXCEPTION: merge the requested exception bits into
 * the FPCR status, then raise SIGFPE for any newly-set, unmasked
 * exception, highest-priority code last so it wins. */
10494 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
10496 uint64_t exc, fpcr, orig_fpcr;
10499 if (get_user_u64(exc, arg2)) {
10503 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
10505 /* We only add to the exception status here. */
10506 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
10508 cpu_alpha_store_fpcr(cpu_env, fpcr);
10511 /* Old exceptions are not signaled. */
10512 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
10514 /* If any exceptions set by this call,
10515 and are unmasked, send a signal. */
10517 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
10518 si_code = TARGET_FPE_FLTRES;
10520 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
10521 si_code = TARGET_FPE_FLTUND;
10523 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
10524 si_code = TARGET_FPE_FLTOVF;
10526 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
10527 si_code = TARGET_FPE_FLTDIV;
10529 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
10530 si_code = TARGET_FPE_FLTINV;
10532 if (si_code != 0) {
10533 target_siginfo_t info;
10534 info.si_signo = SIGFPE;
10536 info.si_code = si_code;
10537 info._sifields._sigfault._addr
10538 = ((CPUArchState *)cpu_env)->pc;
10539 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
10544 /* case SSI_NVPAIRS:
10545 -- Used with SSIN_UACPROC to enable unaligned accesses.
10546 case SSI_IEEE_STATE_AT_SIGNAL:
10547 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10548 -- Not implemented in linux kernel
/* osf_sigprocmask: old-style sigset in a register; map TARGET_SIG_*
 * `how` values then delegate to do_sigprocmask. */
10553 #ifdef TARGET_NR_osf_sigprocmask
10554 /* Alpha specific. */
10555 case TARGET_NR_osf_sigprocmask:
10559 sigset_t set, oldset;
10562 case TARGET_SIG_BLOCK:
10565 case TARGET_SIG_UNBLOCK:
10568 case TARGET_SIG_SETMASK:
10572 ret = -TARGET_EINVAL;
10576 target_to_host_old_sigset(&set, &mask);
10577 ret = do_sigprocmask(how, &set, &oldset);
10579 host_to_target_old_sigset(&mask, &oldset);
/* 32-bit uid/gid syscall variants: identical to the 16-bit set above
 * but with no low2high/high2low ID conversion; group lists still need
 * per-element byte swapping (tswap32). */
10586 #ifdef TARGET_NR_getgid32
10587 case TARGET_NR_getgid32:
10588 ret = get_errno(getgid());
10591 #ifdef TARGET_NR_geteuid32
10592 case TARGET_NR_geteuid32:
10593 ret = get_errno(geteuid());
10596 #ifdef TARGET_NR_getegid32
10597 case TARGET_NR_getegid32:
10598 ret = get_errno(getegid());
10601 #ifdef TARGET_NR_setreuid32
10602 case TARGET_NR_setreuid32:
10603 ret = get_errno(setreuid(arg1, arg2));
10606 #ifdef TARGET_NR_setregid32
10607 case TARGET_NR_setregid32:
10608 ret = get_errno(setregid(arg1, arg2));
10611 #ifdef TARGET_NR_getgroups32
10612 case TARGET_NR_getgroups32:
10614 int gidsetsize = arg1;
10615 uint32_t *target_grouplist;
10619 grouplist = alloca(gidsetsize * sizeof(gid_t));
10620 ret = get_errno(getgroups(gidsetsize, grouplist));
10621 if (gidsetsize == 0)
10623 if (!is_error(ret)) {
10624 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
10625 if (!target_grouplist) {
10626 ret = -TARGET_EFAULT;
10629 for(i = 0;i < ret; i++)
10630 target_grouplist[i] = tswap32(grouplist[i]);
10631 unlock_user(target_grouplist, arg2, gidsetsize * 4);
10636 #ifdef TARGET_NR_setgroups32
10637 case TARGET_NR_setgroups32:
10639 int gidsetsize = arg1;
10640 uint32_t *target_grouplist;
10644 grouplist = alloca(gidsetsize * sizeof(gid_t));
10645 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
10646 if (!target_grouplist) {
10647 ret = -TARGET_EFAULT;
10650 for(i = 0;i < gidsetsize; i++)
10651 grouplist[i] = tswap32(target_grouplist[i]);
10652 unlock_user(target_grouplist, arg2, 0);
10653 ret = get_errno(setgroups(gidsetsize, grouplist));
10657 #ifdef TARGET_NR_fchown32
10658 case TARGET_NR_fchown32:
10659 ret = get_errno(fchown(arg1, arg2, arg3));
10662 #ifdef TARGET_NR_setresuid32
10663 case TARGET_NR_setresuid32:
10664 ret = get_errno(sys_setresuid(arg1, arg2, arg3));
10667 #ifdef TARGET_NR_getresuid32
10668 case TARGET_NR_getresuid32:
10670 uid_t ruid, euid, suid;
10671 ret = get_errno(getresuid(&ruid, &euid, &suid));
10672 if (!is_error(ret)) {
10673 if (put_user_u32(ruid, arg1)
10674 || put_user_u32(euid, arg2)
10675 || put_user_u32(suid, arg3))
10681 #ifdef TARGET_NR_setresgid32
10682 case TARGET_NR_setresgid32:
10683 ret = get_errno(sys_setresgid(arg1, arg2, arg3));
10686 #ifdef TARGET_NR_getresgid32
10687 case TARGET_NR_getresgid32:
10689 gid_t rgid, egid, sgid;
10690 ret = get_errno(getresgid(&rgid, &egid, &sgid));
10691 if (!is_error(ret)) {
10692 if (put_user_u32(rgid, arg1)
10693 || put_user_u32(egid, arg2)
10694 || put_user_u32(sgid, arg3))
10700 #ifdef TARGET_NR_chown32
10701 case TARGET_NR_chown32:
10702 if (!(p = lock_user_string(arg1)))
10704 ret = get_errno(chown(p, arg2, arg3));
10705 unlock_user(p, arg1, 0);
10708 #ifdef TARGET_NR_setuid32
10709 case TARGET_NR_setuid32:
10710 ret = get_errno(sys_setuid(arg1));
10713 #ifdef TARGET_NR_setgid32
10714 case TARGET_NR_setgid32:
10715 ret = get_errno(sys_setgid(arg1));
10718 #ifdef TARGET_NR_setfsuid32
10719 case TARGET_NR_setfsuid32:
10720 ret = get_errno(setfsuid(arg1));
10723 #ifdef TARGET_NR_setfsgid32
10724 case TARGET_NR_setfsgid32:
10725 ret = get_errno(setfsgid(arg1));
10729 case TARGET_NR_pivot_root:
10730 goto unimplemented;
/* mincore: lock both the probed region (read) and the result vector
 * (via lock_user_string on arg3 — the per-page byte vector). */
10731 #ifdef TARGET_NR_mincore
10732 case TARGET_NR_mincore:
10735 ret = -TARGET_EFAULT;
10736 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
10738 if (!(p = lock_user_string(arg3)))
10740 ret = get_errno(mincore(a, arg2, p));
10741 unlock_user(p, arg3, ret);
10743 unlock_user(a, arg1, 0);
/* fadvise family: several ABI layouts.  posix_fadvise returns the
 * error directly (not via errno), hence -host_to_target_errno(ret). */
10747 #ifdef TARGET_NR_arm_fadvise64_64
10748 case TARGET_NR_arm_fadvise64_64:
10749 /* arm_fadvise64_64 looks like fadvise64_64 but
10750 * with different argument order: fd, advice, offset, len
10751 * rather than the usual fd, offset, len, advice.
10752 * Note that offset and len are both 64-bit so appear as
10753 * pairs of 32-bit registers.
10755 ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
10756 target_offset64(arg5, arg6), arg2);
10757 ret = -host_to_target_errno(ret);
10761 #if TARGET_ABI_BITS == 32
10763 #ifdef TARGET_NR_fadvise64_64
10764 case TARGET_NR_fadvise64_64:
10765 /* 6 args: fd, offset (high, low), len (high, low), advice */
10766 if (regpairs_aligned(cpu_env)) {
10767 /* offset is in (3,4), len in (5,6) and advice in 7 */
10774 ret = -host_to_target_errno(posix_fadvise(arg1,
10775 target_offset64(arg2, arg3),
10776 target_offset64(arg4, arg5),
10781 #ifdef TARGET_NR_fadvise64
10782 case TARGET_NR_fadvise64:
10783 /* 5 args: fd, offset (high, low), len, advice */
10784 if (regpairs_aligned(cpu_env)) {
10785 /* offset is in (3,4), len in 5 and advice in 6 */
10791 ret = -host_to_target_errno(posix_fadvise(arg1,
10792 target_offset64(arg2, arg3),
10797 #else /* not a 32-bit ABI */
10798 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
10799 #ifdef TARGET_NR_fadvise64_64
10800 case TARGET_NR_fadvise64_64:
10802 #ifdef TARGET_NR_fadvise64
10803 case TARGET_NR_fadvise64:
/* S390X uses different POSIX_FADV_* numbering; remap before the call. */
10805 #ifdef TARGET_S390X
10807 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
10808 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
10809 case 6: arg4 = POSIX_FADV_DONTNEED; break;
10810 case 7: arg4 = POSIX_FADV_NOREUSE; break;
10814 ret = -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
10817 #endif /* end of 64-bit ABI fadvise handling */
10819 #ifdef TARGET_NR_madvise
10820 case TARGET_NR_madvise:
10821 /* A straight passthrough may not be safe because qemu sometimes
10822 turns private file-backed mappings into anonymous mappings.
10823 This will break MADV_DONTNEED.
10824 This is a hint, so ignoring and returning success is ok. */
10825 ret = get_errno(0);
10828 #if TARGET_ABI_BITS == 32
/* fcntl64: only the 64-bit file-lock commands need special handling;
 * everything else is delegated to do_fcntl() below. */
10829 case TARGET_NR_fcntl64:
/* default flock64 marshalling helpers; overridden for ARM EABI below */
10833 from_flock64_fn *copyfrom = copy_from_user_flock64;
10834 to_flock64_fn *copyto = copy_to_user_flock64;
/* ARM EABI lays out struct flock64 differently from OABI */
10837 if (((CPUARMState *)cpu_env)->eabi) {
10838 copyfrom = copy_from_user_eabi_flock64;
10839 copyto = copy_to_user_eabi_flock64;
10843 cmd = target_to_host_fcntl_cmd(arg2);
10844 if (cmd == -TARGET_EINVAL) {
10850 case TARGET_F_GETLK64:
10851 ret = copyfrom(&fl, arg3);
10855 ret = get_errno(fcntl(arg1, cmd, &fl));
/* GETLK writes the lock description back to the guest */
10857 ret = copyto(arg3, &fl);
10861 case TARGET_F_SETLK64:
10862 case TARGET_F_SETLKW64:
10863 ret = copyfrom(&fl, arg3);
/* SETLKW can block, so use the signal-safe wrapper */
10867 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
/* non-lock commands share the plain fcntl emulation */
10870 ret = do_fcntl(arg1, arg2, arg3);
10876 #ifdef TARGET_NR_cacheflush
10877 case TARGET_NR_cacheflush:
10878 /* self-modifying code is handled automatically, so nothing needed */
10882 #ifdef TARGET_NR_security
10883 case TARGET_NR_security:
10884 goto unimplemented;
10886 #ifdef TARGET_NR_getpagesize
10887 case TARGET_NR_getpagesize:
/* report the guest's page size, not the host's */
10888 ret = TARGET_PAGE_SIZE;
10891 case TARGET_NR_gettid:
10892 ret = get_errno(gettid());
10894 #ifdef TARGET_NR_readahead
10895 case TARGET_NR_readahead:
10896 #if TARGET_ABI_BITS == 32
/* 32-bit ABIs pass the 64-bit offset as a register pair; some ABIs
 * require that pair to start on an even register */
10897 if (regpairs_aligned(cpu_env)) {
10902 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
10904 ret = get_errno(readahead(arg1, arg2, arg3));
/* Extended-attribute syscall family. Pattern throughout: lock the
 * guest path/name/value buffers, call the matching host xattr
 * function, then unlock (writing back only output buffers). */
10909 #ifdef TARGET_NR_setxattr
10910 case TARGET_NR_listxattr:
10911 case TARGET_NR_llistxattr:
/* arg2/arg3: output buffer for the NUL-separated name list */
10915 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10917 ret = -TARGET_EFAULT;
10921 p = lock_user_string(arg1);
/* num distinguishes the follow-symlink from the no-follow variant */
10923 if (num == TARGET_NR_listxattr) {
10924 ret = get_errno(listxattr(p, b, arg3));
10926 ret = get_errno(llistxattr(p, b, arg3));
10929 ret = -TARGET_EFAULT;
10931 unlock_user(p, arg1, 0);
10932 unlock_user(b, arg2, arg3);
10935 case TARGET_NR_flistxattr:
10939 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10941 ret = -TARGET_EFAULT;
10945 ret = get_errno(flistxattr(arg1, b, arg3));
10946 unlock_user(b, arg2, arg3);
10949 case TARGET_NR_setxattr:
10950 case TARGET_NR_lsetxattr:
10952 void *p, *n, *v = 0;
/* value buffer is input-only; arg4 is its length, arg5 the flags */
10954 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10956 ret = -TARGET_EFAULT;
10960 p = lock_user_string(arg1);
10961 n = lock_user_string(arg2);
10963 if (num == TARGET_NR_setxattr) {
10964 ret = get_errno(setxattr(p, n, v, arg4, arg5));
10966 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
10969 ret = -TARGET_EFAULT;
10971 unlock_user(p, arg1, 0);
10972 unlock_user(n, arg2, 0);
10973 unlock_user(v, arg3, 0);
10976 case TARGET_NR_fsetxattr:
10980 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10982 ret = -TARGET_EFAULT;
10986 n = lock_user_string(arg2);
10988 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
10990 ret = -TARGET_EFAULT;
10992 unlock_user(n, arg2, 0);
10993 unlock_user(v, arg3, 0);
10996 case TARGET_NR_getxattr:
10997 case TARGET_NR_lgetxattr:
10999 void *p, *n, *v = 0;
/* value buffer is an output here, so lock it for writing */
11001 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11003 ret = -TARGET_EFAULT;
11007 p = lock_user_string(arg1);
11008 n = lock_user_string(arg2);
11010 if (num == TARGET_NR_getxattr) {
11011 ret = get_errno(getxattr(p, n, v, arg4));
11013 ret = get_errno(lgetxattr(p, n, v, arg4));
11016 ret = -TARGET_EFAULT;
11018 unlock_user(p, arg1, 0);
11019 unlock_user(n, arg2, 0);
/* arg4 as unlock length flushes the retrieved value to the guest */
11020 unlock_user(v, arg3, arg4);
11023 case TARGET_NR_fgetxattr:
11027 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11029 ret = -TARGET_EFAULT;
11033 n = lock_user_string(arg2);
11035 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11037 ret = -TARGET_EFAULT;
11039 unlock_user(n, arg2, 0);
11040 unlock_user(v, arg3, arg4);
11043 case TARGET_NR_removexattr:
11044 case TARGET_NR_lremovexattr:
11047 p = lock_user_string(arg1);
11048 n = lock_user_string(arg2);
11050 if (num == TARGET_NR_removexattr) {
11051 ret = get_errno(removexattr(p, n));
11053 ret = get_errno(lremovexattr(p, n));
11056 ret = -TARGET_EFAULT;
11058 unlock_user(p, arg1, 0);
11059 unlock_user(n, arg2, 0);
11062 case TARGET_NR_fremovexattr:
11065 n = lock_user_string(arg2);
11067 ret = get_errno(fremovexattr(arg1, n));
11069 ret = -TARGET_EFAULT;
11071 unlock_user(n, arg2, 0);
11075 #endif /* CONFIG_ATTR */
/* set_thread_area: per-architecture handling — most targets store the
 * thread pointer directly into the emulated CPU state. */
11076 #ifdef TARGET_NR_set_thread_area
11077 case TARGET_NR_set_thread_area:
11078 #if defined(TARGET_MIPS)
/* MIPS keeps the TLS pointer in the UserLocal CP0 register */
11079 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11082 #elif defined(TARGET_CRIS)
11084 ret = -TARGET_EINVAL;
11086 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11090 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
/* i386 uses a real LDT/GDT descriptor; handled by a dedicated helper */
11091 ret = do_set_thread_area(cpu_env, arg1);
11093 #elif defined(TARGET_M68K)
/* m68k keeps the TLS pointer in the per-thread TaskState */
11095 TaskState *ts = cpu->opaque;
11096 ts->tp_value = arg1;
/* any other target: quietly report ENOSYS */
11101 goto unimplemented_nowarn;
11104 #ifdef TARGET_NR_get_thread_area
11105 case TARGET_NR_get_thread_area:
11106 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11107 ret = do_get_thread_area(cpu_env, arg1);
11109 #elif defined(TARGET_M68K)
11111 TaskState *ts = cpu->opaque;
11112 ret = ts->tp_value;
11116 goto unimplemented_nowarn;
11119 #ifdef TARGET_NR_getdomainname
/* getdomainname is deliberately left unimplemented (no warning) */
11120 case TARGET_NR_getdomainname:
11121 goto unimplemented_nowarn;
11124 #ifdef TARGET_NR_clock_gettime
11125 case TARGET_NR_clock_gettime:
11127 struct timespec ts;
11128 ret = get_errno(clock_gettime(arg1, &ts));
11129 if (!is_error(ret)) {
/* convert the host timespec to guest layout at arg2 */
11130 host_to_target_timespec(arg2, &ts);
11135 #ifdef TARGET_NR_clock_getres
11136 case TARGET_NR_clock_getres:
11138 struct timespec ts;
11139 ret = get_errno(clock_getres(arg1, &ts));
11140 if (!is_error(ret)) {
11141 host_to_target_timespec(arg2, &ts);
11146 #ifdef TARGET_NR_clock_nanosleep
11147 case TARGET_NR_clock_nanosleep:
11149 struct timespec ts;
11150 target_to_host_timespec(&ts, arg3);
/* arg4 (remaining-time out-pointer) is optional; reuse ts for it */
11151 ret = get_errno(safe_clock_nanosleep(arg1, arg2,
11152 &ts, arg4 ? &ts : NULL));
11154 host_to_target_timespec(arg4, &ts);
11156 #if defined(TARGET_PPC)
11157 /* clock_nanosleep is odd in that it returns positive errno values.
11158 * On PPC, CR0 bit 3 should be set in such a situation. */
11159 if (ret && ret != -TARGET_ERESTARTSYS) {
11160 ((CPUPPCState *)cpu_env)->crf[0] |= 1;
11167 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11168 case TARGET_NR_set_tid_address:
/* pass the host-translated guest address straight to the kernel */
11169 ret = get_errno(set_tid_address((int *)g2h(arg1)));
11173 case TARGET_NR_tkill:
/* signal numbers must be translated from guest to host numbering */
11174 ret = get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
11177 case TARGET_NR_tgkill:
11178 ret = get_errno(safe_tgkill((int)arg1, (int)arg2,
11179 target_to_host_signal(arg3)));
11182 #ifdef TARGET_NR_set_robust_list
11183 case TARGET_NR_set_robust_list:
11184 case TARGET_NR_get_robust_list:
11185 /* The ABI for supporting robust futexes has userspace pass
11186 * the kernel a pointer to a linked list which is updated by
11187 * userspace after the syscall; the list is walked by the kernel
11188 * when the thread exits. Since the linked list in QEMU guest
11189 * memory isn't a valid linked list for the host and we have
11190 * no way to reliably intercept the thread-death event, we can't
11191 * support these. Silently return ENOSYS so that guest userspace
11192 * falls back to a non-robust futex implementation (which should
11193 * be OK except in the corner case of the guest crashing while
11194 * holding a mutex that is shared with another process via
11197 goto unimplemented_nowarn;
11200 #if defined(TARGET_NR_utimensat)
11201 case TARGET_NR_utimensat:
11203 struct timespec *tsp, ts[2];
/* convert the two guest timespecs (atime, mtime) when provided */
11207 target_to_host_timespec(ts, arg3);
11208 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec))
11212 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
11214 if (!(p = lock_user_string(arg2))) {
11215 ret = -TARGET_EFAULT;
11218 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
11219 unlock_user(p, arg2, 0);
11224 case TARGET_NR_futex:
/* all futex ops are marshalled by a dedicated helper */
11225 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
11227 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11228 case TARGET_NR_inotify_init:
11229 ret = get_errno(sys_inotify_init());
11232 #ifdef CONFIG_INOTIFY1
11233 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11234 case TARGET_NR_inotify_init1:
11235 ret = get_errno(sys_inotify_init1(arg1));
11239 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11240 case TARGET_NR_inotify_add_watch:
11241 p = lock_user_string(arg2)
11242 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
11243 unlock_user(p, arg2, 0);
11246 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11247 case TARGET_NR_inotify_rm_watch:
11248 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
11252 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11253 case TARGET_NR_mq_open:
11255 struct mq_attr posix_mq_attr, *attrp;
/* NOTE(review): the queue name is locked starting at arg1 - 1, one
 * byte *before* the guest pointer — presumably deliberate, but it
 * reads a byte the guest did not pass; confirm against the callers
 * and mq_open(3) ("name must begin with '/'"). */
11257 p = lock_user_string(arg1 - 1);
/* arg4 (struct mq_attr *) is optional; NULL means default attributes */
11259 copy_from_user_mq_attr (&posix_mq_attr, arg4);
11260 attrp = &posix_mq_attr;
11264 ret = get_errno(mq_open(p, arg2, arg3, attrp));
11265 unlock_user (p, arg1, 0);
11269 case TARGET_NR_mq_unlink:
11270 p = lock_user_string(arg1 - 1);
11271 ret = get_errno(mq_unlink(p));
11272 unlock_user (p, arg1, 0);
11275 case TARGET_NR_mq_timedsend:
11277 struct timespec ts;
11279 p = lock_user (VERIFY_READ, arg2, arg3, 1);
/* arg5 non-NULL selects the timed variant; timeout is copied both ways */
11281 target_to_host_timespec(&ts, arg5);
11282 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
11283 host_to_target_timespec(arg5, &ts);
11285 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
11287 unlock_user (p, arg2, arg3);
11291 case TARGET_NR_mq_timedreceive:
11293 struct timespec ts;
11296 p = lock_user (VERIFY_READ, arg2, arg3, 1);
11298 target_to_host_timespec(&ts, arg5);
11299 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11301 host_to_target_timespec(arg5, &ts);
11303 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11306 unlock_user (p, arg2, arg3);
/* write the received message priority back to the guest */
11308 put_user_u32(prio, arg4);
11312 /* Not implemented for now... */
11313 /* case TARGET_NR_mq_notify: */
11316 case TARGET_NR_mq_getsetattr:
11318 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
11321 ret = mq_getattr(arg1, &posix_mq_attr_out);
11322 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
11325 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
11326 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
11333 #ifdef CONFIG_SPLICE
11334 #ifdef TARGET_NR_tee
11335 case TARGET_NR_tee:
/* tee operates purely on fds, so it passes straight through */
11337 ret = get_errno(tee(arg1,arg2,arg3,arg4));
11341 #ifdef TARGET_NR_splice
11342 case TARGET_NR_splice:
11344 loff_t loff_in, loff_out;
/* NULL offset pointers mean "use the fd's file position" */
11345 loff_t *ploff_in = NULL, *ploff_out = NULL;
11347 if (get_user_u64(loff_in, arg2)) {
11350 ploff_in = &loff_in;
11353 if (get_user_u64(loff_out, arg4)) {
11356 ploff_out = &loff_out;
11358 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
/* splice updates the offsets; copy them back to the guest */
11360 if (put_user_u64(loff_in, arg2)) {
11365 if (put_user_u64(loff_out, arg4)) {
11372 #ifdef TARGET_NR_vmsplice
11373 case TARGET_NR_vmsplice:
11375 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11377 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
11378 unlock_iovec(vec, arg2, arg3, 0);
/* lock_iovec failed: translate the host errno it left behind */
11380 ret = -host_to_target_errno(errno);
11385 #endif /* CONFIG_SPLICE */
11386 #ifdef CONFIG_EVENTFD
11387 #if defined(TARGET_NR_eventfd)
11388 case TARGET_NR_eventfd:
11389 ret = get_errno(eventfd(arg1, 0));
/* new plain fd: drop any stale fd translator registered for it */
11390 fd_trans_unregister(ret);
11393 #if defined(TARGET_NR_eventfd2)
11394 case TARGET_NR_eventfd2:
/* translate the guest's O_NONBLOCK/O_CLOEXEC bits to host values */
11396 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
11397 if (arg2 & TARGET_O_NONBLOCK) {
11398 host_flags |= O_NONBLOCK;
11400 if (arg2 & TARGET_O_CLOEXEC) {
11401 host_flags |= O_CLOEXEC;
11403 ret = get_errno(eventfd(arg1, host_flags));
11404 fd_trans_unregister(ret);
11408 #endif /* CONFIG_EVENTFD */
11409 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11410 case TARGET_NR_fallocate:
11411 #if TARGET_ABI_BITS == 32
/* 32-bit ABI: offset and length each arrive as a register pair */
11412 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
11413 target_offset64(arg5, arg6)));
11415 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
11419 #if defined(CONFIG_SYNC_FILE_RANGE)
11420 #if defined(TARGET_NR_sync_file_range)
11421 case TARGET_NR_sync_file_range:
11422 #if TARGET_ABI_BITS == 32
11423 #if defined(TARGET_MIPS)
/* MIPS o32 inserts an alignment pad, shifting every argument up one */
11424 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11425 target_offset64(arg5, arg6), arg7));
11427 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
11428 target_offset64(arg4, arg5), arg6));
11429 #endif /* !TARGET_MIPS */
11431 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
11435 #if defined(TARGET_NR_sync_file_range2)
11436 case TARGET_NR_sync_file_range2:
11437 /* This is like sync_file_range but the arguments are reordered */
11438 #if TARGET_ABI_BITS == 32
11439 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11440 target_offset64(arg5, arg6), arg2));
11442 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
11447 #if defined(TARGET_NR_signalfd4)
11448 case TARGET_NR_signalfd4:
11449 ret = do_signalfd4(arg1, arg2, arg4);
11452 #if defined(TARGET_NR_signalfd)
/* legacy signalfd: same helper with flags forced to zero */
11453 case TARGET_NR_signalfd:
11454 ret = do_signalfd4(arg1, arg2, 0);
11457 #if defined(CONFIG_EPOLL)
11458 #if defined(TARGET_NR_epoll_create)
11459 case TARGET_NR_epoll_create:
11460 ret = get_errno(epoll_create(arg1));
11463 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11464 case TARGET_NR_epoll_create1:
11465 ret = get_errno(epoll_create1(arg1));
11468 #if defined(TARGET_NR_epoll_ctl)
11469 case TARGET_NR_epoll_ctl:
11471 struct epoll_event ep;
/* epp stays NULL (e.g. for EPOLL_CTL_DEL) unless an event is given */
11472 struct epoll_event *epp = 0;
11474 struct target_epoll_event *target_ep;
11475 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
11478 ep.events = tswap32(target_ep->events);
11479 /* The epoll_data_t union is just opaque data to the kernel,
11480 * so we transfer all 64 bits across and need not worry what
11481 * actual data type it is.
11483 ep.data.u64 = tswap64(target_ep->data.u64);
11484 unlock_user_struct(target_ep, arg4, 0);
11487 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
11492 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11493 #if defined(TARGET_NR_epoll_wait)
11494 case TARGET_NR_epoll_wait:
11496 #if defined(TARGET_NR_epoll_pwait)
11497 case TARGET_NR_epoll_pwait:
11500 struct target_epoll_event *target_ep;
11501 struct epoll_event *ep;
11503 int maxevents = arg3;
11504 int timeout = arg4;
/* lock the whole guest result array for writing up front */
11506 target_ep = lock_user(VERIFY_WRITE, arg2,
11507 maxevents * sizeof(struct target_epoll_event), 1);
/* NOTE(review): alloca() sized by guest-controlled maxevents — a huge
 * value could overflow the host stack; confirm whether a bound or a
 * heap allocation is warranted here. */
11512 ep = alloca(maxevents * sizeof(struct epoll_event));
/* inner switch re-dispatches on num to pick pwait vs plain wait */
11515 #if defined(TARGET_NR_epoll_pwait)
11516 case TARGET_NR_epoll_pwait:
11518 target_sigset_t *target_set;
11519 sigset_t _set, *set = &_set;
11522 if (arg6 != sizeof(target_sigset_t)) {
11523 ret = -TARGET_EINVAL;
11527 target_set = lock_user(VERIFY_READ, arg5,
11528 sizeof(target_sigset_t), 1);
11530 unlock_user(target_ep, arg2, 0);
11533 target_to_host_sigset(set, target_set);
11534 unlock_user(target_set, arg5, 0);
11539 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11540 set, SIGSET_T_SIZE));
11544 #if defined(TARGET_NR_epoll_wait)
/* plain epoll_wait is pwait with a NULL sigmask */
11545 case TARGET_NR_epoll_wait:
11546 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11551 ret = -TARGET_ENOSYS;
11553 if (!is_error(ret)) {
/* byte-swap each returned event into the guest array */
11555 for (i = 0; i < ret; i++) {
11556 target_ep[i].events = tswap32(ep[i].events);
11557 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
11560 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
11565 #ifdef TARGET_NR_prlimit64
11566 case TARGET_NR_prlimit64:
11568 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
11569 struct target_rlimit64 *target_rnew, *target_rold;
11570 struct host_rlimit64 rnew, rold, *rnewp = 0;
11571 int resource = target_to_host_resource(arg2);
11573 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
/* rlimits are stored as 64-bit values needing byte-swap only */
11576 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
11577 rnew.rlim_max = tswap64(target_rnew->rlim_max);
11578 unlock_user_struct(target_rnew, arg3, 0);
11582 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
11583 if (!is_error(ret) && arg4) {
11584 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
11587 target_rold->rlim_cur = tswap64(rold.rlim_cur);
11588 target_rold->rlim_max = tswap64(rold.rlim_max);
11589 unlock_user_struct(target_rold, arg4, 1);
11594 #ifdef TARGET_NR_gethostname
11595 case TARGET_NR_gethostname:
11597 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
11599 ret = get_errno(gethostname(name, arg2));
11600 unlock_user(name, arg1, arg2);
11602 ret = -TARGET_EFAULT;
11607 #ifdef TARGET_NR_atomic_cmpxchg_32
11608 case TARGET_NR_atomic_cmpxchg_32:
11610 /* should use start_exclusive from main.c */
11611 abi_ulong mem_value;
/* a faulting read means the guest address is unmapped: raise SIGSEGV
 * in the guest rather than returning an error */
11612 if (get_user_u32(mem_value, arg6)) {
11613 target_siginfo_t info;
11614 info.si_signo = SIGSEGV;
11616 info.si_code = TARGET_SEGV_MAPERR;
11617 info._sifields._sigfault._addr = arg6;
11618 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
/* non-atomic compare-and-swap (see note above about exclusivity) */
11622 if (mem_value == arg2)
11623 put_user_u32(arg1, arg6);
11628 #ifdef TARGET_NR_atomic_barrier
11629 case TARGET_NR_atomic_barrier:
11631 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
11637 #ifdef TARGET_NR_timer_create
11638 case TARGET_NR_timer_create:
11640 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
11642 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
/* host timers live in the g_posix_timers table; find a free slot */
11645 int timer_index = next_free_host_timer();
11647 if (timer_index < 0) {
11648 ret = -TARGET_EAGAIN;
11650 timer_t *phtimer = g_posix_timers + timer_index;
11653 phost_sevp = &host_sevp;
11654 ret = target_to_host_sigevent(phost_sevp, arg2);
11660 ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
/* hand the guest an encoded id: magic tag | table index */
11664 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
11673 #ifdef TARGET_NR_timer_settime
11674 case TARGET_NR_timer_settime:
11676 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
11677 * struct itimerspec * old_value */
11678 target_timer_t timerid = get_timer_id(arg1);
11682 } else if (arg3 == 0) {
11683 ret = -TARGET_EINVAL;
11685 timer_t htimer = g_posix_timers[timerid];
11686 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
11688 target_to_host_itimerspec(&hspec_new, arg3);
11690 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
/* NOTE(review): the old timer value is written back through arg2,
 * which per the comment above is the *flags* argument; the old_value
 * pointer is arg4 — looks like a copy-back to the wrong address,
 * confirm against timer_settime(2). */
11691 host_to_target_itimerspec(arg2, &hspec_old);
11697 #ifdef TARGET_NR_timer_gettime
11698 case TARGET_NR_timer_gettime:
11700 /* args: timer_t timerid, struct itimerspec *curr_value */
11701 target_timer_t timerid = get_timer_id(arg1);
11705 } else if (!arg2) {
11706 ret = -TARGET_EFAULT;
11708 timer_t htimer = g_posix_timers[timerid];
11709 struct itimerspec hspec;
11710 ret = get_errno(timer_gettime(htimer, &hspec));
11712 if (host_to_target_itimerspec(arg2, &hspec)) {
11713 ret = -TARGET_EFAULT;
11720 #ifdef TARGET_NR_timer_getoverrun
11721 case TARGET_NR_timer_getoverrun:
11723 /* args: timer_t timerid */
11724 target_timer_t timerid = get_timer_id(arg1);
11729 timer_t htimer = g_posix_timers[timerid];
11730 ret = get_errno(timer_getoverrun(htimer));
/* NOTE(review): ret here is an overrun count, not a file descriptor —
 * the fd_trans_unregister() call looks misplaced; confirm. */
11732 fd_trans_unregister(ret);
11737 #ifdef TARGET_NR_timer_delete
11738 case TARGET_NR_timer_delete:
11740 /* args: timer_t timerid */
11741 target_timer_t timerid = get_timer_id(arg1);
11746 timer_t htimer = g_posix_timers[timerid];
11747 ret = get_errno(timer_delete(htimer));
/* free the table slot so the id can be reused */
11748 g_posix_timers[timerid] = 0;
11754 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11755 case TARGET_NR_timerfd_create:
11756 ret = get_errno(timerfd_create(arg1,
11757 target_to_host_bitmask(arg2, fcntl_flags_tbl)));
11761 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11762 case TARGET_NR_timerfd_gettime:
11764 struct itimerspec its_curr;
11766 ret = get_errno(timerfd_gettime(arg1, &its_curr));
11768 if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
11775 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
11776 case TARGET_NR_timerfd_settime:
11778 struct itimerspec its_new, its_old, *p_new;
11781 if (target_to_host_itimerspec(&its_new, arg3)) {
11789 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
/* old value is copied back only when the guest asked for it (arg4) */
11791 if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
11798 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
11799 case TARGET_NR_ioprio_get:
11800 ret = get_errno(ioprio_get(arg1, arg2));
11804 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
11805 case TARGET_NR_ioprio_set:
11806 ret = get_errno(ioprio_set(arg1, arg2, arg3));
11810 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
11811 case TARGET_NR_setns:
11812 ret = get_errno(setns(arg1, arg2));
11815 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
11816 case TARGET_NR_unshare:
11817 ret = get_errno(unshare(arg1));
/* default: log the unhandled syscall number, then fall through to the
 * shared ENOSYS exit used by the deliberately-silent cases above */
11823 gemu_log("qemu: Unsupported syscall: %d\n", num);
11824 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
11825 unimplemented_nowarn:
11827 ret = -TARGET_ENOSYS;
/* syscall-trace exit path: log/trace the return value for -strace */
11832 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
11835 print_syscall_ret(num, ret);
11836 trace_guest_user_syscall_ret(cpu, num, ret);
11839 ret = -TARGET_EFAULT;