4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
29 #include <sys/mount.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
36 #include <linux/capability.h>
38 #include <sys/timex.h>
40 int __clone2(int (*fn)(void *), void *child_stack_base,
41 size_t stack_size, int flags, void *arg, ...);
43 #include <sys/socket.h>
47 #include <sys/times.h>
50 #include <sys/statfs.h>
53 #include <sys/sysinfo.h>
54 #include <sys/signalfd.h>
55 //#include <sys/user.h>
56 #include <netinet/ip.h>
57 #include <netinet/tcp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include "qemu-common.h"
62 #include <sys/timerfd.h>
68 #include <sys/eventfd.h>
71 #include <sys/epoll.h>
74 #include "qemu/xattr.h"
76 #ifdef CONFIG_SENDFILE
77 #include <sys/sendfile.h>
80 #define termios host_termios
81 #define winsize host_winsize
82 #define termio host_termio
83 #define sgttyb host_sgttyb /* same as target */
84 #define tchars host_tchars /* same as target */
85 #define ltchars host_ltchars /* same as target */
87 #include <linux/termios.h>
88 #include <linux/unistd.h>
89 #include <linux/cdrom.h>
90 #include <linux/hdreg.h>
91 #include <linux/soundcard.h>
93 #include <linux/mtio.h>
95 #if defined(CONFIG_FIEMAP)
96 #include <linux/fiemap.h>
100 #include <linux/dm-ioctl.h>
101 #include <linux/reboot.h>
102 #include <linux/route.h>
103 #include <linux/filter.h>
104 #include <linux/blkpg.h>
105 #include <netpacket/packet.h>
106 #include <linux/netlink.h>
107 #ifdef CONFIG_RTNETLINK
108 #include <linux/rtnetlink.h>
109 #include <linux/if_bridge.h>
111 #include <linux/audit.h>
112 #include "linux_loop.h"
118 #define CLONE_IO 0x80000000 /* Clone io context */
121 /* We can't directly call the host clone syscall, because this will
122 * badly confuse libc (breaking mutexes, for example). So we must
123 * divide clone flags into:
124 * * flag combinations that look like pthread_create()
125 * * flag combinations that look like fork()
126 * * flags we can implement within QEMU itself
127 * * flags we can't support and will return an error for
129 /* For thread creation, all these flags must be present; for
130 * fork, none must be present.
132 #define CLONE_THREAD_FLAGS \
133 (CLONE_VM | CLONE_FS | CLONE_FILES | \
134 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
136 /* These flags are ignored:
137 * CLONE_DETACHED is now ignored by the kernel;
138 * CLONE_IO is just an optimisation hint to the I/O scheduler
140 #define CLONE_IGNORED_FLAGS \
141 (CLONE_DETACHED | CLONE_IO)
143 /* Flags for fork which we can implement within QEMU itself */
144 #define CLONE_OPTIONAL_FORK_FLAGS \
145 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
146 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
148 /* Flags for thread creation which we can implement within QEMU itself */
149 #define CLONE_OPTIONAL_THREAD_FLAGS \
150 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
151 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
153 #define CLONE_INVALID_FORK_FLAGS \
154 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
156 #define CLONE_INVALID_THREAD_FLAGS \
157 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
158 CLONE_IGNORED_FLAGS))
160 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
161 * have almost all been allocated. We cannot support any of
162 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
163 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
164 * The checks against the invalid thread masks above will catch these.
165 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
169 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
170 * once. This exercises the codepaths for restart.
172 //#define DEBUG_ERESTARTSYS
174 //#include <linux/msdos_fs.h>
175 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
176 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
186 #define _syscall0(type,name) \
187 static type name (void) \
189 return syscall(__NR_##name); \
192 #define _syscall1(type,name,type1,arg1) \
193 static type name (type1 arg1) \
195 return syscall(__NR_##name, arg1); \
198 #define _syscall2(type,name,type1,arg1,type2,arg2) \
199 static type name (type1 arg1,type2 arg2) \
201 return syscall(__NR_##name, arg1, arg2); \
204 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
205 static type name (type1 arg1,type2 arg2,type3 arg3) \
207 return syscall(__NR_##name, arg1, arg2, arg3); \
210 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
211 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
213 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
216 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
218 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
220 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
224 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
225 type5,arg5,type6,arg6) \
226 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
229 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
233 #define __NR_sys_uname __NR_uname
234 #define __NR_sys_getcwd1 __NR_getcwd
235 #define __NR_sys_getdents __NR_getdents
236 #define __NR_sys_getdents64 __NR_getdents64
237 #define __NR_sys_getpriority __NR_getpriority
238 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
239 #define __NR_sys_syslog __NR_syslog
240 #define __NR_sys_futex __NR_futex
241 #define __NR_sys_inotify_init __NR_inotify_init
242 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
243 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
245 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
247 #define __NR__llseek __NR_lseek
250 /* Newer kernel ports have llseek() instead of _llseek() */
251 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
252 #define TARGET_NR__llseek TARGET_NR_llseek
256 _syscall0(int, gettid)
258 /* This is a replacement for the host gettid() and must return a host
260 static int gettid(void) {
264 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
265 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
267 #if !defined(__NR_getdents) || \
268 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
269 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
271 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
272 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
273 loff_t *, res, uint, wh);
275 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
276 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
277 #ifdef __NR_exit_group
278 _syscall1(int,exit_group,int,error_code)
280 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
281 _syscall1(int,set_tid_address,int *,tidptr)
283 #if defined(TARGET_NR_futex) && defined(__NR_futex)
284 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
285 const struct timespec *,timeout,int *,uaddr2,int,val3)
287 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
288 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
289 unsigned long *, user_mask_ptr);
290 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
291 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
292 unsigned long *, user_mask_ptr);
293 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
295 _syscall2(int, capget, struct __user_cap_header_struct *, header,
296 struct __user_cap_data_struct *, data);
297 _syscall2(int, capset, struct __user_cap_header_struct *, header,
298 struct __user_cap_data_struct *, data);
299 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
300 _syscall2(int, ioprio_get, int, which, int, who)
302 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
303 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
305 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
306 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
309 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
310 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
311 unsigned long, idx1, unsigned long, idx2)
314 static bitmask_transtbl fcntl_flags_tbl[] = {
315 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
316 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
317 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
318 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
319 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
320 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
321 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
322 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
323 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
324 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
325 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
326 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
327 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
328 #if defined(O_DIRECT)
329 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
331 #if defined(O_NOATIME)
332 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
334 #if defined(O_CLOEXEC)
335 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
338 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
340 /* Don't terminate the list prematurely on 64-bit host+guest. */
341 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
342 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
349 QEMU_IFLA_BR_FORWARD_DELAY,
350 QEMU_IFLA_BR_HELLO_TIME,
351 QEMU_IFLA_BR_MAX_AGE,
352 QEMU_IFLA_BR_AGEING_TIME,
353 QEMU_IFLA_BR_STP_STATE,
354 QEMU_IFLA_BR_PRIORITY,
355 QEMU_IFLA_BR_VLAN_FILTERING,
356 QEMU_IFLA_BR_VLAN_PROTOCOL,
357 QEMU_IFLA_BR_GROUP_FWD_MASK,
358 QEMU_IFLA_BR_ROOT_ID,
359 QEMU_IFLA_BR_BRIDGE_ID,
360 QEMU_IFLA_BR_ROOT_PORT,
361 QEMU_IFLA_BR_ROOT_PATH_COST,
362 QEMU_IFLA_BR_TOPOLOGY_CHANGE,
363 QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
364 QEMU_IFLA_BR_HELLO_TIMER,
365 QEMU_IFLA_BR_TCN_TIMER,
366 QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER,
367 QEMU_IFLA_BR_GC_TIMER,
368 QEMU_IFLA_BR_GROUP_ADDR,
369 QEMU_IFLA_BR_FDB_FLUSH,
370 QEMU_IFLA_BR_MCAST_ROUTER,
371 QEMU_IFLA_BR_MCAST_SNOOPING,
372 QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR,
373 QEMU_IFLA_BR_MCAST_QUERIER,
374 QEMU_IFLA_BR_MCAST_HASH_ELASTICITY,
375 QEMU_IFLA_BR_MCAST_HASH_MAX,
376 QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT,
377 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT,
378 QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL,
379 QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL,
380 QEMU_IFLA_BR_MCAST_QUERIER_INTVL,
381 QEMU_IFLA_BR_MCAST_QUERY_INTVL,
382 QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL,
383 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL,
384 QEMU_IFLA_BR_NF_CALL_IPTABLES,
385 QEMU_IFLA_BR_NF_CALL_IP6TABLES,
386 QEMU_IFLA_BR_NF_CALL_ARPTABLES,
387 QEMU_IFLA_BR_VLAN_DEFAULT_PVID,
389 QEMU_IFLA_BR_VLAN_STATS_ENABLED,
390 QEMU_IFLA_BR_MCAST_STATS_ENABLED,
414 QEMU_IFLA_NET_NS_PID,
417 QEMU_IFLA_VFINFO_LIST,
425 QEMU_IFLA_PROMISCUITY,
426 QEMU_IFLA_NUM_TX_QUEUES,
427 QEMU_IFLA_NUM_RX_QUEUES,
429 QEMU_IFLA_PHYS_PORT_ID,
430 QEMU_IFLA_CARRIER_CHANGES,
431 QEMU_IFLA_PHYS_SWITCH_ID,
432 QEMU_IFLA_LINK_NETNSID,
433 QEMU_IFLA_PHYS_PORT_NAME,
434 QEMU_IFLA_PROTO_DOWN,
435 QEMU_IFLA_GSO_MAX_SEGS,
436 QEMU_IFLA_GSO_MAX_SIZE,
443 QEMU_IFLA_BRPORT_UNSPEC,
444 QEMU_IFLA_BRPORT_STATE,
445 QEMU_IFLA_BRPORT_PRIORITY,
446 QEMU_IFLA_BRPORT_COST,
447 QEMU_IFLA_BRPORT_MODE,
448 QEMU_IFLA_BRPORT_GUARD,
449 QEMU_IFLA_BRPORT_PROTECT,
450 QEMU_IFLA_BRPORT_FAST_LEAVE,
451 QEMU_IFLA_BRPORT_LEARNING,
452 QEMU_IFLA_BRPORT_UNICAST_FLOOD,
453 QEMU_IFLA_BRPORT_PROXYARP,
454 QEMU_IFLA_BRPORT_LEARNING_SYNC,
455 QEMU_IFLA_BRPORT_PROXYARP_WIFI,
456 QEMU_IFLA_BRPORT_ROOT_ID,
457 QEMU_IFLA_BRPORT_BRIDGE_ID,
458 QEMU_IFLA_BRPORT_DESIGNATED_PORT,
459 QEMU_IFLA_BRPORT_DESIGNATED_COST,
462 QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
463 QEMU_IFLA_BRPORT_CONFIG_PENDING,
464 QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER,
465 QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER,
466 QEMU_IFLA_BRPORT_HOLD_TIMER,
467 QEMU_IFLA_BRPORT_FLUSH,
468 QEMU_IFLA_BRPORT_MULTICAST_ROUTER,
469 QEMU_IFLA_BRPORT_PAD,
470 QEMU___IFLA_BRPORT_MAX
474 QEMU_IFLA_INFO_UNSPEC,
477 QEMU_IFLA_INFO_XSTATS,
478 QEMU_IFLA_INFO_SLAVE_KIND,
479 QEMU_IFLA_INFO_SLAVE_DATA,
480 QEMU___IFLA_INFO_MAX,
484 QEMU_IFLA_INET_UNSPEC,
486 QEMU___IFLA_INET_MAX,
490 QEMU_IFLA_INET6_UNSPEC,
491 QEMU_IFLA_INET6_FLAGS,
492 QEMU_IFLA_INET6_CONF,
493 QEMU_IFLA_INET6_STATS,
494 QEMU_IFLA_INET6_MCAST,
495 QEMU_IFLA_INET6_CACHEINFO,
496 QEMU_IFLA_INET6_ICMP6STATS,
497 QEMU_IFLA_INET6_TOKEN,
498 QEMU_IFLA_INET6_ADDR_GEN_MODE,
499 QEMU___IFLA_INET6_MAX
502 typedef abi_long (*TargetFdDataFunc)(void *, size_t);
503 typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t);
504 typedef struct TargetFdTrans {
505 TargetFdDataFunc host_to_target_data;
506 TargetFdDataFunc target_to_host_data;
507 TargetFdAddrFunc target_to_host_addr;
510 static TargetFdTrans **target_fd_trans;
512 static unsigned int target_fd_max;
514 static TargetFdDataFunc fd_trans_target_to_host_data(int fd)
516 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
517 return target_fd_trans[fd]->target_to_host_data;
522 static TargetFdDataFunc fd_trans_host_to_target_data(int fd)
524 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
525 return target_fd_trans[fd]->host_to_target_data;
530 static TargetFdAddrFunc fd_trans_target_to_host_addr(int fd)
532 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
533 return target_fd_trans[fd]->target_to_host_addr;
538 static void fd_trans_register(int fd, TargetFdTrans *trans)
542 if (fd >= target_fd_max) {
543 oldmax = target_fd_max;
544 target_fd_max = ((fd >> 6) + 1) << 6; /* by slice of 64 entries */
545 target_fd_trans = g_renew(TargetFdTrans *,
546 target_fd_trans, target_fd_max);
547 memset((void *)(target_fd_trans + oldmax), 0,
548 (target_fd_max - oldmax) * sizeof(TargetFdTrans *));
550 target_fd_trans[fd] = trans;
553 static void fd_trans_unregister(int fd)
555 if (fd >= 0 && fd < target_fd_max) {
556 target_fd_trans[fd] = NULL;
560 static void fd_trans_dup(int oldfd, int newfd)
562 fd_trans_unregister(newfd);
563 if (oldfd < target_fd_max && target_fd_trans[oldfd]) {
564 fd_trans_register(newfd, target_fd_trans[oldfd]);
568 static int sys_getcwd1(char *buf, size_t size)
570 if (getcwd(buf, size) == NULL) {
571 /* getcwd() sets errno */
574 return strlen(buf)+1;
577 #ifdef TARGET_NR_utimensat
578 #if defined(__NR_utimensat)
579 #define __NR_sys_utimensat __NR_utimensat
580 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
581 const struct timespec *,tsp,int,flags)
583 static int sys_utimensat(int dirfd, const char *pathname,
584 const struct timespec times[2], int flags)
590 #endif /* TARGET_NR_utimensat */
592 #ifdef CONFIG_INOTIFY
593 #include <sys/inotify.h>
595 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
596 static int sys_inotify_init(void)
598 return (inotify_init());
601 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
602 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
604 return (inotify_add_watch(fd, pathname, mask));
607 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
608 static int sys_inotify_rm_watch(int fd, int32_t wd)
610 return (inotify_rm_watch(fd, wd));
613 #ifdef CONFIG_INOTIFY1
614 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
615 static int sys_inotify_init1(int flags)
617 return (inotify_init1(flags));
622 /* Userspace can usually survive runtime without inotify */
623 #undef TARGET_NR_inotify_init
624 #undef TARGET_NR_inotify_init1
625 #undef TARGET_NR_inotify_add_watch
626 #undef TARGET_NR_inotify_rm_watch
627 #endif /* CONFIG_INOTIFY */
629 #if defined(TARGET_NR_prlimit64)
630 #ifndef __NR_prlimit64
631 # define __NR_prlimit64 -1
633 #define __NR_sys_prlimit64 __NR_prlimit64
634 /* The glibc rlimit structure may not be that used by the underlying syscall */
635 struct host_rlimit64 {
639 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
640 const struct host_rlimit64 *, new_limit,
641 struct host_rlimit64 *, old_limit)
645 #if defined(TARGET_NR_timer_create)
646 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
647 static timer_t g_posix_timers[32] = { 0, } ;
649 static inline int next_free_host_timer(void)
652 /* FIXME: Does finding the next free slot require a lock? */
653 for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
654 if (g_posix_timers[k] == 0) {
655 g_posix_timers[k] = (timer_t) 1;
663 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
665 static inline int regpairs_aligned(void *cpu_env) {
666 return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
668 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
669 static inline int regpairs_aligned(void *cpu_env) { return 1; }
670 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
671 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
672 * of registers which translates to the same as ARM/MIPS, because we start with
674 static inline int regpairs_aligned(void *cpu_env) { return 1; }
676 static inline int regpairs_aligned(void *cpu_env) { return 0; }
679 #define ERRNO_TABLE_SIZE 1200
681 /* target_to_host_errno_table[] is initialized from
682 * host_to_target_errno_table[] in syscall_init(). */
683 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
687 * This list is the union of errno values overridden in asm-<arch>/errno.h
688 * minus the errnos that are not actually generic to all archs.
690 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
691 [EAGAIN] = TARGET_EAGAIN,
692 [EIDRM] = TARGET_EIDRM,
693 [ECHRNG] = TARGET_ECHRNG,
694 [EL2NSYNC] = TARGET_EL2NSYNC,
695 [EL3HLT] = TARGET_EL3HLT,
696 [EL3RST] = TARGET_EL3RST,
697 [ELNRNG] = TARGET_ELNRNG,
698 [EUNATCH] = TARGET_EUNATCH,
699 [ENOCSI] = TARGET_ENOCSI,
700 [EL2HLT] = TARGET_EL2HLT,
701 [EDEADLK] = TARGET_EDEADLK,
702 [ENOLCK] = TARGET_ENOLCK,
703 [EBADE] = TARGET_EBADE,
704 [EBADR] = TARGET_EBADR,
705 [EXFULL] = TARGET_EXFULL,
706 [ENOANO] = TARGET_ENOANO,
707 [EBADRQC] = TARGET_EBADRQC,
708 [EBADSLT] = TARGET_EBADSLT,
709 [EBFONT] = TARGET_EBFONT,
710 [ENOSTR] = TARGET_ENOSTR,
711 [ENODATA] = TARGET_ENODATA,
712 [ETIME] = TARGET_ETIME,
713 [ENOSR] = TARGET_ENOSR,
714 [ENONET] = TARGET_ENONET,
715 [ENOPKG] = TARGET_ENOPKG,
716 [EREMOTE] = TARGET_EREMOTE,
717 [ENOLINK] = TARGET_ENOLINK,
718 [EADV] = TARGET_EADV,
719 [ESRMNT] = TARGET_ESRMNT,
720 [ECOMM] = TARGET_ECOMM,
721 [EPROTO] = TARGET_EPROTO,
722 [EDOTDOT] = TARGET_EDOTDOT,
723 [EMULTIHOP] = TARGET_EMULTIHOP,
724 [EBADMSG] = TARGET_EBADMSG,
725 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
726 [EOVERFLOW] = TARGET_EOVERFLOW,
727 [ENOTUNIQ] = TARGET_ENOTUNIQ,
728 [EBADFD] = TARGET_EBADFD,
729 [EREMCHG] = TARGET_EREMCHG,
730 [ELIBACC] = TARGET_ELIBACC,
731 [ELIBBAD] = TARGET_ELIBBAD,
732 [ELIBSCN] = TARGET_ELIBSCN,
733 [ELIBMAX] = TARGET_ELIBMAX,
734 [ELIBEXEC] = TARGET_ELIBEXEC,
735 [EILSEQ] = TARGET_EILSEQ,
736 [ENOSYS] = TARGET_ENOSYS,
737 [ELOOP] = TARGET_ELOOP,
738 [ERESTART] = TARGET_ERESTART,
739 [ESTRPIPE] = TARGET_ESTRPIPE,
740 [ENOTEMPTY] = TARGET_ENOTEMPTY,
741 [EUSERS] = TARGET_EUSERS,
742 [ENOTSOCK] = TARGET_ENOTSOCK,
743 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
744 [EMSGSIZE] = TARGET_EMSGSIZE,
745 [EPROTOTYPE] = TARGET_EPROTOTYPE,
746 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
747 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
748 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
749 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
750 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
751 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
752 [EADDRINUSE] = TARGET_EADDRINUSE,
753 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
754 [ENETDOWN] = TARGET_ENETDOWN,
755 [ENETUNREACH] = TARGET_ENETUNREACH,
756 [ENETRESET] = TARGET_ENETRESET,
757 [ECONNABORTED] = TARGET_ECONNABORTED,
758 [ECONNRESET] = TARGET_ECONNRESET,
759 [ENOBUFS] = TARGET_ENOBUFS,
760 [EISCONN] = TARGET_EISCONN,
761 [ENOTCONN] = TARGET_ENOTCONN,
762 [EUCLEAN] = TARGET_EUCLEAN,
763 [ENOTNAM] = TARGET_ENOTNAM,
764 [ENAVAIL] = TARGET_ENAVAIL,
765 [EISNAM] = TARGET_EISNAM,
766 [EREMOTEIO] = TARGET_EREMOTEIO,
767 [EDQUOT] = TARGET_EDQUOT,
768 [ESHUTDOWN] = TARGET_ESHUTDOWN,
769 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
770 [ETIMEDOUT] = TARGET_ETIMEDOUT,
771 [ECONNREFUSED] = TARGET_ECONNREFUSED,
772 [EHOSTDOWN] = TARGET_EHOSTDOWN,
773 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
774 [EALREADY] = TARGET_EALREADY,
775 [EINPROGRESS] = TARGET_EINPROGRESS,
776 [ESTALE] = TARGET_ESTALE,
777 [ECANCELED] = TARGET_ECANCELED,
778 [ENOMEDIUM] = TARGET_ENOMEDIUM,
779 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
781 [ENOKEY] = TARGET_ENOKEY,
784 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
787 [EKEYREVOKED] = TARGET_EKEYREVOKED,
790 [EKEYREJECTED] = TARGET_EKEYREJECTED,
793 [EOWNERDEAD] = TARGET_EOWNERDEAD,
795 #ifdef ENOTRECOVERABLE
796 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
799 [ENOMSG] = TARGET_ENOMSG,
803 static inline int host_to_target_errno(int err)
805 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
806 host_to_target_errno_table[err]) {
807 return host_to_target_errno_table[err];
812 static inline int target_to_host_errno(int err)
814 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
815 target_to_host_errno_table[err]) {
816 return target_to_host_errno_table[err];
821 static inline abi_long get_errno(abi_long ret)
824 return -host_to_target_errno(errno);
829 static inline int is_error(abi_long ret)
831 return (abi_ulong)ret >= (abi_ulong)(-4096);
834 const char *target_strerror(int err)
836 if (err == TARGET_ERESTARTSYS) {
837 return "To be restarted";
839 if (err == TARGET_QEMU_ESIGRETURN) {
840 return "Successful exit from sigreturn";
843 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
846 return strerror(target_to_host_errno(err));
849 #define safe_syscall0(type, name) \
850 static type safe_##name(void) \
852 return safe_syscall(__NR_##name); \
855 #define safe_syscall1(type, name, type1, arg1) \
856 static type safe_##name(type1 arg1) \
858 return safe_syscall(__NR_##name, arg1); \
861 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
862 static type safe_##name(type1 arg1, type2 arg2) \
864 return safe_syscall(__NR_##name, arg1, arg2); \
867 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
868 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
870 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
873 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
875 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
877 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
880 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
881 type4, arg4, type5, arg5) \
882 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
885 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
888 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
889 type4, arg4, type5, arg5, type6, arg6) \
890 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
891 type5 arg5, type6 arg6) \
893 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
896 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
897 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
898 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
899 int, flags, mode_t, mode)
900 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
901 struct rusage *, rusage)
902 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
903 int, options, struct rusage *, rusage)
904 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
905 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
906 fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
907 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
908 struct timespec *, tsp, const sigset_t *, sigmask,
910 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
911 int, maxevents, int, timeout, const sigset_t *, sigmask,
913 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
914 const struct timespec *,timeout,int *,uaddr2,int,val3)
915 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
916 safe_syscall2(int, kill, pid_t, pid, int, sig)
917 safe_syscall2(int, tkill, int, tid, int, sig)
918 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
919 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
920 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
921 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
922 unsigned long, pos_l, unsigned long, pos_h)
923 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
924 unsigned long, pos_l, unsigned long, pos_h)
925 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
927 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
928 int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
929 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
930 int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
931 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
932 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
933 safe_syscall2(int, flock, int, fd, int, operation)
934 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
935 const struct timespec *, uts, size_t, sigsetsize)
936 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
938 safe_syscall2(int, nanosleep, const struct timespec *, req,
939 struct timespec *, rem)
940 #ifdef TARGET_NR_clock_nanosleep
941 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
942 const struct timespec *, req, struct timespec *, rem)
945 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
947 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
948 long, msgtype, int, flags)
949 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
950 unsigned, nsops, const struct timespec *, timeout)
952 /* This host kernel architecture uses a single ipc syscall; fake up
953 * wrappers for the sub-operations to hide this implementation detail.
954 * Annoyingly we can't include linux/ipc.h to get the constant definitions
955 * for the call parameter because some structs in there conflict with the
956 * sys/ipc.h ones. So we just define them here, and rely on them being
957 * the same for all host architectures.
959 #define Q_SEMTIMEDOP 4
962 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
964 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
965 void *, ptr, long, fifth)
966 static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
968 return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
970 static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
972 return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
974 static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
975 const struct timespec *timeout)
977 return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
981 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
982 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
983 size_t, len, unsigned, prio, const struct timespec *, timeout)
984 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
985 size_t, len, unsigned *, prio, const struct timespec *, timeout)
987 /* We do ioctl like this rather than via safe_syscall3 to preserve the
988 * "third argument might be integer or pointer or not present" behaviour of
991 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
992 /* Similarly for fcntl. Note that callers must always:
993 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
994 * use the flock64 struct rather than unsuffixed flock
995 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
998 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
1000 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
1003 static inline int host_to_target_sock_type(int host_type)
1007 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
1009 target_type = TARGET_SOCK_DGRAM;
1012 target_type = TARGET_SOCK_STREAM;
1015 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
1019 #if defined(SOCK_CLOEXEC)
1020 if (host_type & SOCK_CLOEXEC) {
1021 target_type |= TARGET_SOCK_CLOEXEC;
1025 #if defined(SOCK_NONBLOCK)
1026 if (host_type & SOCK_NONBLOCK) {
1027 target_type |= TARGET_SOCK_NONBLOCK;
1034 static abi_ulong target_brk;
1035 static abi_ulong target_original_brk;
1036 static abi_ulong brk_page;
1038 void target_set_brk(abi_ulong new_brk)
1040 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
1041 brk_page = HOST_PAGE_ALIGN(target_brk);
1044 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
1045 #define DEBUGF_BRK(message, args...)
1047 /* do_brk() must return target values and target errnos. */
1048 abi_long do_brk(abi_ulong new_brk)
1050 abi_long mapped_addr;
1051 abi_ulong new_alloc_size;
1053 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
/* brk(0) is a query: report the current break without changing it. */
1056 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
/* Refuse to shrink below the initial break, as Linux does. */
1059 if (new_brk < target_original_brk) {
1060 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
1065 /* If the new brk is less than the highest page reserved to the
1066 * target heap allocation, set it and we're almost done... */
1067 if (new_brk <= brk_page) {
1068 /* Heap contents are initialized to zero, as for anonymous
1070 if (new_brk > target_brk) {
1071 memset(g2h(target_brk), 0, new_brk - target_brk);
1073 target_brk = new_brk;
1074 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
1078 /* We need to allocate more memory after the brk... Note that
1079 * we don't use MAP_FIXED because that will map over the top of
1080 * any existing mapping (like the one with the host libc or qemu
1081 * itself); instead we treat "mapped but at wrong address" as
1082 * a failure and unmap again.
1084 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
1085 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
1086 PROT_READ|PROT_WRITE,
1087 MAP_ANON|MAP_PRIVATE, 0, 0));
1089 if (mapped_addr == brk_page) {
1090 /* Heap contents are initialized to zero, as for anonymous
1091 * mapped pages. Technically the new pages are already
1092 * initialized to zero since they *are* anonymous mapped
1093 * pages, however we have to take care with the contents that
1094 * come from the remaining part of the previous page: it may
1095 * contain garbage data due to a previous heap usage (grown
1096 * then shrunken). */
1097 memset(g2h(target_brk), 0, brk_page - target_brk);
1099 target_brk = new_brk;
1100 brk_page = HOST_PAGE_ALIGN(target_brk);
1101 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
1104 } else if (mapped_addr != -1) {
1105 /* Mapped but at wrong address, meaning there wasn't actually
1106 * enough space for this brk.
1108 target_munmap(mapped_addr, new_alloc_size);
1110 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
1113 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
1116 #if defined(TARGET_ALPHA)
1117 /* We (partially) emulate OSF/1 on Alpha, which requires we
1118 return a proper errno, not an unchanged brk value. */
1119 return -TARGET_ENOMEM;
1121 /* For everything else, return the previous break. */
/* Unmarshal a guest fd_set covering n descriptors from target_fds_addr
 * into the host fd_set *fds, swapping each abi_ulong word and testing
 * the bits individually.  Returns 0 on success or -TARGET_EFAULT. */
1125 static inline abi_long copy_from_user_fdset(fd_set *fds,
1126 abi_ulong target_fds_addr,
1130 abi_ulong b, *target_fds;
/* Number of abi_ulong words needed to hold n bits. */
1132 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1133 if (!(target_fds = lock_user(VERIFY_READ,
1135 sizeof(abi_ulong) * nw,
1137 return -TARGET_EFAULT;
1141 for (i = 0; i < nw; i++) {
1142 /* grab the abi_ulong */
1143 __get_user(b, &target_fds[i]);
1144 for (j = 0; j < TARGET_ABI_BITS; j++) {
1145 /* check the bit inside the abi_ulong */
1152 unlock_user(target_fds, target_fds_addr, 0);
/* Like copy_from_user_fdset(), but tolerates a NULL guest pointer:
 * only copies when target_fds_addr is non-zero, and sets *fds_ptr for
 * passing straight to select()/pselect(). */
1157 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
1158 abi_ulong target_fds_addr,
1161 if (target_fds_addr) {
1162 if (copy_from_user_fdset(fds, target_fds_addr, n))
1163 return -TARGET_EFAULT;
/* Marshal the host fd_set back to the guest at target_fds_addr,
 * rebuilding each abi_ulong word bit by bit with FD_ISSET.
 * Returns 0 on success or -TARGET_EFAULT. */
1171 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1177 abi_ulong *target_fds;
1179 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1180 if (!(target_fds = lock_user(VERIFY_WRITE,
1182 sizeof(abi_ulong) * nw,
1184 return -TARGET_EFAULT;
1187 for (i = 0; i < nw; i++) {
1189 for (j = 0; j < TARGET_ABI_BITS; j++) {
1190 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1193 __put_user(v, &target_fds[i]);
1196 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1201 #if defined(__alpha__)
1202 #define HOST_HZ 1024
/* Rescale a clock_t tick count from the host's HZ to the target's;
 * a no-op when both sides use the same tick rate. */
1207 static inline abi_long host_to_target_clock_t(long ticks)
1209 #if HOST_HZ == TARGET_HZ
/* 64-bit intermediate avoids overflow while rescaling. */
1212 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
/* Copy a host struct rusage to the guest at target_addr, byte-swapping
 * every field.  Returns 0 on success or -TARGET_EFAULT. */
1216 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1217 const struct rusage *rusage)
1219 struct target_rusage *target_rusage;
1221 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1222 return -TARGET_EFAULT;
1223 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1224 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1225 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1226 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1227 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1228 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1229 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1230 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1231 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1232 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1233 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1234 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1235 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1236 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1237 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1238 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1239 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1240 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1241 unlock_user_struct(target_rusage, target_addr, 1);
/* Convert a guest rlimit value to a host rlim_t: the target's
 * RLIM_INFINITY maps to the host's, and any value that does not
 * round-trip through rlim_t is also treated as infinity. */
1246 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1248 abi_ulong target_rlim_swap;
1251 target_rlim_swap = tswapal(target_rlim);
1252 if (target_rlim_swap == TARGET_RLIM_INFINITY)
1253 return RLIM_INFINITY;
1255 result = target_rlim_swap;
/* Detect truncation when rlim_t is narrower than abi_ulong. */
1256 if (target_rlim_swap != (rlim_t)result)
1257 return RLIM_INFINITY;
/* Convert a host rlim_t to a guest rlimit value, mapping host
 * infinity -- and any value that will not fit the target width --
 * to TARGET_RLIM_INFINITY. */
1262 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1264 abi_ulong target_rlim_swap;
1267 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1268 target_rlim_swap = TARGET_RLIM_INFINITY;
1270 target_rlim_swap = rlim;
1271 result = tswapal(target_rlim_swap);
/* Map a guest RLIMIT_* resource code to the host's numbering for
 * getrlimit()/setrlimit(); the two need not agree across ABIs. */
1276 static inline int target_to_host_resource(int code)
1279 case TARGET_RLIMIT_AS:
1281 case TARGET_RLIMIT_CORE:
1283 case TARGET_RLIMIT_CPU:
1285 case TARGET_RLIMIT_DATA:
1287 case TARGET_RLIMIT_FSIZE:
1288 return RLIMIT_FSIZE;
1289 case TARGET_RLIMIT_LOCKS:
1290 return RLIMIT_LOCKS;
1291 case TARGET_RLIMIT_MEMLOCK:
1292 return RLIMIT_MEMLOCK;
1293 case TARGET_RLIMIT_MSGQUEUE:
1294 return RLIMIT_MSGQUEUE;
1295 case TARGET_RLIMIT_NICE:
1297 case TARGET_RLIMIT_NOFILE:
1298 return RLIMIT_NOFILE;
1299 case TARGET_RLIMIT_NPROC:
1300 return RLIMIT_NPROC;
1301 case TARGET_RLIMIT_RSS:
1303 case TARGET_RLIMIT_RTPRIO:
1304 return RLIMIT_RTPRIO;
1305 case TARGET_RLIMIT_SIGPENDING:
1306 return RLIMIT_SIGPENDING;
1307 case TARGET_RLIMIT_STACK:
1308 return RLIMIT_STACK;
/* Read a guest struct timeval into host form.
 * Returns 0 on success or -TARGET_EFAULT. */
1314 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1315 abi_ulong target_tv_addr)
1317 struct target_timeval *target_tv;
1319 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1320 return -TARGET_EFAULT;
1322 __get_user(tv->tv_sec, &target_tv->tv_sec);
1323 __get_user(tv->tv_usec, &target_tv->tv_usec);
1325 unlock_user_struct(target_tv, target_tv_addr, 0);
/* Write a host struct timeval to the guest.
 * Returns 0 on success or -TARGET_EFAULT. */
1330 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1331 const struct timeval *tv)
1333 struct target_timeval *target_tv;
1335 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1336 return -TARGET_EFAULT;
1338 __put_user(tv->tv_sec, &target_tv->tv_sec);
1339 __put_user(tv->tv_usec, &target_tv->tv_usec);
1341 unlock_user_struct(target_tv, target_tv_addr, 1);
/* Read a guest struct timezone into host form (for settimeofday()).
 * Returns 0 on success or -TARGET_EFAULT. */
1346 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1347 abi_ulong target_tz_addr)
1349 struct target_timezone *target_tz;
1351 if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1352 return -TARGET_EFAULT;
1355 __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1356 __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1358 unlock_user_struct(target_tz, target_tz_addr, 0);
1363 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
/* Read a guest struct mq_attr (POSIX message queue attributes) into
 * host form.  Returns 0 on success or -TARGET_EFAULT. */
1366 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1367 abi_ulong target_mq_attr_addr)
1369 struct target_mq_attr *target_mq_attr;
1371 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1372 target_mq_attr_addr, 1))
1373 return -TARGET_EFAULT;
1375 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1376 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1377 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1378 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1380 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
/* Write a host struct mq_attr back to the guest.
 * Returns 0 on success or -TARGET_EFAULT. */
1385 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1386 const struct mq_attr *attr)
1388 struct target_mq_attr *target_mq_attr;
1390 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1391 target_mq_attr_addr, 0))
1392 return -TARGET_EFAULT;
1394 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1395 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1396 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1397 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1399 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1405 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1406 /* do_select() must return target values and target errnos. */
1407 static abi_long do_select(int n,
1408 abi_ulong rfd_addr, abi_ulong wfd_addr,
1409 abi_ulong efd_addr, abi_ulong target_tv_addr)
1411 fd_set rfds, wfds, efds;
1412 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1414 struct timespec ts, *ts_ptr;
/* Unmarshal the three fd sets; any may be NULL in the guest. */
1417 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1421 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1425 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
/* The guest passes a timeval; the host call takes a timespec. */
1430 if (target_tv_addr) {
1431 if (copy_from_user_timeval(&tv, target_tv_addr))
1432 return -TARGET_EFAULT;
1433 ts.tv_sec = tv.tv_sec;
1434 ts.tv_nsec = tv.tv_usec * 1000;
1440 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
/* On success, write the (possibly modified) sets and remaining
 * timeout back to the guest, as Linux select() does. */
1443 if (!is_error(ret)) {
1444 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1445 return -TARGET_EFAULT;
1446 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1447 return -TARGET_EFAULT;
1448 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1449 return -TARGET_EFAULT;
1451 if (target_tv_addr) {
1452 tv.tv_sec = ts.tv_sec;
1453 tv.tv_usec = ts.tv_nsec / 1000;
1454 if (copy_to_user_timeval(target_tv_addr, &tv)) {
1455 return -TARGET_EFAULT;
1463 #if defined(TARGET_WANT_OLD_SYS_SELECT)
/* Old-style select(): the guest passes a single pointer to a struct
 * bundling all five arguments; unpack it and forward to do_select(). */
1464 static abi_long do_old_select(abi_ulong arg1)
1466 struct target_sel_arg_struct *sel;
1467 abi_ulong inp, outp, exp, tvp;
1470 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1471 return -TARGET_EFAULT;
1474 nsel = tswapal(sel->n);
1475 inp = tswapal(sel->inp);
1476 outp = tswapal(sel->outp);
1477 exp = tswapal(sel->exp);
1478 tvp = tswapal(sel->tvp);
1480 unlock_user_struct(sel, arg1, 0);
1482 return do_select(nsel, inp, outp, exp, tvp);
/* Thin wrapper around host pipe2(); host_pipe receives the two fds. */
1487 static abi_long do_pipe2(int host_pipe[], int flags)
1490 return pipe2(host_pipe, flags);
/* Emulate pipe()/pipe2() for the guest.  Creates the host pipe, then
 * delivers the two fds the way the target ABI expects: some targets
 * return fd[0] directly and pass fd[1] in a second register, others
 * store both through the guest pipedes pointer. */
1496 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1497 int flags, int is_pipe2)
1501 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1504 return get_errno(ret);
1506 /* Several targets have special calling conventions for the original
1507 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1509 #if defined(TARGET_ALPHA)
1510 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1511 return host_pipe[0];
1512 #elif defined(TARGET_MIPS)
1513 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1514 return host_pipe[0];
1515 #elif defined(TARGET_SH4)
1516 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1517 return host_pipe[0];
1518 #elif defined(TARGET_SPARC)
1519 ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1520 return host_pipe[0];
/* Generic path: write both fds into the guest array. */
1524 if (put_user_s32(host_pipe[0], pipedes)
1525 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1526 return -TARGET_EFAULT;
1527 return get_errno(ret);
/* Convert a guest ip_mreq/ip_mreqn (multicast group membership request)
 * to host form; imr_ifindex is only present in the larger ip_mreqn
 * variant, distinguished by len. */
1530 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1531 abi_ulong target_addr,
1534 struct target_ip_mreqn *target_smreqn;
1536 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1538 return -TARGET_EFAULT;
/* IP addresses stay in network byte order; no swap needed. */
1539 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1540 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1541 if (len == sizeof(struct target_ip_mreqn))
1542 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1543 unlock_user(target_smreqn, target_addr, 0);
/* Convert a guest sockaddr to host form, fixing up sa_family byte
 * order and the family-specific fields (AF_UNIX path termination,
 * AF_NETLINK pid/groups, AF_PACKET ifindex/hatype).  A per-fd
 * translation hook, if registered, takes over entirely. */
1548 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1549 abi_ulong target_addr,
1552 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1553 sa_family_t sa_family;
1554 struct target_sockaddr *target_saddr;
1556 if (fd_trans_target_to_host_addr(fd)) {
1557 return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1560 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1562 return -TARGET_EFAULT;
1564 sa_family = tswap16(target_saddr->sa_family);
1566 /* Oops. The caller might send an incomplete sun_path; sun_path
1567 * must be terminated by \0 (see the manual page), but
1568 * unfortunately it is quite common to specify sockaddr_un
1569 * length as "strlen(x->sun_path)" while it should be
1570 * "strlen(...) + 1". We'll fix that here if needed.
1571 * Linux kernel has a similar feature.
1574 if (sa_family == AF_UNIX) {
1575 if (len < unix_maxlen && len > 0) {
1576 char *cp = (char*)target_saddr;
/* Last byte in range is non-NUL but the next one is: extend
 * len by one to include the terminator. */
1578 if ( cp[len-1] && !cp[len] )
1581 if (len > unix_maxlen)
1585 memcpy(addr, target_saddr, len);
1586 addr->sa_family = sa_family;
1587 if (sa_family == AF_NETLINK) {
1588 struct sockaddr_nl *nladdr;
1590 nladdr = (struct sockaddr_nl *)addr;
1591 nladdr->nl_pid = tswap32(nladdr->nl_pid);
1592 nladdr->nl_groups = tswap32(nladdr->nl_groups);
1593 } else if (sa_family == AF_PACKET) {
1594 struct target_sockaddr_ll *lladdr;
1596 lladdr = (struct target_sockaddr_ll *)addr;
1597 lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1598 lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1600 unlock_user(target_saddr, target_addr, 0);
/* Convert a host sockaddr to guest form at target_addr: swap
 * sa_family (only if len actually covers it) and the AF_NETLINK /
 * AF_PACKET family-specific fields.  Returns 0 or -TARGET_EFAULT. */
1605 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1606 struct sockaddr *addr,
1609 struct target_sockaddr *target_saddr;
1615 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1617 return -TARGET_EFAULT;
1618 memcpy(target_saddr, addr, len);
/* A truncated buffer may end before sa_family; only swap it when
 * the field is fully inside the copied region. */
1619 if (len >= offsetof(struct target_sockaddr, sa_family) +
1620 sizeof(target_saddr->sa_family)) {
1621 target_saddr->sa_family = tswap16(addr->sa_family);
1623 if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
1624 struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
1625 target_nl->nl_pid = tswap32(target_nl->nl_pid);
1626 target_nl->nl_groups = tswap32(target_nl->nl_groups);
1627 } else if (addr->sa_family == AF_PACKET) {
1628 struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1629 target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1630 target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1632 unlock_user(target_saddr, target_addr, len);
/* Convert the guest's control-message (cmsg) chain attached to a
 * sendmsg() into host form inside msgh.  Walks the guest and host
 * chains in lockstep, converting the headers and the payloads we
 * understand (SCM_RIGHTS fd arrays, SCM_CREDENTIALS); anything else
 * is copied verbatim with a warning. */
1637 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1638 struct target_msghdr *target_msgh)
1640 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1641 abi_long msg_controllen;
1642 abi_ulong target_cmsg_addr;
1643 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1644 socklen_t space = 0;
1646 msg_controllen = tswapal(target_msgh->msg_controllen);
1647 if (msg_controllen < sizeof (struct target_cmsghdr))
1649 target_cmsg_addr = tswapal(target_msgh->msg_control);
1650 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1651 target_cmsg_start = target_cmsg;
1653 return -TARGET_EFAULT;
1655 while (cmsg && target_cmsg) {
1656 void *data = CMSG_DATA(cmsg);
1657 void *target_data = TARGET_CMSG_DATA(target_cmsg);
/* Payload length = total cmsg length minus the (target) header. */
1659 int len = tswapal(target_cmsg->cmsg_len)
1660 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
1662 space += CMSG_SPACE(len);
1663 if (space > msgh->msg_controllen) {
1664 space -= CMSG_SPACE(len);
1665 /* This is a QEMU bug, since we allocated the payload
1666 * area ourselves (unlike overflow in host-to-target
1667 * conversion, which is just the guest giving us a buffer
1668 * that's too small). It can't happen for the payload types
1669 * we currently support; if it becomes an issue in future
1670 * we would need to improve our allocation strategy to
1671 * something more intelligent than "twice the size of the
1672 * target buffer we're reading from".
1674 gemu_log("Host cmsg overflow\n");
1678 if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1679 cmsg->cmsg_level = SOL_SOCKET;
1681 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1683 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1684 cmsg->cmsg_len = CMSG_LEN(len);
1686 if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1687 int *fd = (int *)data;
1688 int *target_fd = (int *)target_data;
1689 int i, numfds = len / sizeof(int);
1691 for (i = 0; i < numfds; i++) {
1692 __get_user(fd[i], target_fd + i);
1694 } else if (cmsg->cmsg_level == SOL_SOCKET
1695 && cmsg->cmsg_type == SCM_CREDENTIALS) {
1696 struct ucred *cred = (struct ucred *)data;
1697 struct target_ucred *target_cred =
1698 (struct target_ucred *)target_data;
1700 __get_user(cred->pid, &target_cred->pid);
1701 __get_user(cred->uid, &target_cred->uid);
1702 __get_user(cred->gid, &target_cred->gid);
/* Unknown payload type: pass the bytes through unchanged. */
1704 gemu_log("Unsupported ancillary data: %d/%d\n",
1705 cmsg->cmsg_level, cmsg->cmsg_type);
1706 memcpy(data, target_data, len);
1709 cmsg = CMSG_NXTHDR(msgh, cmsg);
1710 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1713 unlock_user(target_cmsg, target_cmsg_addr, 0);
1715 msgh->msg_controllen = space;
/* Convert the host control-message chain produced by recvmsg() back
 * into guest form.  Mirrors target_to_host_cmsg() but additionally
 * handles truncation: when the guest buffer is too small, set
 * MSG_CTRUNC and truncate the payload, matching Linux put_cmsg(). */
1719 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1720 struct msghdr *msgh)
1722 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1723 abi_long msg_controllen;
1724 abi_ulong target_cmsg_addr;
1725 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1726 socklen_t space = 0;
1728 msg_controllen = tswapal(target_msgh->msg_controllen);
1729 if (msg_controllen < sizeof (struct target_cmsghdr))
1731 target_cmsg_addr = tswapal(target_msgh->msg_control);
1732 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1733 target_cmsg_start = target_cmsg;
1735 return -TARGET_EFAULT;
1737 while (cmsg && target_cmsg) {
1738 void *data = CMSG_DATA(cmsg);
1739 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1741 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
1742 int tgt_len, tgt_space;
1744 /* We never copy a half-header but may copy half-data;
1745 * this is Linux's behaviour in put_cmsg(). Note that
1746 * truncation here is a guest problem (which we report
1747 * to the guest via the CTRUNC bit), unlike truncation
1748 * in target_to_host_cmsg, which is a QEMU bug.
1750 if (msg_controllen < sizeof(struct cmsghdr)) {
1751 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1755 if (cmsg->cmsg_level == SOL_SOCKET) {
1756 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1758 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1760 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1762 tgt_len = TARGET_CMSG_LEN(len);
1764 /* Payload types which need a different size of payload on
1765 * the target must adjust tgt_len here.
1767 switch (cmsg->cmsg_level) {
1769 switch (cmsg->cmsg_type) {
1771 tgt_len = sizeof(struct target_timeval);
/* Clamp to the remaining guest buffer and flag truncation. */
1780 if (msg_controllen < tgt_len) {
1781 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1782 tgt_len = msg_controllen;
1785 /* We must now copy-and-convert len bytes of payload
1786 * into tgt_len bytes of destination space. Bear in mind
1787 * that in both source and destination we may be dealing
1788 * with a truncated value!
1790 switch (cmsg->cmsg_level) {
1792 switch (cmsg->cmsg_type) {
1795 int *fd = (int *)data;
1796 int *target_fd = (int *)target_data;
1797 int i, numfds = tgt_len / sizeof(int);
1799 for (i = 0; i < numfds; i++) {
1800 __put_user(fd[i], target_fd + i);
1806 struct timeval *tv = (struct timeval *)data;
1807 struct target_timeval *target_tv =
1808 (struct target_timeval *)target_data;
1810 if (len != sizeof(struct timeval) ||
1811 tgt_len != sizeof(struct target_timeval)) {
1815 /* copy struct timeval to target */
1816 __put_user(tv->tv_sec, &target_tv->tv_sec);
1817 __put_user(tv->tv_usec, &target_tv->tv_usec);
1820 case SCM_CREDENTIALS:
1822 struct ucred *cred = (struct ucred *)data;
1823 struct target_ucred *target_cred =
1824 (struct target_ucred *)target_data;
1826 __put_user(cred->pid, &target_cred->pid);
1827 __put_user(cred->uid, &target_cred->uid);
1828 __put_user(cred->gid, &target_cred->gid);
/* Unknown payload: copy what fits, zero-pad any excess. */
1838 gemu_log("Unsupported ancillary data: %d/%d\n",
1839 cmsg->cmsg_level, cmsg->cmsg_type);
1840 memcpy(target_data, data, MIN(len, tgt_len));
1841 if (tgt_len > len) {
1842 memset(target_data + len, 0, tgt_len - len);
1846 target_cmsg->cmsg_len = tswapal(tgt_len);
1847 tgt_space = TARGET_CMSG_SPACE(len);
1848 if (msg_controllen < tgt_space) {
1849 tgt_space = msg_controllen;
1851 msg_controllen -= tgt_space;
1853 cmsg = CMSG_NXTHDR(msgh, cmsg);
1854 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1857 unlock_user(target_cmsg, target_cmsg_addr, space);
1859 target_msgh->msg_controllen = tswapal(space);
/* Byte-swap every field of a netlink message header in place.
 * Self-inverse, so it serves both conversion directions. */
1863 static void tswap_nlmsghdr(struct nlmsghdr *nlh)
1865 nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
1866 nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
1867 nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
1868 nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
1869 nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
/* Walk a buffer of host netlink messages, converting each header (and
 * the embedded header of NLMSG_ERROR payloads) to guest byte order and
 * handing the message payload to the supplied per-message callback. */
1872 static abi_long host_to_target_for_each_nlmsg(struct nlmsghdr *nlh,
1874 abi_long (*host_to_target_nlmsg)
1875 (struct nlmsghdr *))
1880 while (len > sizeof(struct nlmsghdr)) {
/* Host byte order here: length is read before the swap. */
1882 nlmsg_len = nlh->nlmsg_len;
1883 if (nlmsg_len < sizeof(struct nlmsghdr) ||
1888 switch (nlh->nlmsg_type) {
1890 tswap_nlmsghdr(nlh);
/* NLMSG_ERROR carries an errno plus the offending header. */
1896 struct nlmsgerr *e = NLMSG_DATA(nlh);
1897 e->error = tswap32(e->error);
1898 tswap_nlmsghdr(&e->msg);
1899 tswap_nlmsghdr(nlh);
1903 ret = host_to_target_nlmsg(nlh);
1905 tswap_nlmsghdr(nlh);
1910 tswap_nlmsghdr(nlh);
1911 len -= NLMSG_ALIGN(nlmsg_len);
1912 nlh = (struct nlmsghdr *)(((char*)nlh) + NLMSG_ALIGN(nlmsg_len));
/* Inverse of host_to_target_for_each_nlmsg(): walk guest netlink
 * messages, validating the (still guest-order) length before swapping
 * each header to host byte order and invoking the callback. */
1917 static abi_long target_to_host_for_each_nlmsg(struct nlmsghdr *nlh,
1919 abi_long (*target_to_host_nlmsg)
1920 (struct nlmsghdr *))
1924 while (len > sizeof(struct nlmsghdr)) {
/* Length is still in guest order, hence the tswap32 on reads. */
1925 if (tswap32(nlh->nlmsg_len) < sizeof(struct nlmsghdr) ||
1926 tswap32(nlh->nlmsg_len) > len) {
1929 tswap_nlmsghdr(nlh);
1930 switch (nlh->nlmsg_type) {
1937 struct nlmsgerr *e = NLMSG_DATA(nlh);
1938 e->error = tswap32(e->error);
1939 tswap_nlmsghdr(&e->msg);
1943 ret = target_to_host_nlmsg(nlh);
1948 len -= NLMSG_ALIGN(nlh->nlmsg_len);
1949 nlh = (struct nlmsghdr *)(((char *)nlh) + NLMSG_ALIGN(nlh->nlmsg_len));
1954 #ifdef CONFIG_RTNETLINK
/* Walk a run of host netlink attributes (struct nlattr), converting
 * each payload via the callback (with caller context) and swapping
 * the attribute header to guest byte order. */
1955 static abi_long host_to_target_for_each_nlattr(struct nlattr *nlattr,
1956 size_t len, void *context,
1957 abi_long (*host_to_target_nlattr)
1961 unsigned short nla_len;
1964 while (len > sizeof(struct nlattr)) {
1965 nla_len = nlattr->nla_len;
1966 if (nla_len < sizeof(struct nlattr) ||
/* Convert the payload first, while the header is host order. */
1970 ret = host_to_target_nlattr(nlattr, context);
1971 nlattr->nla_len = tswap16(nlattr->nla_len);
1972 nlattr->nla_type = tswap16(nlattr->nla_type);
1976 len -= NLA_ALIGN(nla_len);
1977 nlattr = (struct nlattr *)(((char *)nlattr) + NLA_ALIGN(nla_len));
/* Same walk as host_to_target_for_each_nlattr(), but over rtnetlink
 * route attributes (struct rtattr) with RTA_ALIGN spacing. */
1982 static abi_long host_to_target_for_each_rtattr(struct rtattr *rtattr,
1984 abi_long (*host_to_target_rtattr)
1987 unsigned short rta_len;
1990 while (len > sizeof(struct rtattr)) {
1991 rta_len = rtattr->rta_len;
1992 if (rta_len < sizeof(struct rtattr) ||
1996 ret = host_to_target_rtattr(rtattr);
1997 rtattr->rta_len = tswap16(rtattr->rta_len);
1998 rtattr->rta_type = tswap16(rtattr->rta_type);
2002 len -= RTA_ALIGN(rta_len);
2003 rtattr = (struct rtattr *)(((char *)rtattr) + RTA_ALIGN(rta_len));
/* Pointer to an nlattr's payload, just past the aligned header. */
2008 #define NLA_DATA(nla) ((void *)((char *)(nla)) + NLA_HDRLEN)
/* Byte-swap the payload of one IFLA_BR_* bridge attribute according
 * to its width (u8 payloads need no swap; bridge IDs are raw bytes). */
2010 static abi_long host_to_target_data_bridge_nlattr(struct nlattr *nlattr,
2017 switch (nlattr->nla_type) {
2019 case QEMU_IFLA_BR_FDB_FLUSH:
2022 case QEMU_IFLA_BR_GROUP_ADDR:
2025 case QEMU_IFLA_BR_VLAN_FILTERING:
2026 case QEMU_IFLA_BR_TOPOLOGY_CHANGE:
2027 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED:
2028 case QEMU_IFLA_BR_MCAST_ROUTER:
2029 case QEMU_IFLA_BR_MCAST_SNOOPING:
2030 case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR:
2031 case QEMU_IFLA_BR_MCAST_QUERIER:
2032 case QEMU_IFLA_BR_NF_CALL_IPTABLES:
2033 case QEMU_IFLA_BR_NF_CALL_IP6TABLES:
2034 case QEMU_IFLA_BR_NF_CALL_ARPTABLES:
2037 case QEMU_IFLA_BR_PRIORITY:
2038 case QEMU_IFLA_BR_VLAN_PROTOCOL:
2039 case QEMU_IFLA_BR_GROUP_FWD_MASK:
2040 case QEMU_IFLA_BR_ROOT_PORT:
2041 case QEMU_IFLA_BR_VLAN_DEFAULT_PVID:
2042 u16 = NLA_DATA(nlattr)
2043 *u16 = tswap16(*u16);
2046 case QEMU_IFLA_BR_FORWARD_DELAY:
2047 case QEMU_IFLA_BR_HELLO_TIME:
2048 case QEMU_IFLA_BR_MAX_AGE:
2049 case QEMU_IFLA_BR_AGEING_TIME:
2050 case QEMU_IFLA_BR_STP_STATE:
2051 case QEMU_IFLA_BR_ROOT_PATH_COST:
2052 case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY:
2053 case QEMU_IFLA_BR_MCAST_HASH_MAX:
2054 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT:
2055 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT:
2056 u32 = NLA_DATA(nlattr);
2057 *u32 = tswap32(*u32);
2060 case QEMU_IFLA_BR_HELLO_TIMER:
2061 case QEMU_IFLA_BR_TCN_TIMER:
2062 case QEMU_IFLA_BR_GC_TIMER:
2063 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER:
2064 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL:
2065 case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL:
2066 case QEMU_IFLA_BR_MCAST_QUERIER_INTVL:
2067 case QEMU_IFLA_BR_MCAST_QUERY_INTVL:
2068 case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL:
2069 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL:
2070 u64 = NLA_DATA(nlattr);
2071 *u64 = tswap64(*u64);
2073 /* ifla_bridge_id: uint8_t[] */
2074 case QEMU_IFLA_BR_ROOT_ID:
2075 case QEMU_IFLA_BR_BRIDGE_ID:
2078 gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr->nla_type);
/* Byte-swap the payload of one IFLA_BRPORT_* bridge-port attribute
 * according to its width; u8 payloads and bridge IDs are untouched. */
2084 static abi_long host_to_target_slave_data_bridge_nlattr(struct nlattr *nlattr,
2091 switch (nlattr->nla_type) {
2093 case QEMU_IFLA_BRPORT_STATE:
2094 case QEMU_IFLA_BRPORT_MODE:
2095 case QEMU_IFLA_BRPORT_GUARD:
2096 case QEMU_IFLA_BRPORT_PROTECT:
2097 case QEMU_IFLA_BRPORT_FAST_LEAVE:
2098 case QEMU_IFLA_BRPORT_LEARNING:
2099 case QEMU_IFLA_BRPORT_UNICAST_FLOOD:
2100 case QEMU_IFLA_BRPORT_PROXYARP:
2101 case QEMU_IFLA_BRPORT_LEARNING_SYNC:
2102 case QEMU_IFLA_BRPORT_PROXYARP_WIFI:
2103 case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK:
2104 case QEMU_IFLA_BRPORT_CONFIG_PENDING:
2105 case QEMU_IFLA_BRPORT_MULTICAST_ROUTER:
2108 case QEMU_IFLA_BRPORT_PRIORITY:
2109 case QEMU_IFLA_BRPORT_DESIGNATED_PORT:
2110 case QEMU_IFLA_BRPORT_DESIGNATED_COST:
2111 case QEMU_IFLA_BRPORT_ID:
2112 case QEMU_IFLA_BRPORT_NO:
2113 u16 = NLA_DATA(nlattr);
2114 *u16 = tswap16(*u16);
2117 case QEMU_IFLA_BRPORT_COST:
2118 u32 = NLA_DATA(nlattr);
2119 *u32 = tswap32(*u32);
2122 case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER:
2123 case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER:
2124 case QEMU_IFLA_BRPORT_HOLD_TIMER:
2125 u64 = NLA_DATA(nlattr);
2126 *u64 = tswap64(*u64);
2128 /* ifla_bridge_id: uint8_t[] */
2129 case QEMU_IFLA_BRPORT_ROOT_ID:
2130 case QEMU_IFLA_BRPORT_BRIDGE_ID:
2133 gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr->nla_type);
/* Carries the link-kind names seen in IFLA_INFO_KIND /
 * IFLA_INFO_SLAVE_KIND across to the later DATA attributes. */
2139 struct linkinfo_context {
/* Convert one nested IFLA_INFO_* attribute.  The KIND attributes are
 * remembered in the context; the DATA attributes are then dispatched
 * by kind (only "bridge" is currently handled). */
2146 static abi_long host_to_target_data_linkinfo_nlattr(struct nlattr *nlattr,
2149 struct linkinfo_context *li_context = context;
2151 switch (nlattr->nla_type) {
2153 case QEMU_IFLA_INFO_KIND:
2154 li_context->name = NLA_DATA(nlattr);
2155 li_context->len = nlattr->nla_len - NLA_HDRLEN;
2157 case QEMU_IFLA_INFO_SLAVE_KIND:
2158 li_context->slave_name = NLA_DATA(nlattr);
2159 li_context->slave_len = nlattr->nla_len - NLA_HDRLEN;
2162 case QEMU_IFLA_INFO_XSTATS:
2163 /* FIXME: only used by CAN */
2166 case QEMU_IFLA_INFO_DATA:
2167 if (strncmp(li_context->name, "bridge",
2168 li_context->len) == 0) {
2169 return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2172 host_to_target_data_bridge_nlattr);
2174 gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context->name);
2177 case QEMU_IFLA_INFO_SLAVE_DATA:
2178 if (strncmp(li_context->slave_name, "bridge",
2179 li_context->slave_len) == 0) {
2180 return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2183 host_to_target_slave_data_bridge_nlattr);
2185 gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
2186 li_context->slave_name);
2190 gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr->nla_type);
/* Convert one AF_INET nested attribute: IFLA_INET_CONF is an array of
 * u32 config values, each individually swapped. */
2197 static abi_long host_to_target_data_inet_nlattr(struct nlattr *nlattr,
2203 switch (nlattr->nla_type) {
2204 case QEMU_IFLA_INET_CONF:
2205 u32 = NLA_DATA(nlattr);
2206 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
2208 u32[i] = tswap32(u32[i]);
2212 gemu_log("Unknown host AF_INET type: %d\n", nlattr->nla_type);
/* Convert one AF_INET6 nested attribute: swap u32/u64 scalars and
 * arrays, and the fields of the ifla_cacheinfo struct; raw-byte
 * payloads (token, addr-gen mode) pass through untouched. */
2217 static abi_long host_to_target_data_inet6_nlattr(struct nlattr *nlattr,
2222 struct ifla_cacheinfo *ci;
2225 switch (nlattr->nla_type) {
2227 case QEMU_IFLA_INET6_TOKEN:
2230 case QEMU_IFLA_INET6_ADDR_GEN_MODE:
2233 case QEMU_IFLA_INET6_FLAGS:
2234 u32 = NLA_DATA(nlattr);
2235 *u32 = tswap32(*u32);
2238 case QEMU_IFLA_INET6_CONF:
2239 u32 = NLA_DATA(nlattr);
2240 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
2242 u32[i] = tswap32(u32[i]);
2245 /* ifla_cacheinfo */
2246 case QEMU_IFLA_INET6_CACHEINFO:
2247 ci = NLA_DATA(nlattr);
2248 ci->max_reasm_len = tswap32(ci->max_reasm_len);
2249 ci->tstamp = tswap32(ci->tstamp);
2250 ci->reachable_time = tswap32(ci->reachable_time);
2251 ci->retrans_time = tswap32(ci->retrans_time);
2254 case QEMU_IFLA_INET6_STATS:
2255 case QEMU_IFLA_INET6_ICMP6STATS:
2256 u64 = NLA_DATA(nlattr);
2257 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u64);
2259 u64[i] = tswap64(u64[i]);
2263 gemu_log("Unknown host AF_INET6 type: %d\n", nlattr->nla_type);
/* Dispatch an IFLA_AF_SPEC nested attribute to the per-family
 * converter (AF_INET or AF_INET6). */
2268 static abi_long host_to_target_data_spec_nlattr(struct nlattr *nlattr,
2271 switch (nlattr->nla_type) {
2273 return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2275 host_to_target_data_inet_nlattr);
2277 return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2279 host_to_target_data_inet6_nlattr);
2281 gemu_log("Unknown host AF_SPEC type: %d\n", nlattr->nla_type);
/* Convert one RTM link (IFLA_*) attribute payload to guest byte
 * order: raw byte payloads untouched, scalars swapped by width,
 * stats / stats64 / ifmap swapped field by field, and LINKINFO /
 * AF_SPEC recursed into as nested attribute runs. */
2287 static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
2290 struct rtnl_link_stats *st;
2291 struct rtnl_link_stats64 *st64;
2292 struct rtnl_link_ifmap *map;
2293 struct linkinfo_context li_context;
2295 switch (rtattr->rta_type) {
2297 case QEMU_IFLA_ADDRESS:
2298 case QEMU_IFLA_BROADCAST:
2300 case QEMU_IFLA_IFNAME:
2301 case QEMU_IFLA_QDISC:
2304 case QEMU_IFLA_OPERSTATE:
2305 case QEMU_IFLA_LINKMODE:
2306 case QEMU_IFLA_CARRIER:
2307 case QEMU_IFLA_PROTO_DOWN:
2311 case QEMU_IFLA_LINK:
2312 case QEMU_IFLA_WEIGHT:
2313 case QEMU_IFLA_TXQLEN:
2314 case QEMU_IFLA_CARRIER_CHANGES:
2315 case QEMU_IFLA_NUM_RX_QUEUES:
2316 case QEMU_IFLA_NUM_TX_QUEUES:
2317 case QEMU_IFLA_PROMISCUITY:
2318 case QEMU_IFLA_EXT_MASK:
2319 case QEMU_IFLA_LINK_NETNSID:
2320 case QEMU_IFLA_GROUP:
2321 case QEMU_IFLA_MASTER:
2322 case QEMU_IFLA_NUM_VF:
2323 u32 = RTA_DATA(rtattr);
2324 *u32 = tswap32(*u32);
2326 /* struct rtnl_link_stats */
2327 case QEMU_IFLA_STATS:
2328 st = RTA_DATA(rtattr);
2329 st->rx_packets = tswap32(st->rx_packets);
2330 st->tx_packets = tswap32(st->tx_packets);
2331 st->rx_bytes = tswap32(st->rx_bytes);
2332 st->tx_bytes = tswap32(st->tx_bytes);
2333 st->rx_errors = tswap32(st->rx_errors);
2334 st->tx_errors = tswap32(st->tx_errors);
2335 st->rx_dropped = tswap32(st->rx_dropped);
2336 st->tx_dropped = tswap32(st->tx_dropped);
2337 st->multicast = tswap32(st->multicast);
2338 st->collisions = tswap32(st->collisions);
2340 /* detailed rx_errors: */
2341 st->rx_length_errors = tswap32(st->rx_length_errors);
2342 st->rx_over_errors = tswap32(st->rx_over_errors);
2343 st->rx_crc_errors = tswap32(st->rx_crc_errors);
2344 st->rx_frame_errors = tswap32(st->rx_frame_errors);
2345 st->rx_fifo_errors = tswap32(st->rx_fifo_errors);
2346 st->rx_missed_errors = tswap32(st->rx_missed_errors);
2348 /* detailed tx_errors */
2349 st->tx_aborted_errors = tswap32(st->tx_aborted_errors);
2350 st->tx_carrier_errors = tswap32(st->tx_carrier_errors);
2351 st->tx_fifo_errors = tswap32(st->tx_fifo_errors);
2352 st->tx_heartbeat_errors = tswap32(st->tx_heartbeat_errors);
2353 st->tx_window_errors = tswap32(st->tx_window_errors);
2356 st->rx_compressed = tswap32(st->rx_compressed);
2357 st->tx_compressed = tswap32(st->tx_compressed);
2359 /* struct rtnl_link_stats64 */
2360 case QEMU_IFLA_STATS64:
2361 st64 = RTA_DATA(rtattr);
2362 st64->rx_packets = tswap64(st64->rx_packets);
2363 st64->tx_packets = tswap64(st64->tx_packets);
2364 st64->rx_bytes = tswap64(st64->rx_bytes);
2365 st64->tx_bytes = tswap64(st64->tx_bytes);
2366 st64->rx_errors = tswap64(st64->rx_errors);
2367 st64->tx_errors = tswap64(st64->tx_errors);
2368 st64->rx_dropped = tswap64(st64->rx_dropped);
2369 st64->tx_dropped = tswap64(st64->tx_dropped);
2370 st64->multicast = tswap64(st64->multicast);
2371 st64->collisions = tswap64(st64->collisions);
2373 /* detailed rx_errors: */
2374 st64->rx_length_errors = tswap64(st64->rx_length_errors);
2375 st64->rx_over_errors = tswap64(st64->rx_over_errors);
2376 st64->rx_crc_errors = tswap64(st64->rx_crc_errors);
2377 st64->rx_frame_errors = tswap64(st64->rx_frame_errors);
2378 st64->rx_fifo_errors = tswap64(st64->rx_fifo_errors);
2379 st64->rx_missed_errors = tswap64(st64->rx_missed_errors);
2381 /* detailed tx_errors */
2382 st64->tx_aborted_errors = tswap64(st64->tx_aborted_errors);
2383 st64->tx_carrier_errors = tswap64(st64->tx_carrier_errors);
2384 st64->tx_fifo_errors = tswap64(st64->tx_fifo_errors);
2385 st64->tx_heartbeat_errors = tswap64(st64->tx_heartbeat_errors);
2386 st64->tx_window_errors = tswap64(st64->tx_window_errors);
2389 st64->rx_compressed = tswap64(st64->rx_compressed);
2390 st64->tx_compressed = tswap64(st64->tx_compressed);
2392 /* struct rtnl_link_ifmap */
2394 map = RTA_DATA(rtattr);
2395 map->mem_start = tswap64(map->mem_start);
2396 map->mem_end = tswap64(map->mem_end);
2397 map->base_addr = tswap64(map->base_addr);
2398 map->irq = tswap16(map->irq);
/* Nested attribute runs: recurse with the appropriate converter. */
2401 case QEMU_IFLA_LINKINFO:
2402 memset(&li_context, 0, sizeof(li_context));
2403 return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
2405 host_to_target_data_linkinfo_nlattr);
2406 case QEMU_IFLA_AF_SPEC:
2407 return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
2409 host_to_target_data_spec_nlattr);
2411 gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr->rta_type);
/* Byte-swap one IFA_* rtattr payload from host to target byte order.
 * NOTE(review): this excerpt elides some original lines (case labels,
 * breaks) — confirm against the full source. */
2417 static abi_long host_to_target_data_addr_rtattr(struct rtattr *rtattr)
2420 struct ifa_cacheinfo *ci;
2422 switch (rtattr->rta_type) {
2423 /* binary: depends on family type */
/* u32-valued attributes: swap the single 32-bit word in place */
2433 u32 = RTA_DATA(rtattr);
2434 *u32 = tswap32(*u32);
2436 /* struct ifa_cacheinfo */
/* swap each 32-bit field of the cacheinfo payload */
2438 ci = RTA_DATA(rtattr);
2439 ci->ifa_prefered = tswap32(ci->ifa_prefered);
2440 ci->ifa_valid = tswap32(ci->ifa_valid);
2441 ci->cstamp = tswap32(ci->cstamp);
2442 ci->tstamp = tswap32(ci->tstamp);
/* unknown attribute types are logged, not treated as errors */
2445 gemu_log("Unknown host IFA type: %d\n", rtattr->rta_type);
/* Byte-swap one RTA_* routing-attribute payload from host to target order.
 * Only u32-valued attributes need swapping here; unknown types are logged.
 * NOTE(review): excerpt elides some original lines. */
2451 static abi_long host_to_target_data_route_rtattr(struct rtattr *rtattr)
2454 switch (rtattr->rta_type) {
2455 /* binary: depends on family type */
2464 u32 = RTA_DATA(rtattr);
2465 *u32 = tswap32(*u32);
2468 gemu_log("Unknown host RTA type: %d\n", rtattr->rta_type);
/* Walk an IFLA rtattr chain, converting each attribute host->target. */
2474 static abi_long host_to_target_link_rtattr(struct rtattr *rtattr,
2475 uint32_t rtattr_len)
2477 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2478 host_to_target_data_link_rtattr);
/* Walk an IFA rtattr chain, converting each attribute host->target. */
2481 static abi_long host_to_target_addr_rtattr(struct rtattr *rtattr,
2482 uint32_t rtattr_len)
2484 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2485 host_to_target_data_addr_rtattr);
/* Walk an RTA rtattr chain, converting each attribute host->target. */
2488 static abi_long host_to_target_route_rtattr(struct rtattr *rtattr,
2489 uint32_t rtattr_len)
2491 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2492 host_to_target_data_route_rtattr);
/* Convert one RTM_* netlink routing message (header payload plus trailing
 * rtattr chain) from host to target byte order.  Dispatches on
 * nlh->nlmsg_type; unsupported types return -TARGET_EINVAL.
 * NOTE(review): excerpt elides the case labels and closing braces. */
2495 static abi_long host_to_target_data_route(struct nlmsghdr *nlh)
2498 struct ifinfomsg *ifi;
2499 struct ifaddrmsg *ifa;
/* nlmsg_len is captured before any swapping so the rtattr length
 * computation below uses the host-order value */
2502 nlmsg_len = nlh->nlmsg_len;
2503 switch (nlh->nlmsg_type) {
/* link messages: swap the ifinfomsg fields, then the IFLA attrs */
2507 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
2508 ifi = NLMSG_DATA(nlh);
2509 ifi->ifi_type = tswap16(ifi->ifi_type);
2510 ifi->ifi_index = tswap32(ifi->ifi_index);
2511 ifi->ifi_flags = tswap32(ifi->ifi_flags);
2512 ifi->ifi_change = tswap32(ifi->ifi_change);
2513 host_to_target_link_rtattr(IFLA_RTA(ifi),
2514 nlmsg_len - NLMSG_LENGTH(sizeof(*ifi)));
/* address messages: swap the ifaddrmsg index, then the IFA attrs */
2520 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
2521 ifa = NLMSG_DATA(nlh);
2522 ifa->ifa_index = tswap32(ifa->ifa_index);
2523 host_to_target_addr_rtattr(IFA_RTA(ifa),
2524 nlmsg_len - NLMSG_LENGTH(sizeof(*ifa)));
/* route messages: swap the rtmsg flags, then the RTA attrs */
2530 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
2531 rtm = NLMSG_DATA(nlh);
2532 rtm->rtm_flags = tswap32(rtm->rtm_flags);
2533 host_to_target_route_rtattr(RTM_RTA(rtm),
2534 nlmsg_len - NLMSG_LENGTH(sizeof(*rtm)));
2538 return -TARGET_EINVAL;
/* Convert a buffer of routing netlink messages host->target. */
2543 static inline abi_long host_to_target_nlmsg_route(struct nlmsghdr *nlh,
2546 return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_route);
/* Iterate over a target-order rtattr chain: validate each attribute's
 * length (still in target byte order, hence the tswap16 in the checks),
 * swap the header to host order, then invoke the per-attribute callback.
 * NOTE(review): excerpt elides error-exit lines inside the loop. */
2549 static abi_long target_to_host_for_each_rtattr(struct rtattr *rtattr,
2551 abi_long (*target_to_host_rtattr)
2556 while (len >= sizeof(struct rtattr)) {
/* bounds check uses the target-order rta_len before swapping it */
2557 if (tswap16(rtattr->rta_len) < sizeof(struct rtattr) ||
2558 tswap16(rtattr->rta_len) > len) {
2561 rtattr->rta_len = tswap16(rtattr->rta_len);
2562 rtattr->rta_type = tswap16(rtattr->rta_type);
2563 ret = target_to_host_rtattr(rtattr);
/* advance by the aligned (now host-order) attribute length */
2567 len -= RTA_ALIGN(rtattr->rta_len);
2568 rtattr = (struct rtattr *)(((char *)rtattr) +
2569 RTA_ALIGN(rtattr->rta_len));
/* Convert one IFLA attribute target->host; no known types need swapping,
 * so unknown ones are merely logged. */
2574 static abi_long target_to_host_data_link_rtattr(struct rtattr *rtattr)
2576 switch (rtattr->rta_type) {
2578 gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr->rta_type);
/* Convert one IFA attribute target->host; address payloads are
 * family-dependent binary data and need no swapping here. */
2584 static abi_long target_to_host_data_addr_rtattr(struct rtattr *rtattr)
2586 switch (rtattr->rta_type) {
2587 /* binary: depends on family type */
2592 gemu_log("Unknown target IFA type: %d\n", rtattr->rta_type);
/* Convert one RTA attribute target->host: u32-valued attributes get
 * swapped in place; unknown types are logged. */
2598 static abi_long target_to_host_data_route_rtattr(struct rtattr *rtattr)
2601 switch (rtattr->rta_type) {
2602 /* binary: depends on family type */
2610 u32 = RTA_DATA(rtattr);
2611 *u32 = tswap32(*u32);
2614 gemu_log("Unknown target RTA type: %d\n", rtattr->rta_type);
/* Walk an IFLA rtattr chain, converting each attribute target->host. */
2620 static void target_to_host_link_rtattr(struct rtattr *rtattr,
2621 uint32_t rtattr_len)
2623 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2624 target_to_host_data_link_rtattr);
/* Walk an IFA rtattr chain, converting each attribute target->host. */
2627 static void target_to_host_addr_rtattr(struct rtattr *rtattr,
2628 uint32_t rtattr_len)
2630 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2631 target_to_host_data_addr_rtattr);
/* Walk an RTA rtattr chain, converting each attribute target->host. */
2634 static void target_to_host_route_rtattr(struct rtattr *rtattr,
2635 uint32_t rtattr_len)
2637 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2638 target_to_host_data_route_rtattr);
/* Convert one RTM_* netlink routing message (header payload plus trailing
 * rtattr chain) from target to host byte order; the mirror image of
 * host_to_target_data_route().  Unsupported types return -TARGET_EOPNOTSUPP.
 * NOTE(review): excerpt elides the case labels and closing braces. */
2641 static abi_long target_to_host_data_route(struct nlmsghdr *nlh)
2643 struct ifinfomsg *ifi;
2644 struct ifaddrmsg *ifa;
2647 switch (nlh->nlmsg_type) {
/* link messages: swap ifinfomsg fields, then the IFLA attrs */
2652 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
2653 ifi = NLMSG_DATA(nlh);
2654 ifi->ifi_type = tswap16(ifi->ifi_type);
2655 ifi->ifi_index = tswap32(ifi->ifi_index);
2656 ifi->ifi_flags = tswap32(ifi->ifi_flags);
2657 ifi->ifi_change = tswap32(ifi->ifi_change);
2658 target_to_host_link_rtattr(IFLA_RTA(ifi), nlh->nlmsg_len -
2659 NLMSG_LENGTH(sizeof(*ifi)));
/* address messages: swap the ifaddrmsg index, then the IFA attrs */
2665 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
2666 ifa = NLMSG_DATA(nlh);
2667 ifa->ifa_index = tswap32(ifa->ifa_index);
2668 target_to_host_addr_rtattr(IFA_RTA(ifa), nlh->nlmsg_len -
2669 NLMSG_LENGTH(sizeof(*ifa)));
/* route messages: swap the rtmsg flags, then the RTA attrs */
2676 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
2677 rtm = NLMSG_DATA(nlh);
2678 rtm->rtm_flags = tswap32(rtm->rtm_flags);
2679 target_to_host_route_rtattr(RTM_RTA(rtm), nlh->nlmsg_len -
2680 NLMSG_LENGTH(sizeof(*rtm)));
2684 return -TARGET_EOPNOTSUPP;
/* Convert a buffer of routing netlink messages target->host. */
2689 static abi_long target_to_host_nlmsg_route(struct nlmsghdr *nlh, size_t len)
2691 return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_route);
2693 #endif /* CONFIG_RTNETLINK */
/* Convert one audit netlink message host->target; unrecognised message
 * types are logged and rejected with -TARGET_EINVAL.
 * NOTE(review): excerpt elides the accepted-type case labels. */
2695 static abi_long host_to_target_data_audit(struct nlmsghdr *nlh)
2697 switch (nlh->nlmsg_type) {
2699 gemu_log("Unknown host audit message type %d\n",
2701 return -TARGET_EINVAL;
/* Convert a buffer of audit netlink messages host->target. */
2706 static inline abi_long host_to_target_nlmsg_audit(struct nlmsghdr *nlh,
2709 return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_audit);
/* Convert one audit netlink message target->host.  User-message ranges
 * pass through; unknown types are logged and rejected with -TARGET_EINVAL. */
2712 static abi_long target_to_host_data_audit(struct nlmsghdr *nlh)
2714 switch (nlh->nlmsg_type) {
2716 case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
2717 case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
2720 gemu_log("Unknown target audit message type %d\n",
2722 return -TARGET_EINVAL;
/* Convert a buffer of audit netlink messages target->host. */
2728 static abi_long target_to_host_nlmsg_audit(struct nlmsghdr *nlh, size_t len)
2730 return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_audit);
2733 /* do_setsockopt() Must return target values and target errnos. */
/* Emulate setsockopt(2): translate the target's level/optname/optval into
 * host values, call the host setsockopt, and return a target errno.
 * optval_addr is a guest address; optlen is the guest-supplied length.
 * NOTE(review): this excerpt elides many case labels and breaks — the
 * grouping comments below describe the visible fragments only. */
2734 static abi_long do_setsockopt(int sockfd, int level, int optname,
2735 abi_ulong optval_addr, socklen_t optlen)
2739 struct ip_mreqn *ip_mreq;
2740 struct ip_mreq_source *ip_mreq_source;
2744 /* TCP options all take an 'int' value. */
2745 if (optlen < sizeof(uint32_t))
2746 return -TARGET_EINVAL;
2748 if (get_user_u32(val, optval_addr))
2749 return -TARGET_EFAULT;
2750 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
/* SOL_IP options */
2757 case IP_ROUTER_ALERT:
2761 case IP_MTU_DISCOVER:
2767 case IP_MULTICAST_TTL:
2768 case IP_MULTICAST_LOOP:
/* these accept either a full int or a single byte from the guest */
2770 if (optlen >= sizeof(uint32_t)) {
2771 if (get_user_u32(val, optval_addr))
2772 return -TARGET_EFAULT;
2773 } else if (optlen >= 1) {
2774 if (get_user_u8(val, optval_addr))
2775 return -TARGET_EFAULT;
2777 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2779 case IP_ADD_MEMBERSHIP:
2780 case IP_DROP_MEMBERSHIP:
/* accepts either the short ip_mreq or the longer ip_mreqn layout */
2781 if (optlen < sizeof (struct target_ip_mreq) ||
2782 optlen > sizeof (struct target_ip_mreqn))
2783 return -TARGET_EINVAL;
2785 ip_mreq = (struct ip_mreqn *) alloca(optlen);
2786 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2787 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2790 case IP_BLOCK_SOURCE:
2791 case IP_UNBLOCK_SOURCE:
2792 case IP_ADD_SOURCE_MEMBERSHIP:
2793 case IP_DROP_SOURCE_MEMBERSHIP:
2794 if (optlen != sizeof (struct target_ip_mreq_source))
2795 return -TARGET_EINVAL;
/* struct layout matches between target and host, so pass it through */
2797 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2798 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2799 unlock_user (ip_mreq_source, optval_addr, 0);
/* SOL_IPV6 options taking an int */
2808 case IPV6_MTU_DISCOVER:
2811 case IPV6_RECVPKTINFO:
2813 if (optlen < sizeof(uint32_t)) {
2814 return -TARGET_EINVAL;
2816 if (get_user_u32(val, optval_addr)) {
2817 return -TARGET_EFAULT;
2819 ret = get_errno(setsockopt(sockfd, level, optname,
2820 &val, sizeof(val)));
2829 /* struct icmp_filter takes an u32 value */
2830 if (optlen < sizeof(uint32_t)) {
2831 return -TARGET_EINVAL;
2834 if (get_user_u32(val, optval_addr)) {
2835 return -TARGET_EFAULT;
2837 ret = get_errno(setsockopt(sockfd, level, optname,
2838 &val, sizeof(val)));
2845 case TARGET_SOL_SOCKET:
2847 case TARGET_SO_RCVTIMEO:
/* timeouts are passed as a target_timeval and converted to host */
2851 optname = SO_RCVTIMEO;
2854 if (optlen != sizeof(struct target_timeval)) {
2855 return -TARGET_EINVAL;
2858 if (copy_from_user_timeval(&tv, optval_addr)) {
2859 return -TARGET_EFAULT;
2862 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2866 case TARGET_SO_SNDTIMEO:
2867 optname = SO_SNDTIMEO;
2869 case TARGET_SO_ATTACH_FILTER:
/* BPF filter: deep-copy and byte-swap the guest sock_fprog/sock_filter
 * array into host structures before attaching */
2871 struct target_sock_fprog *tfprog;
2872 struct target_sock_filter *tfilter;
2873 struct sock_fprog fprog;
2874 struct sock_filter *filter;
2877 if (optlen != sizeof(*tfprog)) {
2878 return -TARGET_EINVAL;
2880 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2881 return -TARGET_EFAULT;
2883 if (!lock_user_struct(VERIFY_READ, tfilter,
2884 tswapal(tfprog->filter), 0)) {
2885 unlock_user_struct(tfprog, optval_addr, 1);
2886 return -TARGET_EFAULT;
2889 fprog.len = tswap16(tfprog->len);
2890 filter = g_try_new(struct sock_filter, fprog.len);
2891 if (filter == NULL) {
2892 unlock_user_struct(tfilter, tfprog->filter, 1);
2893 unlock_user_struct(tfprog, optval_addr, 1);
2894 return -TARGET_ENOMEM;
2896 for (i = 0; i < fprog.len; i++) {
2897 filter[i].code = tswap16(tfilter[i].code);
/* jt/jf are single bytes: no swap needed */
2898 filter[i].jt = tfilter[i].jt;
2899 filter[i].jf = tfilter[i].jf;
2900 filter[i].k = tswap32(tfilter[i].k);
2902 fprog.filter = filter;
2904 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2905 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2908 unlock_user_struct(tfilter, tfprog->filter, 1);
2909 unlock_user_struct(tfprog, optval_addr, 1);
2912 case TARGET_SO_BINDTODEVICE:
2914 char *dev_ifname, *addr_ifname;
/* clamp the interface name to IFNAMSIZ-1 and NUL-terminate a copy */
2916 if (optlen > IFNAMSIZ - 1) {
2917 optlen = IFNAMSIZ - 1;
2919 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2921 return -TARGET_EFAULT;
2923 optname = SO_BINDTODEVICE;
2924 addr_ifname = alloca(IFNAMSIZ);
2925 memcpy(addr_ifname, dev_ifname, optlen);
2926 addr_ifname[optlen] = 0;
2927 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2928 addr_ifname, optlen));
2929 unlock_user (dev_ifname, optval_addr, 0);
2932 /* Options with 'int' argument. */
2933 case TARGET_SO_DEBUG:
2936 case TARGET_SO_REUSEADDR:
2937 optname = SO_REUSEADDR;
2939 case TARGET_SO_TYPE:
2942 case TARGET_SO_ERROR:
2945 case TARGET_SO_DONTROUTE:
2946 optname = SO_DONTROUTE;
2948 case TARGET_SO_BROADCAST:
2949 optname = SO_BROADCAST;
2951 case TARGET_SO_SNDBUF:
2952 optname = SO_SNDBUF;
2954 case TARGET_SO_SNDBUFFORCE:
2955 optname = SO_SNDBUFFORCE;
2957 case TARGET_SO_RCVBUF:
2958 optname = SO_RCVBUF;
2960 case TARGET_SO_RCVBUFFORCE:
2961 optname = SO_RCVBUFFORCE;
2963 case TARGET_SO_KEEPALIVE:
2964 optname = SO_KEEPALIVE;
2966 case TARGET_SO_OOBINLINE:
2967 optname = SO_OOBINLINE;
2969 case TARGET_SO_NO_CHECK:
2970 optname = SO_NO_CHECK;
2972 case TARGET_SO_PRIORITY:
2973 optname = SO_PRIORITY;
2976 case TARGET_SO_BSDCOMPAT:
2977 optname = SO_BSDCOMPAT;
2980 case TARGET_SO_PASSCRED:
2981 optname = SO_PASSCRED;
2983 case TARGET_SO_PASSSEC:
2984 optname = SO_PASSSEC;
2986 case TARGET_SO_TIMESTAMP:
2987 optname = SO_TIMESTAMP;
2989 case TARGET_SO_RCVLOWAT:
2990 optname = SO_RCVLOWAT;
/* common tail for all int-valued SOL_SOCKET options */
2996 if (optlen < sizeof(uint32_t))
2997 return -TARGET_EINVAL;
2999 if (get_user_u32(val, optval_addr))
3000 return -TARGET_EFAULT;
3001 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
3005 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
3006 ret = -TARGET_ENOPROTOOPT;
3011 /* do_getsockopt() Must return target values and target errnos. */
/* Emulate getsockopt(2): call the host getsockopt and copy the result
 * back to guest memory, converting layouts/byte order where needed.
 * NOTE(review): excerpt elides many case labels, breaks and braces. */
3012 static abi_long do_getsockopt(int sockfd, int level, int optname,
3013 abi_ulong optval_addr, abi_ulong optlen)
3020 case TARGET_SOL_SOCKET:
3023 /* These don't just return a single integer */
3024 case TARGET_SO_LINGER:
3025 case TARGET_SO_RCVTIMEO:
3026 case TARGET_SO_SNDTIMEO:
3027 case TARGET_SO_PEERNAME:
3029 case TARGET_SO_PEERCRED: {
3032 struct target_ucred *tcr;
3034 if (get_user_u32(len, optlen)) {
3035 return -TARGET_EFAULT;
3038 return -TARGET_EINVAL;
3042 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
/* copy the host ucred into the target layout field by field */
3050 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
3051 return -TARGET_EFAULT;
3053 __put_user(cr.pid, &tcr->pid);
3054 __put_user(cr.uid, &tcr->uid);
3055 __put_user(cr.gid, &tcr->gid);
3056 unlock_user_struct(tcr, optval_addr, 1);
3057 if (put_user_u32(len, optlen)) {
3058 return -TARGET_EFAULT;
3062 /* Options with 'int' argument. */
3063 case TARGET_SO_DEBUG:
3066 case TARGET_SO_REUSEADDR:
3067 optname = SO_REUSEADDR;
3069 case TARGET_SO_TYPE:
3072 case TARGET_SO_ERROR:
3075 case TARGET_SO_DONTROUTE:
3076 optname = SO_DONTROUTE;
3078 case TARGET_SO_BROADCAST:
3079 optname = SO_BROADCAST;
3081 case TARGET_SO_SNDBUF:
3082 optname = SO_SNDBUF;
3084 case TARGET_SO_RCVBUF:
3085 optname = SO_RCVBUF;
3087 case TARGET_SO_KEEPALIVE:
3088 optname = SO_KEEPALIVE;
3090 case TARGET_SO_OOBINLINE:
3091 optname = SO_OOBINLINE;
3093 case TARGET_SO_NO_CHECK:
3094 optname = SO_NO_CHECK;
3096 case TARGET_SO_PRIORITY:
3097 optname = SO_PRIORITY;
3100 case TARGET_SO_BSDCOMPAT:
3101 optname = SO_BSDCOMPAT;
3104 case TARGET_SO_PASSCRED:
3105 optname = SO_PASSCRED;
3107 case TARGET_SO_TIMESTAMP:
3108 optname = SO_TIMESTAMP;
3110 case TARGET_SO_RCVLOWAT:
3111 optname = SO_RCVLOWAT;
3113 case TARGET_SO_ACCEPTCONN:
3114 optname = SO_ACCEPTCONN;
/* common tail for int-valued options */
3121 /* TCP options all take an 'int' value. */
3123 if (get_user_u32(len, optlen))
3124 return -TARGET_EFAULT;
3126 return -TARGET_EINVAL;
3128 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
/* SO_TYPE result must be mapped back to the target's type constants */
3131 if (optname == SO_TYPE) {
3132 val = host_to_target_sock_type(val);
3137 if (put_user_u32(val, optval_addr))
3138 return -TARGET_EFAULT;
3140 if (put_user_u8(val, optval_addr))
3141 return -TARGET_EFAULT;
3143 if (put_user_u32(len, optlen))
3144 return -TARGET_EFAULT;
/* SOL_IP options */
3151 case IP_ROUTER_ALERT:
3155 case IP_MTU_DISCOVER:
3161 case IP_MULTICAST_TTL:
3162 case IP_MULTICAST_LOOP:
3163 if (get_user_u32(len, optlen))
3164 return -TARGET_EFAULT;
3166 return -TARGET_EINVAL;
3168 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
/* guest asked for fewer than sizeof(int) bytes: return a single byte
 * when the value fits, otherwise a full u32 */
3171 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
3173 if (put_user_u32(len, optlen)
3174 || put_user_u8(val, optval_addr))
3175 return -TARGET_EFAULT;
3177 if (len > sizeof(int))
3179 if (put_user_u32(len, optlen)
3180 || put_user_u32(val, optval_addr))
3181 return -TARGET_EFAULT;
3185 ret = -TARGET_ENOPROTOOPT;
3191 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
3193 ret = -TARGET_EOPNOTSUPP;
/* Build a host iovec array from a guest target_iovec array, locking each
 * buffer into host memory.  'type' is VERIFY_READ/VERIFY_WRITE; 'copy'
 * controls whether buffer contents are copied in.  Returns NULL and sets
 * errno on failure.  Caller must release with unlock_iovec().
 * NOTE(review): excerpt elides several error paths and the bad_address
 * handling between the visible lines. */
3199 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
3200 abi_ulong count, int copy)
3202 struct target_iovec *target_vec;
3204 abi_ulong total_len, max_len;
3207 bool bad_address = false;
/* Linux limits an iovec array to IOV_MAX entries */
3213 if (count > IOV_MAX) {
3218 vec = g_try_new0(struct iovec, count);
3224 target_vec = lock_user(VERIFY_READ, target_addr,
3225 count * sizeof(struct target_iovec), 1);
3226 if (target_vec == NULL) {
3231 /* ??? If host page size > target page size, this will result in a
3232 value larger than what we can actually support. */
3233 max_len = 0x7fffffff & TARGET_PAGE_MASK;
3236 for (i = 0; i < count; i++) {
3237 abi_ulong base = tswapal(target_vec[i].iov_base);
3238 abi_long len = tswapal(target_vec[i].iov_len);
3243 } else if (len == 0) {
3244 /* Zero length pointer is ignored. */
3245 vec[i].iov_base = 0;
3247 vec[i].iov_base = lock_user(type, base, len, copy);
3248 /* If the first buffer pointer is bad, this is a fault. But
3249 * subsequent bad buffers will result in a partial write; this
3250 * is realized by filling the vector with null pointers and
3252 if (!vec[i].iov_base) {
/* cap the running total so the summed length stays representable */
3263 if (len > max_len - total_len) {
3264 len = max_len - total_len;
3267 vec[i].iov_len = len;
3271 unlock_user(target_vec, target_addr, 0);
/* failure path: unlock any buffers already locked before returning */
3276 if (tswapal(target_vec[i].iov_len) > 0) {
3277 unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3280 unlock_user(target_vec, target_addr, 0);
/* Release the buffers locked by lock_iovec().  When 'copy' is set, each
 * buffer's contents are written back to guest memory (read direction). */
3287 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3288 abi_ulong count, int copy)
3290 struct target_iovec *target_vec;
3293 target_vec = lock_user(VERIFY_READ, target_addr,
3294 count * sizeof(struct target_iovec), 1);
3296 for (i = 0; i < count; i++) {
3297 abi_ulong base = tswapal(target_vec[i].iov_base);
3298 abi_long len = tswapal(target_vec[i].iov_len);
3302 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3304 unlock_user(target_vec, target_addr, 0);
/* Translate a target socket type (base type plus SOCK_CLOEXEC /
 * SOCK_NONBLOCK flag bits) into the host's encoding, in place.
 * Returns -TARGET_EINVAL when a requested flag has no host equivalent. */
3310 static inline int target_to_host_sock_type(int *type)
3313 int target_type = *type;
3315 switch (target_type & TARGET_SOCK_TYPE_MASK) {
3316 case TARGET_SOCK_DGRAM:
3317 host_type = SOCK_DGRAM;
3319 case TARGET_SOCK_STREAM:
3320 host_type = SOCK_STREAM;
/* other base types share the same numeric values on host and target */
3323 host_type = target_type & TARGET_SOCK_TYPE_MASK;
3326 if (target_type & TARGET_SOCK_CLOEXEC) {
3327 #if defined(SOCK_CLOEXEC)
3328 host_type |= SOCK_CLOEXEC;
3330 return -TARGET_EINVAL;
3333 if (target_type & TARGET_SOCK_NONBLOCK) {
3334 #if defined(SOCK_NONBLOCK)
3335 host_type |= SOCK_NONBLOCK;
/* no SOCK_NONBLOCK: only acceptable if O_NONBLOCK lets us emulate it
 * later in sock_flags_fixup() */
3336 #elif !defined(O_NONBLOCK)
3337 return -TARGET_EINVAL;
3344 /* Try to emulate socket type flags after socket creation. */
/* On hosts without SOCK_NONBLOCK, apply O_NONBLOCK via fcntl to match the
 * target's requested SOCK_NONBLOCK.  Returns the fd, or -TARGET_EINVAL
 * (NOTE(review): the elided failure path appears to close/propagate —
 * confirm in full source). */
3345 static int sock_flags_fixup(int fd, int target_type)
3347 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3348 if (target_type & TARGET_SOCK_NONBLOCK) {
3349 int flags = fcntl(fd, F_GETFL);
3350 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3352 return -TARGET_EINVAL;
/* Convert a target sockaddr for an obsolete SOCK_PACKET socket into host
 * form: byte-swap sa_family but leave the protocol field alone (it is
 * already big-endian on the wire). */
3359 static abi_long packet_target_to_host_sockaddr(void *host_addr,
3360 abi_ulong target_addr,
3363 struct sockaddr *addr = host_addr;
3364 struct target_sockaddr *target_saddr;
3366 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
3367 if (!target_saddr) {
3368 return -TARGET_EFAULT;
3371 memcpy(addr, target_saddr, len);
3372 addr->sa_family = tswap16(target_saddr->sa_family);
3373 /* spkt_protocol is big-endian */
3375 unlock_user(target_saddr, target_addr, 0);
/* fd translator for SOCK_PACKET sockets: only sockaddr conversion needed */
3379 static TargetFdTrans target_packet_trans = {
3380 .target_to_host_addr = packet_target_to_host_sockaddr,
3383 #ifdef CONFIG_RTNETLINK
/* fd-translator hook: convert outgoing routing-netlink data target->host.
 * NOTE(review): the elided lines handle/propagate ret — confirm. */
3384 static abi_long netlink_route_target_to_host(void *buf, size_t len)
3388 ret = target_to_host_nlmsg_route(buf, len);
/* fd-translator hook: convert incoming routing-netlink data host->target.
 * NOTE(review): the elided lines handle/propagate ret — confirm. */
3396 static abi_long netlink_route_host_to_target(void *buf, size_t len)
3400 ret = host_to_target_nlmsg_route(buf, len);
/* fd translator for NETLINK_ROUTE sockets: swap message payloads both ways */
3408 static TargetFdTrans target_netlink_route_trans = {
3409 .target_to_host_data = netlink_route_target_to_host,
3410 .host_to_target_data = netlink_route_host_to_target,
3412 #endif /* CONFIG_RTNETLINK */
/* fd-translator hook: convert outgoing audit-netlink data target->host.
 * NOTE(review): the elided lines handle/propagate ret — confirm. */
3414 static abi_long netlink_audit_target_to_host(void *buf, size_t len)
3418 ret = target_to_host_nlmsg_audit(buf, len);
/* fd-translator hook: convert incoming audit-netlink data host->target.
 * NOTE(review): the elided lines handle/propagate ret — confirm. */
3426 static abi_long netlink_audit_host_to_target(void *buf, size_t len)
3430 ret = host_to_target_nlmsg_audit(buf, len);
/* fd translator for NETLINK_AUDIT sockets: swap message payloads both ways */
3438 static TargetFdTrans target_netlink_audit_trans = {
3439 .target_to_host_data = netlink_audit_target_to_host,
3440 .host_to_target_data = netlink_audit_host_to_target,
3443 /* do_socket() Must return target values and target errnos. */
/* Emulate socket(2): translate the target socket type, restrict netlink
 * protocols to the supported set, create the host socket, then register
 * the fd translator appropriate to the socket's protocol. */
3444 static abi_long do_socket(int domain, int type, int protocol)
3446 int target_type = type;
3449 ret = target_to_host_sock_type(&type);
/* only NETLINK_ROUTE (when built in), KOBJECT_UEVENT and AUDIT are
 * supported netlink protocols */
3454 if (domain == PF_NETLINK && !(
3455 #ifdef CONFIG_RTNETLINK
3456 protocol == NETLINK_ROUTE ||
3458 protocol == NETLINK_KOBJECT_UEVENT ||
3459 protocol == NETLINK_AUDIT)) {
3460 return -EPFNOSUPPORT;
/* packet-socket protocols are 16-bit network-order values */
3463 if (domain == AF_PACKET ||
3464 (domain == AF_INET && type == SOCK_PACKET)) {
3465 protocol = tswap16(protocol);
3468 ret = get_errno(socket(domain, type, protocol));
/* apply O_NONBLOCK emulation if SOCK_NONBLOCK was requested but the
 * host lacks it */
3470 ret = sock_flags_fixup(ret, target_type);
3471 if (type == SOCK_PACKET) {
3472 /* Manage an obsolete case :
3473 * if socket type is SOCK_PACKET, bind by name
3475 fd_trans_register(ret, &target_packet_trans);
3476 } else if (domain == PF_NETLINK) {
3478 #ifdef CONFIG_RTNETLINK
3480 fd_trans_register(ret, &target_netlink_route_trans);
3483 case NETLINK_KOBJECT_UEVENT:
3484 /* nothing to do: messages are strings */
3487 fd_trans_register(ret, &target_netlink_audit_trans);
/* unreachable: the protocol whitelist above already rejected others */
3490 g_assert_not_reached();
3497 /* do_bind() Must return target values and target errnos. */
/* Emulate bind(2): convert the guest sockaddr to host form (extra byte
 * for NUL-termination of AF_UNIX paths) and call the host bind. */
3498 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3504 if ((int)addrlen < 0) {
3505 return -TARGET_EINVAL;
3508 addr = alloca(addrlen+1);
3510 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3514 return get_errno(bind(sockfd, addr, addrlen));
3517 /* do_connect() Must return target values and target errnos. */
/* Emulate connect(2): convert the guest sockaddr, then use the
 * signal-safe connect wrapper so the call is restartable. */
3518 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3524 if ((int)addrlen < 0) {
3525 return -TARGET_EINVAL;
3528 addr = alloca(addrlen+1);
3530 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3534 return get_errno(safe_connect(sockfd, addr, addrlen));
3537 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
/* Shared core for sendmsg/recvmsg emulation on an already-locked guest
 * msghdr: converts name, control data and iovec, invokes the host
 * sendmsg/recvmsg (per 'send'), then converts results back for receives.
 * NOTE(review): excerpt elides several error-exit and cleanup lines. */
3538 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3539 int flags, int send)
3545 abi_ulong target_vec;
3547 if (msgp->msg_name) {
3548 msg.msg_namelen = tswap32(msgp->msg_namelen);
3549 msg.msg_name = alloca(msg.msg_namelen+1);
3550 ret = target_to_host_sockaddr(fd, msg.msg_name,
3551 tswapal(msgp->msg_name),
3553 if (ret == -TARGET_EFAULT) {
3554 /* For connected sockets msg_name and msg_namelen must
3555 * be ignored, so returning EFAULT immediately is wrong.
3556 * Instead, pass a bad msg_name to the host kernel, and
3557 * let it decide whether to return EFAULT or not.
3559 msg.msg_name = (void *)-1;
3564 msg.msg_name = NULL;
3565 msg.msg_namelen = 0;
/* factor of 2: host cmsg layout can be larger than the target's */
3567 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3568 msg.msg_control = alloca(msg.msg_controllen);
3569 msg.msg_flags = tswap32(msgp->msg_flags);
3571 count = tswapal(msgp->msg_iovlen);
3572 target_vec = tswapal(msgp->msg_iov);
3574 if (count > IOV_MAX) {
3575 /* sendrcvmsg returns a different errno for this condition than
3576 * readv/writev, so we must catch it here before lock_iovec() does.
3578 ret = -TARGET_EMSGSIZE;
3582 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3583 target_vec, count, send);
3585 ret = -host_to_target_errno(errno);
3588 msg.msg_iovlen = count;
/* send path: optionally run the fd's data translator on a copy of the
 * (single) buffer before handing it to the host kernel */
3592 if (fd_trans_target_to_host_data(fd)) {
3595 host_msg = g_malloc(msg.msg_iov->iov_len);
3596 memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3597 ret = fd_trans_target_to_host_data(fd)(host_msg,
3598 msg.msg_iov->iov_len);
3600 msg.msg_iov->iov_base = host_msg;
3601 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3605 ret = target_to_host_cmsg(&msg, msgp);
3607 ret = get_errno(safe_sendmsg(fd, &msg, flags));
/* receive path: translate data, cmsg and peer address back to target */
3611 ret = get_errno(safe_recvmsg(fd, &msg, flags));
3612 if (!is_error(ret)) {
3614 if (fd_trans_host_to_target_data(fd)) {
3615 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3618 ret = host_to_target_cmsg(msgp, &msg);
3620 if (!is_error(ret)) {
3621 msgp->msg_namelen = tswap32(msg.msg_namelen);
3622 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3623 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3624 msg.msg_name, msg.msg_namelen);
3636 unlock_iovec(vec, target_vec, count, !send);
/* sendmsg/recvmsg entry point: lock the guest msghdr (read-only for send,
 * writable for receive), delegate to the locked worker, then unlock with
 * write-back only on the receive path. */
3641 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3642 int flags, int send)
3645 struct target_msghdr *msgp;
3647 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3651 return -TARGET_EFAULT;
3653 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3654 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3658 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3659 * so it might not have this *mmsg-specific flag either.
3661 #ifndef MSG_WAITFORONE
3662 #define MSG_WAITFORONE 0x10000
/* Emulate sendmmsg/recvmmsg by looping do_sendrecvmsg_locked over the
 * guest mmsghdr array, recording each per-message byte count.  Partial
 * success returns the count of messages processed, per kernel semantics. */
3665 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3666 unsigned int vlen, unsigned int flags,
3669 struct target_mmsghdr *mmsgp;
3673 if (vlen > UIO_MAXIOV) {
3677 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3679 return -TARGET_EFAULT;
3682 for (i = 0; i < vlen; i++) {
3683 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3684 if (is_error(ret)) {
3687 mmsgp[i].msg_len = tswap32(ret);
3688 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3689 if (flags & MSG_WAITFORONE) {
3690 flags |= MSG_DONTWAIT;
/* write back only the entries actually filled in */
3694 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3696 /* Return number of datagrams sent if we sent any at all;
3697 * otherwise return the error.
3705 /* do_accept4() Must return target values and target errnos. */
/* Emulate accept4(2)/accept(2): translate flag bits, fetch and validate
 * the guest addrlen, accept on the host, then copy the peer address and
 * updated length back to guest memory. */
3706 static abi_long do_accept4(int fd, abi_ulong target_addr,
3707 abi_ulong target_addrlen_addr, int flags)
3714 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
/* NULL addr: guest doesn't want the peer address */
3716 if (target_addr == 0) {
3717 return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3720 /* linux returns EINVAL if addrlen pointer is invalid */
3721 if (get_user_u32(addrlen, target_addrlen_addr))
3722 return -TARGET_EINVAL;
3724 if ((int)addrlen < 0) {
3725 return -TARGET_EINVAL;
3728 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3729 return -TARGET_EINVAL;
3731 addr = alloca(addrlen);
3733 ret = get_errno(safe_accept4(fd, addr, &addrlen, host_flags));
3734 if (!is_error(ret)) {
3735 host_to_target_sockaddr(target_addr, addr, addrlen);
3736 if (put_user_u32(addrlen, target_addrlen_addr))
3737 ret = -TARGET_EFAULT;
3742 /* do_getpeername() Must return target values and target errnos. */
/* Emulate getpeername(2): validate guest addrlen, call host, copy the
 * sockaddr and updated length back to guest memory. */
3743 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3744 abi_ulong target_addrlen_addr)
3750 if (get_user_u32(addrlen, target_addrlen_addr))
3751 return -TARGET_EFAULT;
3753 if ((int)addrlen < 0) {
3754 return -TARGET_EINVAL;
3757 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3758 return -TARGET_EFAULT;
3760 addr = alloca(addrlen);
3762 ret = get_errno(getpeername(fd, addr, &addrlen));
3763 if (!is_error(ret)) {
3764 host_to_target_sockaddr(target_addr, addr, addrlen);
3765 if (put_user_u32(addrlen, target_addrlen_addr))
3766 ret = -TARGET_EFAULT;
3771 /* do_getsockname() Must return target values and target errnos. */
/* Emulate getsockname(2): same shape as do_getpeername() above. */
3772 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3773 abi_ulong target_addrlen_addr)
3779 if (get_user_u32(addrlen, target_addrlen_addr))
3780 return -TARGET_EFAULT;
3782 if ((int)addrlen < 0) {
3783 return -TARGET_EINVAL;
3786 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3787 return -TARGET_EFAULT;
3789 addr = alloca(addrlen);
3791 ret = get_errno(getsockname(fd, addr, &addrlen));
3792 if (!is_error(ret)) {
3793 host_to_target_sockaddr(target_addr, addr, addrlen);
3794 if (put_user_u32(addrlen, target_addrlen_addr))
3795 ret = -TARGET_EFAULT;
3800 /* do_socketpair() Must return target values and target errnos. */
/* Emulate socketpair(2): translate the socket type, create the pair on
 * the host, and store both fds into the guest's two-element array. */
3801 static abi_long do_socketpair(int domain, int type, int protocol,
3802 abi_ulong target_tab_addr)
3807 target_to_host_sock_type(&type);
3809 ret = get_errno(socketpair(domain, type, protocol, tab));
3810 if (!is_error(ret)) {
3811 if (put_user_s32(tab[0], target_tab_addr)
3812 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3813 ret = -TARGET_EFAULT;
3818 /* do_sendto() Must return target values and target errnos. */
/* Emulate sendto(2)/send(2): lock the guest buffer, optionally run the
 * fd's data translator on a private copy, convert the destination
 * sockaddr when supplied, and send via the signal-safe wrapper. */
3819 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3820 abi_ulong target_addr, socklen_t addrlen)
3824 void *copy_msg = NULL;
3827 if ((int)addrlen < 0) {
3828 return -TARGET_EINVAL;
3831 host_msg = lock_user(VERIFY_READ, msg, len, 1);
3833 return -TARGET_EFAULT;
/* data translator must not modify guest memory: work on a copy */
3834 if (fd_trans_target_to_host_data(fd)) {
3835 copy_msg = host_msg;
3836 host_msg = g_malloc(len);
3837 memcpy(host_msg, copy_msg, len);
3838 ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3844 addr = alloca(addrlen+1);
3845 ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3849 ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3851 ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
/* restore the original pointer so unlock_user releases the right buffer */
3856 host_msg = copy_msg;
3858 unlock_user(host_msg, msg, 0);
3862 /* do_recvfrom() Must return target values and target errnos. */
/* Emulate recvfrom(2)/recv(2): lock a writable guest buffer, receive via
 * the signal-safe wrapper (with or without source address), then run the
 * fd's data translator and copy the source sockaddr back if requested. */
3863 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3864 abi_ulong target_addr,
3865 abi_ulong target_addrlen)
3872 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3874 return -TARGET_EFAULT;
3876 if (get_user_u32(addrlen, target_addrlen)) {
3877 ret = -TARGET_EFAULT;
3880 if ((int)addrlen < 0) {
3881 ret = -TARGET_EINVAL;
3884 addr = alloca(addrlen);
3885 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3888 addr = NULL; /* To keep compiler quiet. */
3889 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3891 if (!is_error(ret)) {
3892 if (fd_trans_host_to_target_data(fd)) {
3893 ret = fd_trans_host_to_target_data(fd)(host_msg, ret);
3896 host_to_target_sockaddr(target_addr, addr, addrlen);
3897 if (put_user_u32(addrlen, target_addrlen)) {
3898 ret = -TARGET_EFAULT;
/* success: write back the received bytes; failure: discard */
3902 unlock_user(host_msg, msg, len);
3905 unlock_user(host_msg, msg, 0);
3910 #ifdef TARGET_NR_socketcall
3911 /* do_socketcall() must return target values and target errnos. */
/* Emulate the multiplexed socketcall(2) syscall: 'num' selects the socket
 * operation and 'vptr' is a guest pointer to an array of abi_long arguments.
 * Returns target values / target errnos.
 * NOTE(review): this listing elides short lines (braces, break, switch header);
 * code below is reproduced verbatim from the listing. */
3912 static abi_long do_socketcall(int num, abi_ulong vptr)
/* Per-operation argument counts, indexed by the TARGET_SYS_* opcode. */
3914 static const unsigned nargs[] = { /* number of arguments per operation */
3915 [TARGET_SYS_SOCKET] = 3, /* domain, type, protocol */
3916 [TARGET_SYS_BIND] = 3, /* fd, addr, addrlen */
3917 [TARGET_SYS_CONNECT] = 3, /* fd, addr, addrlen */
3918 [TARGET_SYS_LISTEN] = 2, /* fd, backlog */
3919 [TARGET_SYS_ACCEPT] = 3, /* fd, addr, addrlen */
3920 [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3921 [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3922 [TARGET_SYS_SOCKETPAIR] = 4, /* domain, type, protocol, tab */
3923 [TARGET_SYS_SEND] = 4, /* fd, msg, len, flags */
3924 [TARGET_SYS_RECV] = 4, /* fd, msg, len, flags */
3925 [TARGET_SYS_SENDTO] = 6, /* fd, msg, len, flags, addr, addrlen */
3926 [TARGET_SYS_RECVFROM] = 6, /* fd, msg, len, flags, addr, addrlen */
3927 [TARGET_SYS_SHUTDOWN] = 2, /* fd, how */
3928 [TARGET_SYS_SETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */
3929 [TARGET_SYS_GETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */
3930 [TARGET_SYS_SENDMSG] = 3, /* fd, msg, flags */
3931 [TARGET_SYS_RECVMSG] = 3, /* fd, msg, flags */
3932 [TARGET_SYS_ACCEPT4] = 4, /* fd, addr, addrlen, flags */
3933 [TARGET_SYS_RECVMMSG] = 4, /* fd, msgvec, vlen, flags */
3934 [TARGET_SYS_SENDMMSG] = 4, /* fd, msgvec, vlen, flags */
3936 abi_long a[6]; /* max 6 args */
3940 /* check the range of the first argument num */
3941 /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3941 if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3942 return -TARGET_EINVAL;
3944 /* ensure we have space for args */
3945 if (nargs[num] > ARRAY_SIZE(a)) {
3946 return -TARGET_EINVAL;
3948 /* collect the arguments in a[] according to nargs[] */
/* Each argument is fetched from guest memory; EFAULT on a bad pointer. */
3949 for (i = 0; i < nargs[num]; ++i) {
3950 if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3951 return -TARGET_EFAULT;
3954 /* now when we have the args, invoke the appropriate underlying function */
/* Dispatch: most cases delegate to the dedicated do_*() helpers; LISTEN
 * and SHUTDOWN call the host syscalls directly (no pointer translation
 * is needed for their integer arguments). */
3956 case TARGET_SYS_SOCKET: /* domain, type, protocol */
3957 return do_socket(a[0], a[1], a[2]);
3958 case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3959 return do_bind(a[0], a[1], a[2]);
3960 case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3961 return do_connect(a[0], a[1], a[2]);
3962 case TARGET_SYS_LISTEN: /* sockfd, backlog */
3963 return get_errno(listen(a[0], a[1]));
3964 case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3965 return do_accept4(a[0], a[1], a[2], 0);
3966 case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3967 return do_getsockname(a[0], a[1], a[2]);
3968 case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3969 return do_getpeername(a[0], a[1], a[2]);
3970 case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3971 return do_socketpair(a[0], a[1], a[2], a[3]);
3972 case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3973 return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3974 case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3975 return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3976 case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3977 return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3978 case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3979 return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3980 case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3981 return get_errno(shutdown(a[0], a[1]));
3982 case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3983 return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3984 case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3985 return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3986 case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3987 return do_sendrecvmsg(a[0], a[1], a[2], 1);
3988 case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3989 return do_sendrecvmsg(a[0], a[1], a[2], 0);
3990 case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3991 return do_accept4(a[0], a[1], a[2], a[3]);
3992 case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3993 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3994 case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3995 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
/* Unknown opcode: log and fail with EINVAL. */
3997 gemu_log("Unsupported socketcall: %d\n", num);
3998 return -TARGET_EINVAL;
/* Fixed-size table tracking guest shmat() attachments; do_shmat() records
 * each attached segment here and do_shmdt() clears the matching entry.
 * (Member declarations are elided in this listing.) */
4003 #define N_SHM_REGIONS 32
4005 static struct shm_region {
4009 } shm_regions[N_SHM_REGIONS];
/* Guest-ABI layout of semid64_ds, used when the target does not provide
 * its own TARGET_SEMID64_DS definition.  On 32-bit ABIs each time field
 * is followed by a padding word (__unused1/__unused2). */
4011 #ifndef TARGET_SEMID64_DS
4012 /* asm-generic version of this struct */
4013 struct target_semid64_ds
4015 struct target_ipc_perm sem_perm;
4016 abi_ulong sem_otime;
4017 #if TARGET_ABI_BITS == 32
4018 abi_ulong __unused1;
4020 abi_ulong sem_ctime;
4021 #if TARGET_ABI_BITS == 32
4022 abi_ulong __unused2;
4024 abi_ulong sem_nsems;
4025 abi_ulong __unused3;
4026 abi_ulong __unused4;
/* Copy the ipc_perm embedded in a guest semid64_ds at target_addr into
 * *host_ip, byteswapping each field.  mode and __seq are 32-bit on
 * Alpha/MIPS/PPC and 16-bit elsewhere, hence the #if'd swaps.
 * Returns -TARGET_EFAULT if the guest struct cannot be mapped. */
4030 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
4031 abi_ulong target_addr)
4033 struct target_ipc_perm *target_ip;
4034 struct target_semid64_ds *target_sd;
4036 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4037 return -TARGET_EFAULT;
4038 target_ip = &(target_sd->sem_perm);
4039 host_ip->__key = tswap32(target_ip->__key);
4040 host_ip->uid = tswap32(target_ip->uid);
4041 host_ip->gid = tswap32(target_ip->gid);
4042 host_ip->cuid = tswap32(target_ip->cuid);
4043 host_ip->cgid = tswap32(target_ip->cgid);
4044 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4045 host_ip->mode = tswap32(target_ip->mode);
4047 host_ip->mode = tswap16(target_ip->mode);
4049 #if defined(TARGET_PPC)
4050 host_ip->__seq = tswap32(target_ip->__seq);
4052 host_ip->__seq = tswap16(target_ip->__seq);
/* Read-only access: unlock with length 0 so nothing is copied back. */
4054 unlock_user_struct(target_sd, target_addr, 0);
/* Inverse of target_to_host_ipc_perm(): write *host_ip into the ipc_perm
 * of the guest semid64_ds at target_addr, byteswapping each field.
 * Returns -TARGET_EFAULT if the guest struct cannot be mapped. */
4058 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
4059 struct ipc_perm *host_ip)
4061 struct target_ipc_perm *target_ip;
4062 struct target_semid64_ds *target_sd;
4064 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4065 return -TARGET_EFAULT;
4066 target_ip = &(target_sd->sem_perm);
4067 target_ip->__key = tswap32(host_ip->__key);
4068 target_ip->uid = tswap32(host_ip->uid);
4069 target_ip->gid = tswap32(host_ip->gid);
4070 target_ip->cuid = tswap32(host_ip->cuid);
4071 target_ip->cgid = tswap32(host_ip->cgid);
4072 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4073 target_ip->mode = tswap32(host_ip->mode);
4075 target_ip->mode = tswap16(host_ip->mode);
4077 #if defined(TARGET_PPC)
4078 target_ip->__seq = tswap32(host_ip->__seq);
4080 target_ip->__seq = tswap16(host_ip->__seq);
/* Unlock with nonzero length so the updated struct is copied back. */
4082 unlock_user_struct(target_sd, target_addr, 1);
/* Convert a guest semid64_ds at target_addr into host *host_sd
 * (perm block plus nsems/otime/ctime).  -TARGET_EFAULT on bad address. */
4086 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
4087 abi_ulong target_addr)
4089 struct target_semid64_ds *target_sd;
4091 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4092 return -TARGET_EFAULT;
/* NOTE(review): on this error path target_sd appears to stay locked
 * (no unlock before return visible in this listing) — confirm. */
4093 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
4094 return -TARGET_EFAULT;
4095 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
4096 host_sd->sem_otime = tswapal(target_sd->sem_otime);
4097 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
4098 unlock_user_struct(target_sd, target_addr, 0);
/* Convert host *host_sd back into the guest semid64_ds at target_addr.
 * -TARGET_EFAULT on bad address. */
4102 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
4103 struct semid_ds *host_sd)
4105 struct target_semid64_ds *target_sd;
4107 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4108 return -TARGET_EFAULT;
4109 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
4110 return -TARGET_EFAULT;
4111 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
4112 target_sd->sem_otime = tswapal(host_sd->sem_otime);
4113 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
4114 unlock_user_struct(target_sd, target_addr, 1);
/* Guest layout of struct seminfo (member list elided in this listing). */
4118 struct target_seminfo {
/* Copy host seminfo (SEM_INFO/IPC_INFO results) out to the guest struct
 * at target_addr, swapping each field via __put_user. */
4131 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
4132 struct seminfo *host_seminfo)
4134 struct target_seminfo *target_seminfo;
4135 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
4136 return -TARGET_EFAULT;
4137 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
4138 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
4139 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
4140 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
4141 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
4142 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
4143 __put_user(host_seminfo->semume, &target_seminfo->semume);
4144 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
4145 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
4146 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
4147 unlock_user_struct(target_seminfo, target_addr, 1);
/* Members of the host-side union semun (union header elided in listing):
 * buf for IPC_STAT/IPC_SET, array for GETALL/SETALL, __buf for IPC_INFO. */
4153 struct semid_ds *buf;
4154 unsigned short *array;
4155 struct seminfo *__buf;
/* Guest-ABI counterpart of union semun (members elided in listing). */
4158 union target_semun {
/* Read the guest's semaphore-value array for SETALL: query nsems via a
 * host IPC_STAT, allocate *host_array, then copy/swap each element from
 * guest memory at target_addr.  Caller owns (and must free) *host_array.
 * Returns 0, a host errno from semctl, -TARGET_ENOMEM or -TARGET_EFAULT. */
4165 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
4166 abi_ulong target_addr)
4169 unsigned short *array;
4171 struct semid_ds semid_ds;
4174 semun.buf = &semid_ds;
/* First ask the kernel how many semaphores this set has. */
4176 ret = semctl(semid, 0, IPC_STAT, semun);
4178 return get_errno(ret);
4180 nsems = semid_ds.sem_nsems;
4182 *host_array = g_try_new(unsigned short, nsems);
4184 return -TARGET_ENOMEM;
4186 array = lock_user(VERIFY_READ, target_addr,
4187 nsems*sizeof(unsigned short), 1);
4189 g_free(*host_array);
4190 return -TARGET_EFAULT;
4193 for(i=0; i<nsems; i++) {
4194 __get_user((*host_array)[i], &array[i]);
4196 unlock_user(array, target_addr, 0);
/* Write a GETALL result back to the guest: query nsems via IPC_STAT,
 * copy/swap *host_array into guest memory at target_addr, then free the
 * host array (this function consumes *host_array on success). */
4201 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
4202 unsigned short **host_array)
4205 unsigned short *array;
4207 struct semid_ds semid_ds;
4210 semun.buf = &semid_ds;
4212 ret = semctl(semid, 0, IPC_STAT, semun);
4214 return get_errno(ret);
4216 nsems = semid_ds.sem_nsems;
4218 array = lock_user(VERIFY_WRITE, target_addr,
4219 nsems*sizeof(unsigned short), 0);
/* NOTE(review): on this EFAULT path *host_array does not appear to be
 * freed in this listing — confirm against the full source. */
4221 return -TARGET_EFAULT;
4223 for(i=0; i<nsems; i++) {
4224 __put_user((*host_array)[i], &array[i]);
4226 g_free(*host_array);
4227 unlock_user(array, target_addr, 1);
/* Emulate semctl(2): translate the per-command semun argument between
 * guest and host representations, invoke the host semctl, and convert
 * results back.  Case labels are elided in this listing; the groups are
 * SETVAL/GETVAL, GETALL/SETALL, IPC_STAT/IPC_SET, IPC_INFO/SEM_INFO,
 * and the no-argument commands (RMID/GETPID/GETNCNT/GETZCNT). */
4232 static inline abi_long do_semctl(int semid, int semnum, int cmd,
4233 abi_ulong target_arg)
4235 union target_semun target_su = { .buf = target_arg };
4237 struct semid_ds dsarg;
4238 unsigned short *array = NULL;
4239 struct seminfo seminfo;
4240 abi_long ret = -TARGET_EINVAL;
4247 /* In 64 bit cross-endian situations, we will erroneously pick up
4248 * the wrong half of the union for the "val" element. To rectify
4249 * this, the entire 8-byte structure is byteswapped, followed by
4250 * a swap of the 4 byte val field. In other cases, the data is
4251 * already in proper host byte order. */
4252 if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4253 target_su.buf = tswapal(target_su.buf);
4254 arg.val = tswap32(target_su.val);
4256 arg.val = target_su.val;
4258 ret = get_errno(semctl(semid, semnum, cmd, arg));
/* GETALL/SETALL: marshal the whole value array both ways. */
4262 err = target_to_host_semarray(semid, &array, target_su.array);
4266 ret = get_errno(semctl(semid, semnum, cmd, arg));
4267 err = host_to_target_semarray(semid, target_su.array, &array);
/* IPC_STAT/IPC_SET: convert semid_ds in and back out. */
4274 err = target_to_host_semid_ds(&dsarg, target_su.buf);
4278 ret = get_errno(semctl(semid, semnum, cmd, arg));
4279 err = host_to_target_semid_ds(target_su.buf, &dsarg);
/* IPC_INFO/SEM_INFO: kernel fills a host seminfo we copy back. */
4285 arg.__buf = &seminfo;
4286 ret = get_errno(semctl(semid, semnum, cmd, arg));
4287 err = host_to_target_seminfo(target_su.__buf, &seminfo);
/* Commands that take no semun argument. */
4295 ret = get_errno(semctl(semid, semnum, cmd, NULL));
/* Guest layout of struct sembuf (remaining members elided in listing). */
4302 struct target_sembuf {
4303 unsigned short sem_num;
/* Copy an array of nsops guest sembuf entries at target_addr into
 * host_sembuf[], byteswapping each field.  -TARGET_EFAULT on bad address. */
4308 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4309 abi_ulong target_addr,
4312 struct target_sembuf *target_sembuf;
4315 target_sembuf = lock_user(VERIFY_READ, target_addr,
4316 nsops*sizeof(struct target_sembuf), 1);
4318 return -TARGET_EFAULT;
4320 for(i=0; i<nsops; i++) {
4321 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4322 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4323 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4326 unlock_user(target_sembuf, target_addr, 0);
/* Emulate semop(2): convert the guest op array and call semtimedop with a
 * NULL timeout.  NOTE(review): sops is a VLA sized by the guest-supplied
 * nsops — an unvalidated large count would overflow the stack; confirm
 * callers bound nsops before reaching here. */
4331 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
4333 struct sembuf sops[nsops];
4335 if (target_to_host_sembuf(sops, ptr, nsops))
4336 return -TARGET_EFAULT;
4338 return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
/* Guest-ABI layout of msqid64_ds; 32-bit ABIs pad each time field with
 * an __unusedN word, mirroring the asm-generic kernel layout. */
4341 struct target_msqid_ds
4343 struct target_ipc_perm msg_perm;
4344 abi_ulong msg_stime;
4345 #if TARGET_ABI_BITS == 32
4346 abi_ulong __unused1;
4348 abi_ulong msg_rtime;
4349 #if TARGET_ABI_BITS == 32
4350 abi_ulong __unused2;
4352 abi_ulong msg_ctime;
4353 #if TARGET_ABI_BITS == 32
4354 abi_ulong __unused3;
4356 abi_ulong __msg_cbytes;
4358 abi_ulong msg_qbytes;
4359 abi_ulong msg_lspid;
4360 abi_ulong msg_lrpid;
4361 abi_ulong __unused4;
4362 abi_ulong __unused5;
/* Convert a guest msqid_ds at target_addr into host *host_md
 * (perm block plus times/counts/pids).  -TARGET_EFAULT on bad address. */
4365 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4366 abi_ulong target_addr)
4368 struct target_msqid_ds *target_md;
4370 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4371 return -TARGET_EFAULT;
4372 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4373 return -TARGET_EFAULT;
4374 host_md->msg_stime = tswapal(target_md->msg_stime);
4375 host_md->msg_rtime = tswapal(target_md->msg_rtime);
4376 host_md->msg_ctime = tswapal(target_md->msg_ctime);
4377 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4378 host_md->msg_qnum = tswapal(target_md->msg_qnum);
4379 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4380 host_md->msg_lspid = tswapal(target_md->msg_lspid);
4381 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4382 unlock_user_struct(target_md, target_addr, 0);
/* Convert host *host_md back into the guest msqid_ds at target_addr.
 * -TARGET_EFAULT on bad address. */
4386 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4387 struct msqid_ds *host_md)
4389 struct target_msqid_ds *target_md;
4391 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4392 return -TARGET_EFAULT;
4393 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4394 return -TARGET_EFAULT;
4395 target_md->msg_stime = tswapal(host_md->msg_stime);
4396 target_md->msg_rtime = tswapal(host_md->msg_rtime);
4397 target_md->msg_ctime = tswapal(host_md->msg_ctime);
4398 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4399 target_md->msg_qnum = tswapal(host_md->msg_qnum);
4400 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4401 target_md->msg_lspid = tswapal(host_md->msg_lspid);
4402 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4403 unlock_user_struct(target_md, target_addr, 1);
/* Guest layout of struct msginfo (most members elided in listing). */
4407 struct target_msginfo {
4415 unsigned short int msgseg;
/* Copy host msginfo (IPC_INFO/MSG_INFO result) out to the guest struct
 * at target_addr, swapping each field via __put_user. */
4418 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4419 struct msginfo *host_msginfo)
4421 struct target_msginfo *target_msginfo;
4422 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4423 return -TARGET_EFAULT;
4424 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4425 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4426 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4427 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4428 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4429 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4430 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4431 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4432 unlock_user_struct(target_msginfo, target_addr, 1);
/* Emulate msgctl(2).  Case labels are elided in this listing; the visible
 * groups are IPC_STAT/IPC_SET (msqid_ds round-trip), IPC_RMID (no data),
 * and IPC_INFO/MSG_INFO (msginfo copied out). */
4436 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4438 struct msqid_ds dsarg;
4439 struct msginfo msginfo;
4440 abi_long ret = -TARGET_EINVAL;
4448 if (target_to_host_msqid_ds(&dsarg,ptr))
4449 return -TARGET_EFAULT;
4450 ret = get_errno(msgctl(msgid, cmd, &dsarg));
4451 if (host_to_target_msqid_ds(ptr,&dsarg))
4452 return -TARGET_EFAULT;
4455 ret = get_errno(msgctl(msgid, cmd, NULL));
/* The kernel writes a struct msginfo through the msqid_ds pointer for
 * IPC_INFO/MSG_INFO, hence the cast. */
4459 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4460 if (host_to_target_msginfo(ptr, &msginfo))
4461 return -TARGET_EFAULT;
/* Guest layout of struct msgbuf (members elided in listing). */
4468 struct target_msgbuf {
/* Emulate msgsnd(2): copy the guest message (mtype + msgsz bytes of
 * mtext) into a heap-allocated host msgbuf and send it.
 * Rejects negative msgsz with -TARGET_EINVAL. */
4473 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4474 ssize_t msgsz, int msgflg)
4476 struct target_msgbuf *target_mb;
4477 struct msgbuf *host_mb;
4481 return -TARGET_EINVAL;
4484 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4485 return -TARGET_EFAULT;
/* sizeof(long) covers the host mtype field preceding mtext. */
4486 host_mb = g_try_malloc(msgsz + sizeof(long));
4488 unlock_user_struct(target_mb, msgp, 0);
4489 return -TARGET_ENOMEM;
4491 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4492 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4493 ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
/* NOTE(review): g_free(host_mb) is not visible in this listing
 * (line elided) — confirm it is freed in the full source. */
4495 unlock_user_struct(target_mb, msgp, 0);
/* Emulate msgrcv(2): receive into a heap host msgbuf, then copy the
 * received bytes (ret) and the swapped mtype back to the guest msgbuf
 * at msgp.  Rejects negative msgsz with -TARGET_EINVAL. */
4500 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4501 ssize_t msgsz, abi_long msgtyp,
4504 struct target_msgbuf *target_mb;
4506 struct msgbuf *host_mb;
4510 return -TARGET_EINVAL;
4513 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4514 return -TARGET_EFAULT;
4516 host_mb = g_try_malloc(msgsz + sizeof(long));
4518 ret = -TARGET_ENOMEM;
4521 ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
/* On success, ret is the number of mtext bytes received; the guest
 * mtext starts one abi_ulong (the mtype field) past msgp. */
4524 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4525 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4526 if (!target_mtext) {
4527 ret = -TARGET_EFAULT;
4530 memcpy(target_mb->mtext, host_mb->mtext, ret);
4531 unlock_user(target_mtext, target_mtext_addr, ret);
4534 target_mb->mtype = tswapal(host_mb->mtype);
4538 unlock_user_struct(target_mb, msgp, 1);
/* Convert a guest shmid_ds at target_addr into host *host_sd
 * (perm block plus segment size/times/pids/attach count).
 * -TARGET_EFAULT on bad address. */
4543 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4544 abi_ulong target_addr)
4546 struct target_shmid_ds *target_sd;
4548 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4549 return -TARGET_EFAULT;
4550 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4551 return -TARGET_EFAULT;
4552 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4553 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4554 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4555 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4556 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4557 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4558 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4559 unlock_user_struct(target_sd, target_addr, 0);
/* Convert host *host_sd back into the guest shmid_ds at target_addr.
 * -TARGET_EFAULT on bad address. */
4563 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4564 struct shmid_ds *host_sd)
4566 struct target_shmid_ds *target_sd;
4568 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4569 return -TARGET_EFAULT;
4570 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4571 return -TARGET_EFAULT;
4572 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4573 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4574 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4575 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4576 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4577 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4578 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4579 unlock_user_struct(target_sd, target_addr, 1);
/* Guest layout of struct shminfo (members elided in listing). */
4583 struct target_shminfo {
/* Copy host shminfo (IPC_INFO result) out to the guest struct
 * at target_addr, swapping each field via __put_user. */
4591 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4592 struct shminfo *host_shminfo)
4594 struct target_shminfo *target_shminfo;
4595 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4596 return -TARGET_EFAULT;
4597 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4598 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4599 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4600 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4601 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4602 unlock_user_struct(target_shminfo, target_addr, 1);
/* Guest layout of struct shm_info (first members elided in listing). */
4606 struct target_shm_info {
4611 abi_ulong swap_attempts;
4612 abi_ulong swap_successes;
/* Copy host shm_info (SHM_INFO result) out to the guest struct
 * at target_addr, swapping each field via __put_user. */
4615 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4616 struct shm_info *host_shm_info)
4618 struct target_shm_info *target_shm_info;
4619 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4620 return -TARGET_EFAULT;
4621 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4622 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4623 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4624 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4625 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4626 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4627 unlock_user_struct(target_shm_info, target_addr, 1);
/* Emulate shmctl(2).  Case labels are elided in this listing; visible
 * groups: IPC_STAT/IPC_SET (shmid_ds round-trip), IPC_INFO (shminfo),
 * SHM_INFO (shm_info), and the no-data commands (RMID/LOCK/UNLOCK). */
4631 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4633 struct shmid_ds dsarg;
4634 struct shminfo shminfo;
4635 struct shm_info shm_info;
4636 abi_long ret = -TARGET_EINVAL;
4644 if (target_to_host_shmid_ds(&dsarg, buf))
4645 return -TARGET_EFAULT;
4646 ret = get_errno(shmctl(shmid, cmd, &dsarg));
4647 if (host_to_target_shmid_ds(buf, &dsarg))
4648 return -TARGET_EFAULT;
/* The kernel writes shminfo/shm_info through the shmid_ds pointer for
 * IPC_INFO/SHM_INFO, hence the casts. */
4651 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4652 if (host_to_target_shminfo(buf, &shminfo))
4653 return -TARGET_EFAULT;
4656 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4657 if (host_to_target_shm_info(buf, &shm_info))
4658 return -TARGET_EFAULT;
4663 ret = get_errno(shmctl(shmid, cmd, NULL));
4670 #ifndef TARGET_FORCE_SHMLBA
4671 /* For most architectures, SHMLBA is the same as the page size;
4672 * some architectures have larger values, in which case they should
4673 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4674 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4675 * and defining its own value for SHMLBA.
4677 * The kernel also permits SHMLBA to be set by the architecture to a
4678 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4679 * this means that addresses are rounded to the large size if
4680 * SHM_RND is set but addresses not aligned to that size are not rejected
4681 * as long as they are at least page-aligned. Since the only architecture
4682 * which uses this is ia64 this code doesn't provide for that oddity.
/* Default SHMLBA for the guest: one target page. */
4684 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4686 return TARGET_PAGE_SIZE;
/* Emulate shmat(2): attach segment shmid at guest address shmaddr
 * (or pick a free guest range when shmaddr is 0), honoring SHM_RND
 * alignment against the target SHMLBA.  On success updates guest page
 * flags and records the mapping in shm_regions[]; returns the guest
 * address or a target errno. */
4690 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4691 int shmid, abi_ulong shmaddr, int shmflg)
4695 struct shmid_ds shm_info;
4699 /* find out the length of the shared memory segment */
4700 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4701 if (is_error(ret)) {
4702 /* can't get length, bail out */
4706 shmlba = target_shmlba(cpu_env);
/* Misaligned address: round down with SHM_RND, else reject. */
4708 if (shmaddr & (shmlba - 1)) {
4709 if (shmflg & SHM_RND) {
4710 shmaddr &= ~(shmlba - 1);
4712 return -TARGET_EINVAL;
/* Explicit guest address: translate with g2h and attach directly. */
4719 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
4721 abi_ulong mmap_start;
/* No address given: find a free guest VMA, then force the mapping
 * there with SHM_REMAP. */
4723 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
4725 if (mmap_start == -1) {
4727 host_raddr = (void *)-1;
4729 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
4732 if (host_raddr == (void *)-1) {
4734 return get_errno((long)host_raddr);
4736 raddr=h2g((unsigned long)host_raddr);
/* Mark the attached range valid/readable (writable unless RDONLY). */
4738 page_set_flags(raddr, raddr + shm_info.shm_segsz,
4739 PAGE_VALID | PAGE_READ |
4740 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
/* Record the attachment so do_shmdt() can find its size later. */
4742 for (i = 0; i < N_SHM_REGIONS; i++) {
4743 if (!shm_regions[i].in_use) {
4744 shm_regions[i].in_use = true;
4745 shm_regions[i].start = raddr;
4746 shm_regions[i].size = shm_info.shm_segsz;
/* Emulate shmdt(2): release the shm_regions[] bookkeeping entry for
 * shmaddr (clearing its guest page flags), then detach on the host. */
4756 static inline abi_long do_shmdt(abi_ulong shmaddr)
4760 for (i = 0; i < N_SHM_REGIONS; ++i) {
4761 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4762 shm_regions[i].in_use = false;
4763 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4768 return get_errno(shmdt(g2h(shmaddr)));
4771 #ifdef TARGET_NR_ipc
4772 /* ??? This only works with linear mappings. */
4773 /* do_ipc() must return target values and target errnos. */
/* Emulate the multiplexed ipc(2) syscall: low 16 bits of 'call' select
 * the IPCOP_* operation, high 16 bits carry the version (version 0 of
 * msgrcv passes its args indirectly through a kludge struct).
 * Case labels are partially elided in this listing. */
4774 static abi_long do_ipc(CPUArchState *cpu_env,
4775 unsigned int call, abi_long first,
4776 abi_long second, abi_long third,
4777 abi_long ptr, abi_long fifth)
4782 version = call >> 16;
4787 ret = do_semop(first, ptr, second);
4791 ret = get_errno(semget(first, second, third));
4794 case IPCOP_semctl: {
4795 /* The semun argument to semctl is passed by value, so dereference the
4798 get_user_ual(atptr, ptr);
4799 ret = do_semctl(first, second, third, atptr);
4804 ret = get_errno(msgget(first, second));
4808 ret = do_msgsnd(first, ptr, second, third);
4812 ret = do_msgctl(first, second, ptr);
/* Old-style (version 0) msgrcv: ptr points at a {msgp, msgtyp} pair. */
4819 struct target_ipc_kludge {
4824 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4825 ret = -TARGET_EFAULT;
4829 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4831 unlock_user_struct(tmp, ptr, 0);
/* New-style msgrcv: arguments are passed directly. */
4835 ret = do_msgrcv(first, ptr, second, fifth, third);
/* shmat: resulting guest address is stored through 'third'. */
4844 raddr = do_shmat(cpu_env, first, ptr, second);
4845 if (is_error(raddr))
4846 return get_errno(raddr);
4847 if (put_user_ual(raddr, third))
4848 return -TARGET_EFAULT;
4852 ret = -TARGET_EINVAL;
4857 ret = do_shmdt(ptr);
4861 /* IPC_* flag values are the same on all linux platforms */
4862 ret = get_errno(shmget(first, second, third));
4865 /* IPC_* and SHM_* command values are the same on all linux platforms */
4867 ret = do_shmctl(first, second, ptr);
4870 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
4871 ret = -TARGET_ENOSYS;
4878 /* kernel structure types definitions */
/* First pass over syscall_types.h: build the STRUCT_* enum tags. */
4880 #define STRUCT(name, ...) STRUCT_ ## name,
4881 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4883 #include "syscall_types.h"
4887 #undef STRUCT_SPECIAL
/* Second pass: emit a thunk argtype descriptor array per struct. */
4889 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4890 #define STRUCT_SPECIAL(name)
4891 #include "syscall_types.h"
4893 #undef STRUCT_SPECIAL
4895 typedef struct IOCTLEntry IOCTLEntry;
/* Handler signature for ioctls that need custom marshalling. */
4897 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
4898 int fd, int cmd, abi_long arg);
/* One ioctl-table row: host command number, optional custom handler,
 * and the thunk description of the argument (remaining members elided
 * in this listing). */
4902 unsigned int host_cmd;
4905 do_ioctl_fn *do_ioctl;
4906 const argtype arg_type[5];
/* Direction flags for an ioctl's data transfer. */
4909 #define IOC_R 0x0001
4910 #define IOC_W 0x0002
4911 #define IOC_RW (IOC_R | IOC_W)
/* Size of the on-stack scratch buffer used by generic ioctl handling. */
4913 #define MAX_STRUCT_SIZE 4096
4915 #ifdef CONFIG_FIEMAP
4916 /* So fiemap access checks don't overflow on 32 bit systems.
4917 * This is very slightly smaller than the limit imposed by
4918 * the underlying kernel.
4920 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4921 / sizeof(struct fiemap_extent))
/* Custom handler for FS_IOC_FIEMAP: the argument is a struct fiemap
 * followed by fm_extent_count fiemap_extent slots that the kernel fills
 * in.  Converts the header in, runs the ioctl, then converts the header
 * and each returned extent back out to the guest. */
4923 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4924 int fd, int cmd, abi_long arg)
4926 /* The parameter for this ioctl is a struct fiemap followed
4927 * by an array of struct fiemap_extent whose size is set
4928 * in fiemap->fm_extent_count. The array is filled in by the
4931 int target_size_in, target_size_out;
4933 const argtype *arg_type = ie->arg_type;
4934 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4937 int i, extent_size = thunk_type_size(extent_arg_type, 0);
4941 assert(arg_type[0] == TYPE_PTR);
4942 assert(ie->access == IOC_RW);
/* Convert the guest fiemap header into host form in buf_temp. */
4944 target_size_in = thunk_type_size(arg_type, 0);
4945 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4947 return -TARGET_EFAULT;
4949 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4950 unlock_user(argptr, arg, 0);
4951 fm = (struct fiemap *)buf_temp;
/* Guest-controlled count: bound it so the size math below can't
 * overflow (see FIEMAP_MAX_EXTENTS above). */
4952 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4953 return -TARGET_EINVAL;
4956 outbufsz = sizeof (*fm) +
4957 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4959 if (outbufsz > MAX_STRUCT_SIZE) {
4960 /* We can't fit all the extents into the fixed size buffer.
4961 * Allocate one that is large enough and use it instead.
4963 fm = g_try_malloc(outbufsz);
4965 return -TARGET_ENOMEM;
4967 memcpy(fm, buf_temp, sizeof(struct fiemap));
4970 ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4971 if (!is_error(ret)) {
4972 target_size_out = target_size_in;
4973 /* An extent_count of 0 means we were only counting the extents
4974 * so there are no structs to copy
4976 if (fm->fm_extent_count != 0) {
4977 target_size_out += fm->fm_mapped_extents * extent_size;
4979 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4981 ret = -TARGET_EFAULT;
4983 /* Convert the struct fiemap */
4984 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4985 if (fm->fm_extent_count != 0) {
4986 p = argptr + target_size_in;
4987 /* ...and then all the struct fiemap_extents */
4988 for (i = 0; i < fm->fm_mapped_extents; i++) {
4989 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4994 unlock_user(argptr, arg, target_size_out);
/* Custom handler for SIOCGIFCONF: struct ifconf embeds a pointer to an
 * ifreq array, so the generic thunk can't handle it.  Converts the
 * ifconf header and length in, runs the ioctl, then copies the header
 * and each ifreq back out to the guest buffer. */
5004 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
5005 int fd, int cmd, abi_long arg)
5007 const argtype *arg_type = ie->arg_type;
5011 struct ifconf *host_ifconf;
5013 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
5014 int target_ifreq_size;
5019 abi_long target_ifc_buf;
5023 assert(arg_type[0] == TYPE_PTR);
5024 assert(ie->access == IOC_RW);
5027 target_size = thunk_type_size(arg_type, 0);
5029 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5031 return -TARGET_EFAULT;
5032 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5033 unlock_user(argptr, arg, 0);
/* Save the guest's buffer pointer and length before we overwrite the
 * host ifconf fields below. */
5035 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
5036 target_ifc_len = host_ifconf->ifc_len;
5037 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
/* Guest ifreq and host ifreq may differ in size; recompute the length
 * in host terms. */
5039 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
5040 nb_ifreq = target_ifc_len / target_ifreq_size;
5041 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
5043 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
5044 if (outbufsz > MAX_STRUCT_SIZE) {
5045 /* We can't fit all the extents into the fixed size buffer.
5046 * Allocate one that is large enough and use it instead.
/* NOTE(review): plain malloc() here while the rest of the file uses
 * g_malloc/g_try_malloc, and no matching free() of host_ifconf is
 * visible in this listing — possible leak on the big-buffer path;
 * confirm against the full source. */
5048 host_ifconf = malloc(outbufsz);
5050 return -TARGET_ENOMEM;
5052 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
/* Place the host ifreq array immediately after the ifconf header. */
5055 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
5057 host_ifconf->ifc_len = host_ifc_len;
5058 host_ifconf->ifc_buf = host_ifc_buf;
5060 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
5061 if (!is_error(ret)) {
5062 /* convert host ifc_len to target ifc_len */
5064 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
5065 target_ifc_len = nb_ifreq * target_ifreq_size;
5066 host_ifconf->ifc_len = target_ifc_len;
5068 /* restore target ifc_buf */
5070 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
5072 /* copy struct ifconf to target user */
5074 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5076 return -TARGET_EFAULT;
5077 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
5078 unlock_user(argptr, arg, target_size);
5080 /* copy ifreq[] to target user */
/* NOTE(review): the lock_user() result is not checked for NULL here,
 * unlike every other lock_user call in this file — confirm. */
5082 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
5083 for (i = 0; i < nb_ifreq ; i++) {
5084 thunk_convert(argptr + i * target_ifreq_size,
5085 host_ifc_buf + i * sizeof(struct ifreq),
5086 ifreq_arg_type, THUNK_TARGET);
5088 unlock_user(argptr, target_ifc_buf, target_ifc_len);
5098 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5099 int cmd, abi_long arg)
5102 struct dm_ioctl *host_dm;
5103 abi_long guest_data;
5104 uint32_t guest_data_size;
5106 const argtype *arg_type = ie->arg_type;
5108 void *big_buf = NULL;
5112 target_size = thunk_type_size(arg_type, 0);
5113 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5115 ret = -TARGET_EFAULT;
5118 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5119 unlock_user(argptr, arg, 0);
5121 /* buf_temp is too small, so fetch things into a bigger buffer */
5122 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5123 memcpy(big_buf, buf_temp, target_size);
5127 guest_data = arg + host_dm->data_start;
5128 if ((guest_data - arg) < 0) {
5129 ret = -TARGET_EINVAL;
5132 guest_data_size = host_dm->data_size - host_dm->data_start;
5133 host_data = (char*)host_dm + host_dm->data_start;
5135 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5137 ret = -TARGET_EFAULT;
5141 switch (ie->host_cmd) {
5143 case DM_LIST_DEVICES:
5146 case DM_DEV_SUSPEND:
5149 case DM_TABLE_STATUS:
5150 case DM_TABLE_CLEAR:
5152 case DM_LIST_VERSIONS:
5156 case DM_DEV_SET_GEOMETRY:
5157 /* data contains only strings */
5158 memcpy(host_data, argptr, guest_data_size);
5161 memcpy(host_data, argptr, guest_data_size);
5162 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5166 void *gspec = argptr;
5167 void *cur_data = host_data;
5168 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5169 int spec_size = thunk_type_size(arg_type, 0);
5172 for (i = 0; i < host_dm->target_count; i++) {
5173 struct dm_target_spec *spec = cur_data;
5177 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5178 slen = strlen((char*)gspec + spec_size) + 1;
5180 spec->next = sizeof(*spec) + slen;
5181 strcpy((char*)&spec[1], gspec + spec_size);
5183 cur_data += spec->next;
5188 ret = -TARGET_EINVAL;
5189 unlock_user(argptr, guest_data, 0);
5192 unlock_user(argptr, guest_data, 0);
5194 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5195 if (!is_error(ret)) {
5196 guest_data = arg + host_dm->data_start;
5197 guest_data_size = host_dm->data_size - host_dm->data_start;
5198 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5199 switch (ie->host_cmd) {
5204 case DM_DEV_SUSPEND:
5207 case DM_TABLE_CLEAR:
5209 case DM_DEV_SET_GEOMETRY:
5210 /* no return data */
5212 case DM_LIST_DEVICES:
5214 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5215 uint32_t remaining_data = guest_data_size;
5216 void *cur_data = argptr;
5217 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5218 int nl_size = 12; /* can't use thunk_size due to alignment */
5221 uint32_t next = nl->next;
5223 nl->next = nl_size + (strlen(nl->name) + 1);
5225 if (remaining_data < nl->next) {
5226 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5229 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5230 strcpy(cur_data + nl_size, nl->name);
5231 cur_data += nl->next;
5232 remaining_data -= nl->next;
5236 nl = (void*)nl + next;
5241 case DM_TABLE_STATUS:
5243 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5244 void *cur_data = argptr;
5245 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5246 int spec_size = thunk_type_size(arg_type, 0);
5249 for (i = 0; i < host_dm->target_count; i++) {
5250 uint32_t next = spec->next;
5251 int slen = strlen((char*)&spec[1]) + 1;
5252 spec->next = (cur_data - argptr) + spec_size + slen;
5253 if (guest_data_size < spec->next) {
5254 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5257 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5258 strcpy(cur_data + spec_size, (char*)&spec[1]);
5259 cur_data = argptr + spec->next;
5260 spec = (void*)host_dm + host_dm->data_start + next;
5266 void *hdata = (void*)host_dm + host_dm->data_start;
5267 int count = *(uint32_t*)hdata;
5268 uint64_t *hdev = hdata + 8;
5269 uint64_t *gdev = argptr + 8;
5272 *(uint32_t*)argptr = tswap32(count);
5273 for (i = 0; i < count; i++) {
5274 *gdev = tswap64(*hdev);
5280 case DM_LIST_VERSIONS:
5282 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5283 uint32_t remaining_data = guest_data_size;
5284 void *cur_data = argptr;
5285 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5286 int vers_size = thunk_type_size(arg_type, 0);
5289 uint32_t next = vers->next;
5291 vers->next = vers_size + (strlen(vers->name) + 1);
5293 if (remaining_data < vers->next) {
5294 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5297 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5298 strcpy(cur_data + vers_size, vers->name);
5299 cur_data += vers->next;
5300 remaining_data -= vers->next;
5304 vers = (void*)vers + next;
5309 unlock_user(argptr, guest_data, 0);
5310 ret = -TARGET_EINVAL;
5313 unlock_user(argptr, guest_data, guest_data_size);
5315 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5317 ret = -TARGET_EFAULT;
5320 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5321 unlock_user(argptr, arg, target_size);
/*
 * do_ioctl_blkpg: special-case converter for the BLKPG ioctl.
 * BLKPG takes a struct blkpg_ioctl_arg whose ->data member is a guest
 * pointer to a struct blkpg_partition, so a flat thunk conversion is not
 * enough: the payload must be deep-copied and the pointer swizzled to a
 * host-side copy before calling the host ioctl.
 * NOTE(review): this is a sampled excerpt — error-path lines (gotos,
 * closing braces) between the visible statements are not shown.
 */
5328 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5329 int cmd, abi_long arg)
5333 const argtype *arg_type = ie->arg_type;
5334 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
/* buf_temp holds the host-format blkpg_ioctl_arg after thunk conversion */
5337 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5338 struct blkpg_partition host_part;
5340 /* Read and convert blkpg */
5342 target_size = thunk_type_size(arg_type, 0);
5343 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5345 ret = -TARGET_EFAULT;
5348 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5349 unlock_user(argptr, arg, 0);
/* Only partition add/delete carry a blkpg_partition payload we know how
 * to convert; anything else is rejected with EINVAL. */
5351 switch (host_blkpg->op) {
5352 case BLKPG_ADD_PARTITION:
5353 case BLKPG_DEL_PARTITION:
5354 /* payload is struct blkpg_partition */
5357 /* Unknown opcode */
5358 ret = -TARGET_EINVAL;
5362 /* Read and convert blkpg->data */
/* ->data still holds the raw guest address at this point */
5363 arg = (abi_long)(uintptr_t)host_blkpg->data;
5364 target_size = thunk_type_size(part_arg_type, 0);
5365 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5367 ret = -TARGET_EFAULT;
5370 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5371 unlock_user(argptr, arg, 0);
5373 /* Swizzle the data pointer to our local copy and call! */
5374 host_blkpg->data = &host_part;
5375 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
/*
 * do_ioctl_rt: special-case converter for routing-table ioctls taking a
 * struct rtentry (e.g. SIOCADDRT/SIOCDELRT — presumably; the registration
 * site is outside this excerpt).  rtentry's rt_dev member is a string
 * pointer, which the generic thunk machinery cannot translate, so the
 * struct is converted field by field and rt_dev is locked separately with
 * lock_user_string().
 * NOTE(review): sampled excerpt — some lines between the visible
 * statements are missing.
 */
5381 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5382 int fd, int cmd, abi_long arg)
5384 const argtype *arg_type = ie->arg_type;
5385 const StructEntry *se;
5386 const argtype *field_types;
5387 const int *dst_offsets, *src_offsets;
5390 abi_ulong *target_rt_dev_ptr;
5391 unsigned long *host_rt_dev_ptr;
/* This handler only supports write-direction pointer-to-rtentry ioctls;
 * the asserts document (and enforce) that registration contract. */
5395 assert(ie->access == IOC_W);
5396 assert(*arg_type == TYPE_PTR);
5398 assert(*arg_type == TYPE_STRUCT);
5399 target_size = thunk_type_size(arg_type, 0);
5400 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5402 return -TARGET_EFAULT;
5405 assert(*arg_type == (int)STRUCT_rtentry);
5406 se = struct_entries + *arg_type++;
5407 assert(se->convert[0] == NULL);
5408 /* convert struct here to be able to catch rt_dev string */
5409 field_types = se->field_types;
5410 dst_offsets = se->field_offsets[THUNK_HOST];
5411 src_offsets = se->field_offsets[THUNK_TARGET];
5412 for (i = 0; i < se->nb_fields; i++) {
/* rt_dev is identified by its host-side offset; it is the one field that
 * needs a string copy instead of a thunk_convert. */
5413 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5414 assert(*field_types == TYPE_PTRVOID);
5415 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5416 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5417 if (*target_rt_dev_ptr != 0) {
5418 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5419 tswapal(*target_rt_dev_ptr));
5420 if (!*host_rt_dev_ptr) {
5421 unlock_user(argptr, arg, 0);
5422 return -TARGET_EFAULT;
/* NULL rt_dev in the guest stays NULL on the host */
5425 *host_rt_dev_ptr = 0;
5430 field_types = thunk_convert(buf_temp + dst_offsets[i],
5431 argptr + src_offsets[i],
5432 field_types, THUNK_HOST);
5434 unlock_user(argptr, arg, 0);
5436 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
/* Release the locked rt_dev string (no write-back: IOC_W direction). */
5437 if (*host_rt_dev_ptr != 0) {
5438 unlock_user((void *)*host_rt_dev_ptr,
5439 *target_rt_dev_ptr, 0);
/*
 * do_ioctl_kdsigaccept: KDSIGACCEPT takes a signal number as its argument,
 * so the guest signal number must be remapped to the host's numbering
 * before the ioctl is issued.
 */
5444 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5445 int fd, int cmd, abi_long arg)
5447 int sig = target_to_host_signal(arg);
5448 return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
/*
 * Table of all supported ioctls.  IOCTL() maps a TARGET_xxx request number
 * to the host request plus a thunk type description; IOCTL_SPECIAL()
 * additionally installs a custom do_ioctl callback (see do_ioctl_rt etc.)
 * for requests the generic thunk converter cannot handle.
 * NOTE(review): the table contents (presumably an #include of a shared
 * ioctls definition file) fall outside this excerpt.
 */
5451 static IOCTLEntry ioctl_entries[] = {
5452 #define IOCTL(cmd, access, ...) \
5453 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5454 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5455 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5460 /* ??? Implement proper locking for ioctls. */
5461 /* do_ioctl() Must return target values and target errnos. */
/*
 * Generic ioctl dispatcher: looks the request up in ioctl_entries[],
 * delegates to a per-request handler if one is registered, otherwise
 * converts the argument according to the entry's thunk type description
 * and access direction (IOC_R read-back / IOC_W write-only / IOC_RW both).
 * NOTE(review): sampled excerpt — the lookup loop header, case labels and
 * closing braces between the visible lines are not shown.
 */
5462 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5464 const IOCTLEntry *ie;
5465 const argtype *arg_type;
/* Scratch buffer for the host-format argument; sized for the largest
 * converted struct. */
5467 uint8_t buf_temp[MAX_STRUCT_SIZE];
/* target_cmd == 0 terminates the table: request not supported */
5473 if (ie->target_cmd == 0) {
5474 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5475 return -TARGET_ENOSYS;
5477 if (ie->target_cmd == cmd)
5481 arg_type = ie->arg_type;
5483 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
/* Entries with a custom handler bypass the generic conversion below */
5486 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5489 switch(arg_type[0]) {
/* no argument */
5492 ret = get_errno(safe_ioctl(fd, ie->host_cmd));
/* scalar argument passed through unchanged */
5496 ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
/* pointer-to-struct argument: thunk-convert per access direction */
5500 target_size = thunk_type_size(arg_type, 0);
5501 switch(ie->access) {
/* read-only (host writes): call first, then copy result to guest */
5503 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5504 if (!is_error(ret)) {
5505 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5507 return -TARGET_EFAULT;
5508 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5509 unlock_user(argptr, arg, target_size);
/* write-only (host reads): copy guest data in, then call */
5513 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5515 return -TARGET_EFAULT;
5516 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5517 unlock_user(argptr, arg, 0);
5518 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
/* read-write: convert in, call, convert back out on success */
5522 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5524 return -TARGET_EFAULT;
5525 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5526 unlock_user(argptr, arg, 0);
5527 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5528 if (!is_error(ret)) {
5529 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5531 return -TARGET_EFAULT;
5532 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5533 unlock_user(argptr, arg, target_size);
5539 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5540 (long)cmd, arg_type[0]);
5541 ret = -TARGET_ENOSYS;
/*
 * termios c_iflag translation table: each row maps a (mask, bits) pair in
 * the target's encoding to the equivalent host encoding.  For these input
 * flags every field is a single bit, so mask == bits on both sides.
 */
5547 static const bitmask_transtbl iflag_tbl[] = {
5548 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5549 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5550 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5551 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5552 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5553 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5554 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5555 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5556 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5557 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5558 { TARGET_IXON, TARGET_IXON, IXON, IXON },
5559 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5560 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5561 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
/*
 * termios c_oflag translation table.  Delay fields (NLDLY, CRDLY, TABDLY,
 * BSDLY, VTDLY, FFDLY) are multi-bit: several rows share the same mask,
 * one per possible field value.
 */
5565 static const bitmask_transtbl oflag_tbl[] = {
5566 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5567 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5568 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5569 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5570 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5571 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5572 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5573 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5574 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5575 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5576 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5577 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5578 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5579 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5580 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5581 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5582 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5583 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5584 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5585 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5586 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5587 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5588 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5589 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
/*
 * termios c_cflag translation table.  CBAUD (baud rate) and CSIZE
 * (character size) are multi-bit fields — one row per supported value;
 * the remaining flags are single bits.
 */
5593 static const bitmask_transtbl cflag_tbl[] = {
5594 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5595 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5596 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5597 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5598 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5599 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5600 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5601 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5602 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5603 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5604 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5605 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5606 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5607 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5608 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5609 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5610 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5611 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5612 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5613 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5614 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5615 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5616 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5617 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5618 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5619 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5620 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5621 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5622 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5623 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5624 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
/*
 * termios c_lflag translation table (all single-bit local-mode flags).
 */
5628 static const bitmask_transtbl lflag_tbl[] = {
5629 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5630 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5631 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5632 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5633 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5634 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5635 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5636 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5637 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5638 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5639 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5640 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5641 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5642 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5643 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
/*
 * target_to_host_termios: convert a guest struct target_termios into the
 * host's struct host_termios.  The four flag words are byte-swapped and
 * remapped through the *_tbl translation tables above; the control
 * characters are copied slot by slot, remapping each TARGET_Vxxx index to
 * the host Vxxx index.  Unmapped host c_cc slots are left zeroed.
 */
5647 static void target_to_host_termios (void *dst, const void *src)
5649 struct host_termios *host = dst;
5650 const struct target_termios *target = src;
5653 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5655 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5657 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5659 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5660 host->c_line = target->c_line;
/* zero first so any slot without a mapping below stays cleared */
5662 memset(host->c_cc, 0, sizeof(host->c_cc));
5663 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5664 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5665 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5666 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5667 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5668 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5669 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5670 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5671 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5672 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5673 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5674 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5675 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5676 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5677 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5678 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5679 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
/*
 * host_to_target_termios: inverse of target_to_host_termios — converts a
 * host struct host_termios back into the guest's struct target_termios,
 * remapping flag bits through the same tables (in the host→target
 * direction) and the c_cc control characters index by index.
 */
5682 static void host_to_target_termios (void *dst, const void *src)
5684 struct target_termios *target = dst;
5685 const struct host_termios *host = src;
5688 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5690 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5692 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5694 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5695 target->c_line = host->c_line;
/* zero first so unmapped target c_cc slots are well-defined */
5697 memset(target->c_cc, 0, sizeof(target->c_cc));
5698 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5699 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5700 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5701 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5702 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5703 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5704 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5705 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5706 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5707 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5708 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5709 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5710 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5711 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5712 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5713 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5714 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
/*
 * StructEntry for termios: registers custom conversion callbacks (the two
 * functions above) plus size/alignment for both target and host layouts,
 * so the thunk machinery uses them instead of field-by-field conversion.
 * Index order follows the thunk convention: [0] = to-target, [1] = to-host.
 */
5717 static const StructEntry struct_termios_def = {
5718 .convert = { host_to_target_termios, target_to_host_termios },
5719 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5720 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
/*
 * mmap() flags translation table: maps TARGET_MAP_* bits from the guest
 * ABI to the host's MAP_* values.
 */
5723 static bitmask_transtbl mmap_flags_tbl[] = {
5724 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5725 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5726 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5727 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
5728 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
5729 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
5730 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
5731 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5732 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE, MAP_NORESERVE,
5737 #if defined(TARGET_I386)
5739 /* NOTE: there is really one LDT for all the threads */
/* Backing store for the emulated LDT (guest memory, mapped lazily by
 * write_ldt). */
5740 static uint8_t *ldt_table;
/*
 * read_ldt: modify_ldt(0) — copy the emulated LDT out to guest memory,
 * truncated to min(full table size, bytecount).
 */
5742 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5749 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5750 if (size > bytecount)
5752 p = lock_user(VERIFY_WRITE, ptr, size, 0);
5754 return -TARGET_EFAULT;
5755 /* ??? Should this by byteswapped? */
5756 memcpy(p, ldt_table, size);
5757 unlock_user(p, ptr, size);
5761 /* XXX: add locking support */
/*
 * write_ldt: modify_ldt(1/0x11) — install one LDT descriptor from a guest
 * struct target_modify_ldt_ldt_s.  Mirrors the Linux kernel's
 * write_ldt(): unpacks the flags word into its bit-fields, validates the
 * entry, builds the two 32-bit halves of the descriptor, and stores them
 * into the (lazily allocated) ldt_table.  `oldmode` selects the legacy
 * modify_ldt semantics.
 * NOTE(review): sampled excerpt — several branch bodies and closing braces
 * between the visible lines are missing.
 */
5762 static abi_long write_ldt(CPUX86State *env,
5763 abi_ulong ptr, unsigned long bytecount, int oldmode)
5765 struct target_modify_ldt_ldt_s ldt_info;
5766 struct target_modify_ldt_ldt_s *target_ldt_info;
5767 int seg_32bit, contents, read_exec_only, limit_in_pages;
5768 int seg_not_present, useable, lm;
5769 uint32_t *lp, entry_1, entry_2;
5771 if (bytecount != sizeof(ldt_info))
5772 return -TARGET_EINVAL;
5773 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5774 return -TARGET_EFAULT;
5775 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5776 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5777 ldt_info.limit = tswap32(target_ldt_info->limit);
5778 ldt_info.flags = tswap32(target_ldt_info->flags);
5779 unlock_user_struct(target_ldt_info, ptr, 0);
5781 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5782 return -TARGET_EINVAL;
/* unpack the packed flags word (same layout as the kernel's user_desc) */
5783 seg_32bit = ldt_info.flags & 1;
5784 contents = (ldt_info.flags >> 1) & 3;
5785 read_exec_only = (ldt_info.flags >> 3) & 1;
5786 limit_in_pages = (ldt_info.flags >> 4) & 1;
5787 seg_not_present = (ldt_info.flags >> 5) & 1;
5788 useable = (ldt_info.flags >> 6) & 1;
5792 lm = (ldt_info.flags >> 7) & 1;
5794 if (contents == 3) {
5796 return -TARGET_EINVAL;
5797 if (seg_not_present == 0)
5798 return -TARGET_EINVAL;
5800 /* allocate the LDT */
/* first use: map a zeroed page-range in guest space for the table */
5802 env->ldt.base = target_mmap(0,
5803 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
5804 PROT_READ|PROT_WRITE,
5805 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
5806 if (env->ldt.base == -1)
5807 return -TARGET_ENOMEM;
5808 memset(g2h(env->ldt.base), 0,
5809 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
5810 env->ldt.limit = 0xffff;
5811 ldt_table = g2h(env->ldt.base);
5814 /* NOTE: same code as Linux kernel */
5815 /* Allow LDTs to be cleared by the user. */
5816 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5819 read_exec_only == 1 &&
5821 limit_in_pages == 0 &&
5822 seg_not_present == 1 &&
/* assemble the descriptor's low and high dwords from base/limit/flags */
5830 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5831 (ldt_info.limit & 0x0ffff);
5832 entry_2 = (ldt_info.base_addr & 0xff000000) |
5833 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5834 (ldt_info.limit & 0xf0000) |
5835 ((read_exec_only ^ 1) << 9) |
5837 ((seg_not_present ^ 1) << 15) |
5839 (limit_in_pages << 23) |
5843 entry_2 |= (useable << 20);
5845 /* Install the new entry ... */
/* each descriptor is 8 bytes, hence entry_number << 3 */
5847 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
5848 lp[0] = tswap32(entry_1);
5849 lp[1] = tswap32(entry_2);
5853 /* specific and weird i386 syscalls */
/*
 * do_modify_ldt: dispatcher for the modify_ldt syscall — func selects
 * read (read_ldt), legacy write (oldmode=1) or new-format write
 * (oldmode=0); anything else is ENOSYS.
 */
5854 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5855 unsigned long bytecount)
5861 ret = read_ldt(ptr, bytecount);
5864 ret = write_ldt(env, ptr, bytecount, 1);
5867 ret = write_ldt(env, ptr, bytecount, 0);
5870 ret = -TARGET_ENOSYS;
5876 #if defined(TARGET_I386) && defined(TARGET_ABI32)
/*
 * do_set_thread_area: emulate set_thread_area(2) for 32-bit x86 guests.
 * Installs a TLS descriptor into the emulated GDT.  An entry_number of -1
 * asks the kernel (here: us) to pick a free TLS slot and report it back
 * to the guest.  Descriptor assembly mirrors write_ldt() above.
 * NOTE(review): sampled excerpt — some branch bodies/braces are missing.
 */
5877 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
5879 uint64_t *gdt_table = g2h(env->gdt.base);
5880 struct target_modify_ldt_ldt_s ldt_info;
5881 struct target_modify_ldt_ldt_s *target_ldt_info;
5882 int seg_32bit, contents, read_exec_only, limit_in_pages;
5883 int seg_not_present, useable, lm;
5884 uint32_t *lp, entry_1, entry_2;
5887 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5888 if (!target_ldt_info)
5889 return -TARGET_EFAULT;
5890 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5891 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5892 ldt_info.limit = tswap32(target_ldt_info->limit);
5893 ldt_info.flags = tswap32(target_ldt_info->flags);
/* -1 means "allocate a free TLS entry and write the index back" */
5894 if (ldt_info.entry_number == -1) {
5895 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
5896 if (gdt_table[i] == 0) {
5897 ldt_info.entry_number = i;
5898 target_ldt_info->entry_number = tswap32(i);
5903 unlock_user_struct(target_ldt_info, ptr, 1);
5905 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
5906 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
5907 return -TARGET_EINVAL;
/* unpack the flags word, same layout as struct user_desc */
5908 seg_32bit = ldt_info.flags & 1;
5909 contents = (ldt_info.flags >> 1) & 3;
5910 read_exec_only = (ldt_info.flags >> 3) & 1;
5911 limit_in_pages = (ldt_info.flags >> 4) & 1;
5912 seg_not_present = (ldt_info.flags >> 5) & 1;
5913 useable = (ldt_info.flags >> 6) & 1;
5917 lm = (ldt_info.flags >> 7) & 1;
5920 if (contents == 3) {
5921 if (seg_not_present == 0)
5922 return -TARGET_EINVAL;
5925 /* NOTE: same code as Linux kernel */
5926 /* Allow LDTs to be cleared by the user. */
5927 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5928 if ((contents == 0 &&
5929 read_exec_only == 1 &&
5931 limit_in_pages == 0 &&
5932 seg_not_present == 1 &&
/* assemble the 8-byte descriptor from base/limit/flag bits */
5940 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5941 (ldt_info.limit & 0x0ffff);
5942 entry_2 = (ldt_info.base_addr & 0xff000000) |
5943 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5944 (ldt_info.limit & 0xf0000) |
5945 ((read_exec_only ^ 1) << 9) |
5947 ((seg_not_present ^ 1) << 15) |
5949 (limit_in_pages << 23) |
5954 /* Install the new entry ... */
/* gdt_table is uint64_t*, so plain index addressing reaches the entry */
5956 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
5957 lp[0] = tswap32(entry_1);
5958 lp[1] = tswap32(entry_2);
/*
 * do_get_thread_area: emulate get_thread_area(2) — read a TLS descriptor
 * back out of the emulated GDT and re-encode it into the guest's
 * struct target_modify_ldt_ldt_s (the inverse of do_set_thread_area).
 */
5962 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
5964 struct target_modify_ldt_ldt_s *target_ldt_info;
5965 uint64_t *gdt_table = g2h(env->gdt.base);
5966 uint32_t base_addr, limit, flags;
5967 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
5968 int seg_not_present, useable, lm;
5969 uint32_t *lp, entry_1, entry_2;
5971 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5972 if (!target_ldt_info)
5973 return -TARGET_EFAULT;
5974 idx = tswap32(target_ldt_info->entry_number);
/* only TLS slots may be queried */
5975 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
5976 idx > TARGET_GDT_ENTRY_TLS_MAX) {
5977 unlock_user_struct(target_ldt_info, ptr, 1);
5978 return -TARGET_EINVAL;
5980 lp = (uint32_t *)(gdt_table + idx);
5981 entry_1 = tswap32(lp[0]);
5982 entry_2 = tswap32(lp[1]);
/* decode the descriptor bits back into user_desc-style fields;
 * note present/read-exec bits are stored inverted in the descriptor */
5984 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
5985 contents = (entry_2 >> 10) & 3;
5986 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
5987 seg_32bit = (entry_2 >> 22) & 1;
5988 limit_in_pages = (entry_2 >> 23) & 1;
5989 useable = (entry_2 >> 20) & 1;
5993 lm = (entry_2 >> 21) & 1;
/* repack into the flags word layout used by modify_ldt/set_thread_area */
5995 flags = (seg_32bit << 0) | (contents << 1) |
5996 (read_exec_only << 3) | (limit_in_pages << 4) |
5997 (seg_not_present << 5) | (useable << 6) | (lm << 7);
5998 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
5999 base_addr = (entry_1 >> 16) |
6000 (entry_2 & 0xff000000) |
6001 ((entry_2 & 0xff) << 16);
6002 target_ldt_info->base_addr = tswapal(base_addr);
6003 target_ldt_info->limit = tswap32(limit);
6004 target_ldt_info->flags = tswap32(flags);
6005 unlock_user_struct(target_ldt_info, ptr, 1);
6010 #ifndef TARGET_ABI32
/*
 * do_arch_prctl: x86-64 arch_prctl emulation — get/set the FS or GS
 * segment base for the guest.  SET loads a null selector then overrides
 * the base; GET copies the current base out to guest memory.
 */
6011 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6018 case TARGET_ARCH_SET_GS:
6019 case TARGET_ARCH_SET_FS:
6020 if (code == TARGET_ARCH_SET_GS)
/* load a null selector, then set the hidden base directly */
6024 cpu_x86_load_seg(env, idx, 0);
6025 env->segs[idx].base = addr;
6027 case TARGET_ARCH_GET_GS:
6028 case TARGET_ARCH_GET_FS:
6029 if (code == TARGET_ARCH_GET_GS)
6033 val = env->segs[idx].base;
6034 if (put_user(val, addr, abi_ulong))
6035 ret = -TARGET_EFAULT;
6038 ret = -TARGET_EINVAL;
/* Host stack size for threads created via pthread_create (clone emul.) */
6047 #define NEW_STACK_SIZE 0x40000
/* Serializes thread creation so parent-side TLS setup appears atomic. */
6050 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
/* Handshake data shared between do_fork() and the new thread. */
6053 pthread_mutex_t mutex;
6054 pthread_cond_t cond;
6057 abi_ulong child_tidptr;
6058 abi_ulong parent_tidptr;
/*
 * clone_func: entry point of a host thread emulating a guest
 * CLONE_VM clone.  Publishes the child tid (child/parent tidptr writes),
 * unblocks signals, signals readiness to the parent via info->cond, then
 * waits on clone_lock until the parent finishes TLS setup before running
 * the guest CPU loop (loop itself is outside this excerpt).
 */
6062 static void *clone_func(void *arg)
6064 new_thread_info *info = arg;
6069 rcu_register_thread();
6071 cpu = ENV_GET_CPU(env);
6073 ts = (TaskState *)cpu->opaque;
6074 info->tid = gettid();
6075 cpu->host_tid = info->tid;
6077 if (info->child_tidptr)
6078 put_user_u32(info->tid, info->child_tidptr);
6079 if (info->parent_tidptr)
6080 put_user_u32(info->tid, info->parent_tidptr);
6081 /* Enable signals. */
6082 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6083 /* Signal to the parent that we're ready. */
6084 pthread_mutex_lock(&info->mutex);
6085 pthread_cond_broadcast(&info->cond);
6086 pthread_mutex_unlock(&info->mutex);
6087 /* Wait until the parent has finshed initializing the tls state. */
6088 pthread_mutex_lock(&clone_lock);
6089 pthread_mutex_unlock(&clone_lock);
6095 /* do_fork() Must return host values and target errnos (unlike most
6096 do_*() functions). */
/*
 * do_fork: emulate clone()/fork()/vfork().  CLONE_VM clones become host
 * pthreads sharing this process's address space; anything else is
 * emulated with fork().  vfork is deliberately degraded to fork.
 * NOTE(review): sampled excerpt — the fork() call itself and several
 * braces/returns between the visible lines are not shown.
 */
6097 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6098 abi_ulong parent_tidptr, target_ulong newtls,
6099 abi_ulong child_tidptr)
6101 CPUState *cpu = ENV_GET_CPU(env);
6105 CPUArchState *new_env;
/* drop flag bits we deliberately ignore before validating the rest */
6108 flags &= ~CLONE_IGNORED_FLAGS;
6110 /* Emulate vfork() with fork() */
6111 if (flags & CLONE_VFORK)
6112 flags &= ~(CLONE_VFORK | CLONE_VM);
6114 if (flags & CLONE_VM) {
/* --- thread creation path: new host pthread, shared guest memory --- */
6115 TaskState *parent_ts = (TaskState *)cpu->opaque;
6116 new_thread_info info;
6117 pthread_attr_t attr;
/* only the full pthread-like flag combination is supported */
6119 if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6120 (flags & CLONE_INVALID_THREAD_FLAGS)) {
6121 return -TARGET_EINVAL;
6124 ts = g_new0(TaskState, 1);
6125 init_task_state(ts);
6126 /* we create a new CPU instance. */
6127 new_env = cpu_copy(env);
6128 /* Init regs that differ from the parent. */
6129 cpu_clone_regs(new_env, newsp);
6130 new_cpu = ENV_GET_CPU(new_env);
6131 new_cpu->opaque = ts;
6132 ts->bprm = parent_ts->bprm;
6133 ts->info = parent_ts->info;
6134 ts->signal_mask = parent_ts->signal_mask;
6136 if (flags & CLONE_CHILD_CLEARTID) {
6137 ts->child_tidptr = child_tidptr;
6140 if (flags & CLONE_SETTLS) {
6141 cpu_set_tls (new_env, newtls);
6144 /* Grab a mutex so that thread setup appears atomic. */
6145 pthread_mutex_lock(&clone_lock);
6147 memset(&info, 0, sizeof(info));
6148 pthread_mutex_init(&info.mutex, NULL);
6149 pthread_mutex_lock(&info.mutex);
6150 pthread_cond_init(&info.cond, NULL);
6152 if (flags & CLONE_CHILD_SETTID) {
6153 info.child_tidptr = child_tidptr;
6155 if (flags & CLONE_PARENT_SETTID) {
6156 info.parent_tidptr = parent_tidptr;
6159 ret = pthread_attr_init(&attr);
6160 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6161 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6162 /* It is not safe to deliver signals until the child has finished
6163 initializing, so temporarily block all signals. */
6164 sigfillset(&sigmask);
6165 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6167 ret = pthread_create(&info.thread, &attr, clone_func, &info);
6168 /* TODO: Free new CPU state if thread creation failed. */
6170 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6171 pthread_attr_destroy(&attr);
6173 /* Wait for the child to initialize. */
6174 pthread_cond_wait(&info.cond, &info.mutex);
6179 pthread_mutex_unlock(&info.mutex);
6180 pthread_cond_destroy(&info.cond);
6181 pthread_mutex_destroy(&info.mutex);
6182 pthread_mutex_unlock(&clone_lock);
6184 /* if no CLONE_VM, we consider it is a fork */
/* --- fork path: validate flags, then fork() the whole process --- */
6185 if (flags & CLONE_INVALID_FORK_FLAGS) {
6186 return -TARGET_EINVAL;
6189 /* We can't support custom termination signals */
6190 if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6191 return -TARGET_EINVAL;
6194 if (block_signals()) {
6195 return -TARGET_ERESTARTSYS;
6201 /* Child Process. */
6203 cpu_clone_regs(env, newsp);
6205 /* There is a race condition here. The parent process could
6206 theoretically read the TID in the child process before the child
6207 tid is set. This would require using either ptrace
6208 (not implemented) or having *_tidptr to point at a shared memory
6209 mapping. We can't repeat the spinlock hack used above because
6210 the child process gets its own copy of the lock. */
6211 if (flags & CLONE_CHILD_SETTID)
6212 put_user_u32(gettid(), child_tidptr)
6213 if (flags & CLONE_PARENT_SETTID)
6214 put_user_u32(gettid(), parent_tidptr);
6215 ts = (TaskState *)cpu->opaque;
6216 if (flags & CLONE_SETTLS)
6217 cpu_set_tls (env, newtls);
6218 if (flags & CLONE_CHILD_CLEARTID)
6219 ts->child_tidptr = child_tidptr;
6227 /* warning : doesn't handle linux specific flags... */
/*
 * target_to_host_fcntl_cmd: map a guest fcntl command number to the
 * host's.  On 32-bit ABIs the *64 lock commands map onto the host's
 * native lock commands (the locking is done with struct flock64 on the
 * host side).  Unknown commands yield -TARGET_EINVAL, which callers check
 * for before issuing the host fcntl.
 * NOTE(review): sampled excerpt — most `return` lines for the case labels
 * are omitted here.
 */
6228 static int target_to_host_fcntl_cmd(int cmd)
6231 case TARGET_F_DUPFD:
6232 case TARGET_F_GETFD:
6233 case TARGET_F_SETFD:
6234 case TARGET_F_GETFL:
6235 case TARGET_F_SETFL:
6237 case TARGET_F_GETLK:
6239 case TARGET_F_SETLK:
6241 case TARGET_F_SETLKW:
6243 case TARGET_F_GETOWN:
6245 case TARGET_F_SETOWN:
6247 case TARGET_F_GETSIG:
6249 case TARGET_F_SETSIG:
6251 #if TARGET_ABI_BITS == 32
6252 case TARGET_F_GETLK64:
6254 case TARGET_F_SETLK64:
6256 case TARGET_F_SETLKW64:
6259 case TARGET_F_SETLEASE:
6261 case TARGET_F_GETLEASE:
6263 #ifdef F_DUPFD_CLOEXEC
6264 case TARGET_F_DUPFD_CLOEXEC:
6265 return F_DUPFD_CLOEXEC;
6267 case TARGET_F_NOTIFY:
6270 case TARGET_F_GETOWN_EX:
6274 case TARGET_F_SETOWN_EX:
6278 case TARGET_F_SETPIPE_SZ:
6279 return F_SETPIPE_SZ;
6280 case TARGET_F_GETPIPE_SZ:
6281 return F_GETPIPE_SZ;
6284 return -TARGET_EINVAL;
6286 return -TARGET_EINVAL;
/*
 * flock l_type translation table.  l_type values are small enums, not bit
 * masks, so TRANSTBL_CONVERT uses an all-ones mask (-1) on both sides and
 * lets the bitmask translator match on exact value.
 */
6289 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
6290 static const bitmask_transtbl flock_tbl[] = {
6291 TRANSTBL_CONVERT(F_RDLCK),
6292 TRANSTBL_CONVERT(F_WRLCK),
6293 TRANSTBL_CONVERT(F_UNLCK),
6294 TRANSTBL_CONVERT(F_EXLCK),
6295 TRANSTBL_CONVERT(F_SHLCK),
/*
 * copy_from_user_flock: read a guest struct target_flock (32/64-bit
 * native layout) into a host struct flock64, translating l_type through
 * flock_tbl.  Returns 0 or -TARGET_EFAULT.
 */
6299 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6300 abi_ulong target_flock_addr)
6302 struct target_flock *target_fl;
6305 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6306 return -TARGET_EFAULT;
6309 __get_user(l_type, &target_fl->l_type);
6310 fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
6311 __get_user(fl->l_whence, &target_fl->l_whence);
6312 __get_user(fl->l_start, &target_fl->l_start);
6313 __get_user(fl->l_len, &target_fl->l_len);
6314 __get_user(fl->l_pid, &target_fl->l_pid);
6315 unlock_user_struct(target_fl, target_flock_addr, 0);
/*
 * copy_to_user_flock: write a host struct flock64 back to a guest
 * struct target_flock, translating l_type through flock_tbl.
 * Returns 0 or -TARGET_EFAULT.
 */
6319 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6320 const struct flock64 *fl)
6322 struct target_flock *target_fl;
6325 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6326 return -TARGET_EFAULT;
6329 l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
6330 __put_user(l_type, &target_fl->l_type);
6331 __put_user(fl->l_whence, &target_fl->l_whence);
6332 __put_user(fl->l_start, &target_fl->l_start);
6333 __put_user(fl->l_len, &target_fl->l_len);
6334 __put_user(fl->l_pid, &target_fl->l_pid);
6335 unlock_user_struct(target_fl, target_flock_addr, 1);
/* Function-pointer types so do_fcntl can select the right flock64
 * copy-in/copy-out variant (native vs. ARM-EABI layout) at runtime. */
6339 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6340 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6342 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
/*
 * copy_from_user_eabi_flock64: ARM EABI variant of the flock64 copy-in —
 * struct target_eabi_flock64 has EABI-specific alignment/padding, hence
 * the separate accessor.  Same field handling as copy_from_user_flock.
 */
6343 static inline abi_long copy_from_user_eabi_flock64(struct flock64 *fl,
6344 abi_ulong target_flock_addr)
6346 struct target_eabi_flock64 *target_fl;
6349 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6350 return -TARGET_EFAULT;
6353 __get_user(l_type, &target_fl->l_type);
6354 fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
6355 __get_user(fl->l_whence, &target_fl->l_whence);
6356 __get_user(fl->l_start, &target_fl->l_start);
6357 __get_user(fl->l_len, &target_fl->l_len);
6358 __get_user(fl->l_pid, &target_fl->l_pid);
6359 unlock_user_struct(target_fl, target_flock_addr, 0);
/*
 * copy_to_user_eabi_flock64: ARM EABI variant of the flock64 copy-out
 * (see copy_from_user_eabi_flock64 for why a separate layout is needed).
 */
6363 static inline abi_long copy_to_user_eabi_flock64(abi_ulong target_flock_addr,
6364 const struct flock64 *fl)
6366 struct target_eabi_flock64 *target_fl;
6369 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6370 return -TARGET_EFAULT;
6373 l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
6374 __put_user(l_type, &target_fl->l_type);
6375 __put_user(fl->l_whence, &target_fl->l_whence);
6376 __put_user(fl->l_start, &target_fl->l_start);
6377 __put_user(fl->l_len, &target_fl->l_len);
6378 __put_user(fl->l_pid, &target_fl->l_pid);
6379 unlock_user_struct(target_fl, target_flock_addr, 1);
/*
 * Generic (non-EABI) guest->host copy of a struct flock64.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
6384 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6385 abi_ulong target_flock_addr)
6387 struct target_flock64 *target_fl;
6390 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6391 return -TARGET_EFAULT;
6394 __get_user(l_type, &target_fl->l_type);
6395 fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
6396 __get_user(fl->l_whence, &target_fl->l_whence);
6397 __get_user(fl->l_start, &target_fl->l_start);
6398 __get_user(fl->l_len, &target_fl->l_len);
6399 __get_user(fl->l_pid, &target_fl->l_pid);
6400 unlock_user_struct(target_fl, target_flock_addr, 0);
/*
 * Generic (non-EABI) host->guest copy of a struct flock64.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
6404 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6405 const struct flock64 *fl)
6407 struct target_flock64 *target_fl;
6410 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6411 return -TARGET_EFAULT;
6414 l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
6415 __put_user(l_type, &target_fl->l_type);
6416 __put_user(fl->l_whence, &target_fl->l_whence);
6417 __put_user(fl->l_start, &target_fl->l_start);
6418 __put_user(fl->l_len, &target_fl->l_len);
6419 __put_user(fl->l_pid, &target_fl->l_pid);
6420 unlock_user_struct(target_fl, target_flock_addr, 1);
/*
 * Emulate fcntl(2) for the guest: translate the command number, marshal
 * lock/owner structures between guest and host layouts, and forward to
 * safe_fcntl.  Returns a host result or a -TARGET_* errno.
 */
6424 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6426 struct flock64 fl64;
6428 struct f_owner_ex fox;
6429 struct target_f_owner_ex *target_fox;
/* map the guest F_* command to the host's numbering; may fail */
6432 int host_cmd = target_to_host_fcntl_cmd(cmd);
6434 if (host_cmd == -TARGET_EINVAL)
/* GETLK: copy lock description in, query, copy result back out */
6438 case TARGET_F_GETLK:
6439 ret = copy_from_user_flock(&fl64, arg);
6443 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6445 ret = copy_to_user_flock(arg, &fl64);
/* SETLK/SETLKW: copy-in only; kernel does not write the struct back */
6449 case TARGET_F_SETLK:
6450 case TARGET_F_SETLKW:
6451 ret = copy_from_user_flock(&fl64, arg);
6455 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
/* 64-bit lock variants use the flock64 copy helpers */
6458 case TARGET_F_GETLK64:
6459 ret = copy_from_user_flock64(&fl64, arg);
6463 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6465 ret = copy_to_user_flock64(arg, &fl64);
6468 case TARGET_F_SETLK64:
6469 case TARGET_F_SETLKW64:
6470 ret = copy_from_user_flock64(&fl64, arg);
6474 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
/* GETFL: returned O_* flags must be remapped host->guest */
6477 case TARGET_F_GETFL:
6478 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6480 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
/* SETFL: incoming O_* flags remapped guest->host */
6484 case TARGET_F_SETFL:
6485 ret = get_errno(safe_fcntl(fd, host_cmd,
6486 target_to_host_bitmask(arg,
/* GETOWN_EX: query host, then byte-swap the f_owner_ex out to guest */
6491 case TARGET_F_GETOWN_EX:
6492 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6494 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6495 return -TARGET_EFAULT;
6496 target_fox->type = tswap32(fox.type);
6497 target_fox->pid = tswap32(fox.pid);
6498 unlock_user_struct(target_fox, arg, 1);
/* SETOWN_EX: byte-swap the f_owner_ex in from guest, then apply */
6504 case TARGET_F_SETOWN_EX:
6505 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6506 return -TARGET_EFAULT;
6507 fox.type = tswap32(target_fox->type);
6508 fox.pid = tswap32(target_fox->pid);
6509 unlock_user_struct(target_fox, arg, 0);
6510 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
/* scalar-argument commands pass 'arg' through untranslated */
6514 case TARGET_F_SETOWN:
6515 case TARGET_F_GETOWN:
6516 case TARGET_F_SETSIG:
6517 case TARGET_F_GETSIG:
6518 case TARGET_F_SETLEASE:
6519 case TARGET_F_GETLEASE:
6520 case TARGET_F_SETPIPE_SZ:
6521 case TARGET_F_GETPIPE_SZ:
6522 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
/* default/fall-through path: forward the raw guest cmd unchanged */
6526 ret = get_errno(safe_fcntl(fd, cmd, arg));
/*
 * UID/GID width-conversion helpers.  On USE_UID16 targets guest IDs are
 * 16-bit, so values are clamped/sign-checked when crossing the boundary
 * (-1 is the "no change" sentinel and must survive the conversion);
 * on !USE_UID16 targets these are identity functions.
 * (Listing elides the function bodies' return statements.)
 */
6534 static inline int high2lowuid(int uid)
6542 static inline int high2lowgid(int gid)
6550 static inline int low2highuid(int uid)
/* 16-bit -1 must map to full-width -1, not 0xffff */
6552 if ((int16_t)uid == -1)
6558 static inline int low2highgid(int gid)
6560 if ((int16_t)gid == -1)
6565 static inline int tswapid(int id)
/* guest IDs are stored as 16-bit quantities on UID16 targets */
6570 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6572 #else /* !USE_UID16 */
6573 static inline int high2lowuid(int uid)
6577 static inline int high2lowgid(int gid)
6581 static inline int low2highuid(int uid)
6585 static inline int low2highgid(int gid)
6589 static inline int tswapid(int id)
6594 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6596 #endif /* USE_UID16 */
6598 /* We must do direct syscalls for setting UID/GID, because we want to
6599  * implement the Linux system call semantics of "change only for this thread",
6600  * not the libc/POSIX semantics of "change for all threads in process".
6601  * (See http://ewontfix.com/17/ for more details.)
6602  * We use the 32-bit version of the syscalls if present; if it is not
6603  * then either the host architecture supports 32-bit UIDs natively with
6604  * the standard syscall, or the 16-bit UID is the best we can do.
/* Prefer the *32 syscall numbers where the host defines them */
6606 #ifdef __NR_setuid32
6607 #define __NR_sys_setuid __NR_setuid32
6609 #define __NR_sys_setuid __NR_setuid
6611 #ifdef __NR_setgid32
6612 #define __NR_sys_setgid __NR_setgid32
6614 #define __NR_sys_setgid __NR_setgid
6616 #ifdef __NR_setresuid32
6617 #define __NR_sys_setresuid __NR_setresuid32
6619 #define __NR_sys_setresuid __NR_setresuid
6621 #ifdef __NR_setresgid32
6622 #define __NR_sys_setresgid __NR_setresgid32
6624 #define __NR_sys_setresgid __NR_setresgid
/* raw syscall wrappers generated from the numbers chosen above */
6627 _syscall1(int, sys_setuid, uid_t, uid)
6628 _syscall1(int, sys_setgid, gid_t, gid)
6629 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6630 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
/*
 * One-time initialisation of the syscall translation layer: register the
 * marshalling descriptors for all translated structs, build the reverse
 * errno table, and patch ioctl size fields that were left as "all ones"
 * placeholders with the real thunk-computed size.
 */
6632 void syscall_init(void)
6635 const argtype *arg_type;
6639 thunk_init(STRUCT_MAX);
6641 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6642 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6643 #include "syscall_types.h"
6645 #undef STRUCT_SPECIAL
6647 /* Build target_to_host_errno_table[] table from
6648  * host_to_target_errno_table[]. */
6649 for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6650 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6653 /* we patch the ioctl size if necessary. We rely on the fact that
6654 no ioctl has all the bits at '1' in the size field */
6656 while (ie->target_cmd != 0) {
6657 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6658 TARGET_IOC_SIZEMASK) {
6659 arg_type = ie->arg_type;
/* size can only be derived for pointer-typed ioctl arguments */
6660 if (arg_type[0] != TYPE_PTR) {
6661 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6666 size = thunk_type_size(arg_type, 0);
6667 ie->target_cmd = (ie->target_cmd &
6668 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6669 (size << TARGET_IOC_SIZESHIFT);
6672 /* automatic consistency check if same arch */
6673 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6674 (defined(__x86_64__) && defined(TARGET_X86_64))
/* on same-arch builds guest and host ioctl numbers must agree */
6675 if (unlikely(ie->target_cmd != ie->host_cmd)) {
6676 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6677 ie->name, ie->target_cmd, ie->host_cmd);
6684 #if TARGET_ABI_BITS == 32
/*
 * Combine two 32-bit guest register words into a 64-bit value, honouring
 * the guest's endianness (word0 is the high half on big-endian guests).
 * On 64-bit ABIs the value already fits in one register (word1 unused).
 */
6685 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
6687 #ifdef TARGET_WORDS_BIGENDIAN
6688 return ((uint64_t)word0 << 32) | word1;
6690 return ((uint64_t)word1 << 32) | word0;
6693 #else /* TARGET_ABI_BITS == 32 */
6694 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
6698 #endif /* TARGET_ABI_BITS != 32 */
6700 #ifdef TARGET_NR_truncate64
/*
 * truncate64 for 32-bit guests: the 64-bit length arrives split across
 * two registers (arg2/arg3); regpairs_aligned() handles ABIs that skip
 * a register to align the pair (listing elides the shift of arguments).
 */
6701 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6706 if (regpairs_aligned(cpu_env)) {
6710 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6714 #ifdef TARGET_NR_ftruncate64
/*
 * ftruncate64 for 32-bit guests: same register-pair reconstruction as
 * target_truncate64, applied to a file descriptor instead of a path.
 */
6715 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6720 if (regpairs_aligned(cpu_env)) {
6724 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
/*
 * Read a guest struct timespec into host form (tv_sec/tv_nsec swapped
 * by __get_user).  Returns 0 or -TARGET_EFAULT.
 */
6728 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
6729 abi_ulong target_addr)
6731 struct target_timespec *target_ts;
6733 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
6734 return -TARGET_EFAULT;
6735 __get_user(host_ts->tv_sec, &target_ts->tv_sec);
6736 __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6737 unlock_user_struct(target_ts, target_addr, 0);
/*
 * Write a host struct timespec out to guest memory.
 * Returns 0 or -TARGET_EFAULT.
 */
6741 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
6742 struct timespec *host_ts)
6744 struct target_timespec *target_ts;
6746 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
6747 return -TARGET_EFAULT;
6748 __put_user(host_ts->tv_sec, &target_ts->tv_sec);
6749 __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6750 unlock_user_struct(target_ts, target_addr, 1);
/*
 * Read a guest struct itimerspec into host form, byte-swapping each
 * field with tswapal.  Returns 0 or -TARGET_EFAULT.
 * NOTE(review): unlock passes copy=1 even though the struct was locked
 * VERIFY_READ and not modified — looks inconsistent with the other
 * copy-in helpers (which pass 0); confirm against the full source.
 */
6754 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6755 abi_ulong target_addr)
6757 struct target_itimerspec *target_itspec;
6759 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6760 return -TARGET_EFAULT;
6763 host_itspec->it_interval.tv_sec =
6764 tswapal(target_itspec->it_interval.tv_sec);
6765 host_itspec->it_interval.tv_nsec =
6766 tswapal(target_itspec->it_interval.tv_nsec);
6767 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6768 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6770 unlock_user_struct(target_itspec, target_addr, 1);
/*
 * Write a host struct itimerspec out to guest memory with tswapal.
 * Returns 0 or -TARGET_EFAULT.
 * NOTE(review): unlock passes copy=0 after a VERIFY_WRITE lock — the
 * mirror image of the oddity in target_to_host_itimerspec; harmless on
 * direct-mapped builds but worth confirming against the full source.
 */
6774 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6775 struct itimerspec *host_its)
6777 struct target_itimerspec *target_itspec;
6779 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6780 return -TARGET_EFAULT;
6783 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6784 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6786 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6787 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6789 unlock_user_struct(target_itspec, target_addr, 0);
/*
 * Read a guest struct timex (adjtimex parameter block) into host form,
 * field by field via __get_user.  Returns 0 or -TARGET_EFAULT.
 */
6793 static inline abi_long target_to_host_timex(struct timex *host_tx,
6794 abi_long target_addr)
6796 struct target_timex *target_tx;
6798 if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
6799 return -TARGET_EFAULT;
6802 __get_user(host_tx->modes, &target_tx->modes);
6803 __get_user(host_tx->offset, &target_tx->offset);
6804 __get_user(host_tx->freq, &target_tx->freq);
6805 __get_user(host_tx->maxerror, &target_tx->maxerror);
6806 __get_user(host_tx->esterror, &target_tx->esterror);
6807 __get_user(host_tx->status, &target_tx->status);
6808 __get_user(host_tx->constant, &target_tx->constant);
6809 __get_user(host_tx->precision, &target_tx->precision);
6810 __get_user(host_tx->tolerance, &target_tx->tolerance);
6811 __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6812 __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6813 __get_user(host_tx->tick, &target_tx->tick);
6814 __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6815 __get_user(host_tx->jitter, &target_tx->jitter);
6816 __get_user(host_tx->shift, &target_tx->shift);
6817 __get_user(host_tx->stabil, &target_tx->stabil);
6818 __get_user(host_tx->jitcnt, &target_tx->jitcnt);
6819 __get_user(host_tx->calcnt, &target_tx->calcnt);
6820 __get_user(host_tx->errcnt, &target_tx->errcnt);
6821 __get_user(host_tx->stbcnt, &target_tx->stbcnt);
6822 __get_user(host_tx->tai, &target_tx->tai);
6824 unlock_user_struct(target_tx, target_addr, 0);
/*
 * Write a host struct timex back out to the guest, mirroring
 * target_to_host_timex field-for-field.  Returns 0 or -TARGET_EFAULT.
 */
6828 static inline abi_long host_to_target_timex(abi_long target_addr,
6829 struct timex *host_tx)
6831 struct target_timex *target_tx;
6833 if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
6834 return -TARGET_EFAULT;
6837 __put_user(host_tx->modes, &target_tx->modes);
6838 __put_user(host_tx->offset, &target_tx->offset);
6839 __put_user(host_tx->freq, &target_tx->freq);
6840 __put_user(host_tx->maxerror, &target_tx->maxerror);
6841 __put_user(host_tx->esterror, &target_tx->esterror);
6842 __put_user(host_tx->status, &target_tx->status);
6843 __put_user(host_tx->constant, &target_tx->constant);
6844 __put_user(host_tx->precision, &target_tx->precision);
6845 __put_user(host_tx->tolerance, &target_tx->tolerance);
6846 __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6847 __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6848 __put_user(host_tx->tick, &target_tx->tick);
6849 __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6850 __put_user(host_tx->jitter, &target_tx->jitter);
6851 __put_user(host_tx->shift, &target_tx->shift);
6852 __put_user(host_tx->stabil, &target_tx->stabil);
6853 __put_user(host_tx->jitcnt, &target_tx->jitcnt);
6854 __put_user(host_tx->calcnt, &target_tx->calcnt);
6855 __put_user(host_tx->errcnt, &target_tx->errcnt);
6856 __put_user(host_tx->stbcnt, &target_tx->stbcnt);
6857 __put_user(host_tx->tai, &target_tx->tai);
6859 unlock_user_struct(target_tx, target_addr, 1);
/*
 * Read a guest struct sigevent into host form.  The signal number is
 * remapped guest->host; sival_ptr and the notify/tid fields are
 * byte-swapped.  Returns 0 or -TARGET_EFAULT.
 */
6864 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6865 abi_ulong target_addr)
6867 struct target_sigevent *target_sevp;
6869 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6870 return -TARGET_EFAULT;
6873 /* This union is awkward on 64 bit systems because it has a 32 bit
6874  * integer and a pointer in it; we follow the conversion approach
6875  * used for handling sigval types in signal.c so the guest should get
6876  * the correct value back even if we did a 64 bit byteswap and it's
6877  * using the 32 bit integer.
6879 host_sevp->sigev_value.sival_ptr =
6880 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6881 host_sevp->sigev_signo =
6882 target_to_host_signal(tswap32(target_sevp->sigev_signo));
6883 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6884 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6886 unlock_user_struct(target_sevp, target_addr, 1);
6890 #if defined(TARGET_NR_mlockall)
/*
 * Translate the guest mlockall() flags word into the host's MCL_*
 * bits (only MCL_CURRENT and MCL_FUTURE are handled here).
 */
6891 static inline int target_to_host_mlockall_arg(int arg)
6895 if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
6896 result |= MCL_CURRENT;
6898 if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
6899 result |= MCL_FUTURE;
/*
 * Write a host struct stat out to the guest as a stat64.  Three guest
 * layouts are handled: ARM-EABI (target_eabi_stat64), a target-specific
 * target_stat64 when TARGET_HAS_STRUCT_STAT64 is set, and plain
 * target_stat otherwise.  Returns 0 or -TARGET_EFAULT.
 */
6905 static inline abi_long host_to_target_stat64(void *cpu_env,
6906 abi_ulong target_addr,
6907 struct stat *host_st)
6909 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
/* runtime check: ARM guests may be EABI or OABI */
6910 if (((CPUARMState *)cpu_env)->eabi) {
6911 struct target_eabi_stat64 *target_st;
6913 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6914 return -TARGET_EFAULT;
/* zero first so padding/unfilled fields read as 0 in the guest */
6915 memset(target_st, 0, sizeof(struct target_eabi_stat64));
6916 __put_user(host_st->st_dev, &target_st->st_dev);
6917 __put_user(host_st->st_ino, &target_st->st_ino);
6918 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
/* some ABIs carry a duplicate 32-bit __st_ino alongside st_ino */
6919 __put_user(host_st->st_ino, &target_st->__st_ino);
6921 __put_user(host_st->st_mode, &target_st->st_mode);
6922 __put_user(host_st->st_nlink, &target_st->st_nlink);
6923 __put_user(host_st->st_uid, &target_st->st_uid);
6924 __put_user(host_st->st_gid, &target_st->st_gid);
6925 __put_user(host_st->st_rdev, &target_st->st_rdev);
6926 __put_user(host_st->st_size, &target_st->st_size);
6927 __put_user(host_st->st_blksize, &target_st->st_blksize);
6928 __put_user(host_st->st_blocks, &target_st->st_blocks);
6929 __put_user(host_st->st_atime, &target_st->target_st_atime);
6930 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6931 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6932 unlock_user_struct(target_st, target_addr, 1);
6936 #if defined(TARGET_HAS_STRUCT_STAT64)
6937 struct target_stat64 *target_st;
6939 struct target_stat *target_st;
6942 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6943 return -TARGET_EFAULT;
6944 memset(target_st, 0, sizeof(*target_st));
6945 __put_user(host_st->st_dev, &target_st->st_dev);
6946 __put_user(host_st->st_ino, &target_st->st_ino);
6947 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6948 __put_user(host_st->st_ino, &target_st->__st_ino);
6950 __put_user(host_st->st_mode, &target_st->st_mode);
6951 __put_user(host_st->st_nlink, &target_st->st_nlink);
6952 __put_user(host_st->st_uid, &target_st->st_uid);
6953 __put_user(host_st->st_gid, &target_st->st_gid);
6954 __put_user(host_st->st_rdev, &target_st->st_rdev);
6955 /* XXX: better use of kernel struct */
6956 __put_user(host_st->st_size, &target_st->st_size);
6957 __put_user(host_st->st_blksize, &target_st->st_blksize);
6958 __put_user(host_st->st_blocks, &target_st->st_blocks);
6959 __put_user(host_st->st_atime, &target_st->target_st_atime);
6960 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6961 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6962 unlock_user_struct(target_st, target_addr, 1);
6968 /* ??? Using host futex calls even when target atomic operations
6969    are not really atomic probably breaks things. However implementing
6970    futexes locally would make futexes shared between multiple processes
6971    tricky. However they're probably useless because guest atomic
6972    operations won't work either. */
/*
 * Emulate futex(2): translate the guest address via g2h, marshal the
 * optional timespec for wait operations, and forward to safe_futex.
 * Returns the host result or -TARGET_ENOSYS for unhandled ops.
 */
6973 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
6974                     target_ulong uaddr2, int val3)
6976 struct timespec ts, *pts;
6979 /* ??? We assume FUTEX_* constants are the same on both host
6981 #ifdef FUTEX_CMD_MASK
/* strip FUTEX_PRIVATE_FLAG/CLOCK_REALTIME bits to find the base op */
6982 base_op = op & FUTEX_CMD_MASK;
6988 case FUTEX_WAIT_BITSET:
/* WAIT takes an optional absolute/relative timeout from the guest */
6991 target_to_host_timespec(pts, timeout);
/* guest stores the futex word in guest byte order, hence tswap32 */
6995 return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
6998 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
7000 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
7002 case FUTEX_CMP_REQUEUE:
7004 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7005    TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7006    But the prototype takes a `struct timespec *'; insert casts
7007    to satisfy the compiler. We do not need to tswap TIMEOUT
7008    since it's not compared to guest memory. */
7009 pts = (struct timespec *)(uintptr_t) timeout;
7010 return get_errno(safe_futex(g2h(uaddr), op, val, pts,
7012 (base_op == FUTEX_CMP_REQUEUE
7016 return -TARGET_ENOSYS;
7019 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Emulate name_to_handle_at(2): read handle_bytes from the guest handle
 * buffer, call the host syscall into a scratch file_handle, then copy
 * the (opaque) handle plus byte-swapped header and mount id back out.
 * Returns the host result or a -TARGET_* errno.
 */
7020 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7021                                      abi_long handle, abi_long mount_id,
7024 struct file_handle *target_fh;
7025 struct file_handle *fh;
7029 unsigned int size, total_size;
/* first field of the guest buffer is handle_bytes (s32) */
7031 if (get_user_s32(size, handle)) {
7032 return -TARGET_EFAULT;
7035 name = lock_user_string(pathname);
7037 return -TARGET_EFAULT;
/* guest buffer must hold the header plus 'size' opaque bytes */
7040 total_size = sizeof(struct file_handle) + size;
7041 target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7043 unlock_user(name, pathname, 0);
7044 return -TARGET_EFAULT;
7047 fh = g_malloc0(total_size);
7048 fh->handle_bytes = size;
7050 ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7051 unlock_user(name, pathname, 0);
7053 /* man name_to_handle_at(2):
7054  * Other than the use of the handle_bytes field, the caller should treat
7055  * the file_handle structure as an opaque data type
7058 memcpy(target_fh, fh, total_size);
7059 target_fh->handle_bytes = tswap32(fh->handle_bytes);
7060 target_fh->handle_type = tswap32(fh->handle_type);
7062 unlock_user(target_fh, handle, total_size);
7064 if (put_user_s32(mid, mount_id)) {
7065 return -TARGET_EFAULT;
7073 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Emulate open_by_handle_at(2): copy the guest file_handle in (fixing
 * the byte order of its header fields), remap the open flags, and call
 * the host syscall.  Returns the new fd or a -TARGET_* errno.
 */
7074 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7077 struct file_handle *target_fh;
7078 struct file_handle *fh;
7079 unsigned int size, total_size;
7082 if (get_user_s32(size, handle)) {
7083 return -TARGET_EFAULT;
7086 total_size = sizeof(struct file_handle) + size;
7087 target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7089 return -TARGET_EFAULT;
/* duplicate the guest buffer, then overwrite the header in host order */
7092 fh = g_memdup(target_fh, total_size);
7093 fh->handle_bytes = size;
7094 fh->handle_type = tswap32(target_fh->handle_type);
7096 ret = get_errno(open_by_handle_at(mount_fd, fh,
7097                 target_to_host_bitmask(flags, fcntl_flags_tbl)));
7101 unlock_user(target_fh, handle, total_size);
7107 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7109 /* signalfd siginfo conversion */
/*
 * Convert a host signalfd_siginfo to guest byte order/signal numbering,
 * writing into tinfo.  In practice this is called with tinfo == info
 * (in-place, see host_to_target_data_signalfd below).
 * NOTE(review): ssi_errno is read from tinfo rather than info, and the
 * BUS_MCEERR test reads tinfo before any store — both only make sense
 * for the in-place call; confirm before reusing with distinct buffers.
 */
7112 host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
7113                                 const struct signalfd_siginfo *info)
7115 int sig = host_to_target_signal(info->ssi_signo);
7117 /* linux/signalfd.h defines a ssi_addr_lsb
7118  * not defined in sys/signalfd.h but used by some kernels
7121 #ifdef BUS_MCEERR_AO
/* machine-check SIGBUS carries an extra LSB word just past ssi_addr */
7122 if (tinfo->ssi_signo == SIGBUS &&
7123     (tinfo->ssi_code == BUS_MCEERR_AR ||
7124      tinfo->ssi_code == BUS_MCEERR_AO)) {
7125 uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
7126 uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
7127 *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
7131 tinfo->ssi_signo = tswap32(sig);
7132 tinfo->ssi_errno = tswap32(tinfo->ssi_errno);
7133 tinfo->ssi_code = tswap32(info->ssi_code);
7134 tinfo->ssi_pid = tswap32(info->ssi_pid);
7135 tinfo->ssi_uid = tswap32(info->ssi_uid);
7136 tinfo->ssi_fd = tswap32(info->ssi_fd);
7137 tinfo->ssi_tid = tswap32(info->ssi_tid);
7138 tinfo->ssi_band = tswap32(info->ssi_band);
7139 tinfo->ssi_overrun = tswap32(info->ssi_overrun);
7140 tinfo->ssi_trapno = tswap32(info->ssi_trapno);
7141 tinfo->ssi_status = tswap32(info->ssi_status);
7142 tinfo->ssi_int = tswap32(info->ssi_int);
7143 tinfo->ssi_ptr = tswap64(info->ssi_ptr);
7144 tinfo->ssi_utime = tswap64(info->ssi_utime);
7145 tinfo->ssi_stime = tswap64(info->ssi_stime);
7146 tinfo->ssi_addr = tswap64(info->ssi_addr);
/*
 * fd_trans read hook for signalfd descriptors: convert each
 * signalfd_siginfo record in the read buffer in place.
 */
7149 static abi_long host_to_target_data_signalfd(void *buf, size_t len)
7153 for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) {
7154 host_to_target_signalfd_siginfo(buf + i, buf + i);
/* translation table registered for fds created by do_signalfd4() */
7160 static TargetFdTrans target_signalfd_trans = {
7161     .host_to_target_data = host_to_target_data_signalfd,
/*
 * Emulate signalfd4(2): validate flags, convert the guest sigset and
 * flag bits to host form, create the fd, and register the per-fd data
 * translator so reads get their siginfo records byte-swapped.
 */
7164 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7167 target_sigset_t *target_mask;
/* only NONBLOCK and CLOEXEC are valid signalfd4 flags */
7171 if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
7172 return -TARGET_EINVAL;
7174 if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7175 return -TARGET_EFAULT;
7178 target_to_host_sigset(&host_mask, target_mask);
7180 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7182 ret = get_errno(signalfd(fd, &host_mask, host_flags));
7184 fd_trans_register(ret, &target_signalfd_trans);
7187 unlock_user_struct(target_mask, mask, 0);
7193 /* Map host to target signal numbers for the wait family of syscalls.
7194    Assume all other status bits are the same. */
/* Remap only the embedded signal number: low 7 bits for a termination
 * signal, bits 8-15 for a stop signal; everything else passes through. */
7195 int host_to_target_waitstatus(int status)
7197 if (WIFSIGNALED(status)) {
7198 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7200 if (WIFSTOPPED(status)) {
7201 return (host_to_target_signal(WSTOPSIG(status)) << 8)
/*
 * Fake /proc/self/cmdline for the guest: stream the host file into fd,
 * but skip the first NUL-terminated string (the qemu binary path) so
 * the guest sees its own argv[0] first.  Returns 0 or -1 on error.
 */
7207 static int open_self_cmdline(void *cpu_env, int fd)
7210 bool word_skipped = false;
7212 fd_orig = open("/proc/self/cmdline", O_RDONLY);
7222 nb_read = read(fd_orig, buf, sizeof(buf));
7225 fd_orig = close(fd_orig);
7228 } else if (nb_read == 0) {
7232 if (!word_skipped) {
7233 /* Skip the first string, which is the path to qemu-*-static
7234    instead of the actual command. */
7235 cp_buf = memchr(buf, 0, nb_read);
7237 /* Null byte found, skip one string */
7239 nb_read -= cp_buf - buf;
7240 word_skipped = true;
7245 if (write(fd, cp_buf, nb_read) != nb_read) {
7254 return close(fd_orig);
/*
 * Fake /proc/self/maps for the guest: parse the host maps file, keep
 * only ranges that translate to valid guest addresses (h2g), rewrite
 * them in guest address terms, and label the guest stack region.
 */
7257 static int open_self_maps(void *cpu_env, int fd)
7259 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7260 TaskState *ts = cpu->opaque;
7266 fp = fopen("/proc/self/maps", "r");
7271 while ((read = getline(&line, &len, fp)) != -1) {
7272 int fields, dev_maj, dev_min, inode;
7273 uint64_t min, max, offset;
7274 char flag_r, flag_w, flag_x, flag_p;
7275 char path[512] = "";
7276 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
7277                 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
7278                 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
/* 10 fields when the path column is empty, 11 when present */
7280 if ((fields < 10) || (fields > 11)) {
7283 if (h2g_valid(min)) {
7284 int flags = page_get_flags(h2g(min));
/* clamp ranges that extend past the top of guest address space */
7285 max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX);
7286 if (page_check_range(h2g(min), max - min, flags) == -1) {
7289 if (h2g(min) == ts->info->stack_limit) {
7290 pstrcpy(path, sizeof(path), " [stack]");
7292 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
7293         " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
7294         h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
7295         flag_x, flag_p, offset, dev_maj, dev_min, inode,
7296         path[0] ? " " : "", path);
/*
 * Fake /proc/self/stat for the guest: emit 44 space-separated fields,
 * filling in only pid (0), comm (1) and start-of-stack (27) with real
 * guest values; all other fields are written as 0.
 */
7306 static int open_self_stat(void *cpu_env, int fd)
7308 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7309 TaskState *ts = cpu->opaque;
7310 abi_ulong start_stack = ts->info->start_stack;
7313 for (i = 0; i < 44; i++) {
7321 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7322 } else if (i == 1) {
/* field 1 is the command name, parenthesised as the kernel does */
7324 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
7325 } else if (i == 27) {
7328 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7330 /* for the rest, there is MasterCard */
7331 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
7335 if (write(fd, buf, len) != len) {
/*
 * Fake /proc/self/auxv for the guest: copy the auxiliary vector that
 * was placed on the guest stack at exec time straight into fd, then
 * rewind fd so the guest reads from offset 0.
 */
7343 static int open_self_auxv(void *cpu_env, int fd)
7345 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7346 TaskState *ts = cpu->opaque;
7347 abi_ulong auxv = ts->info->saved_auxv;
7348 abi_ulong len = ts->info->auxv_len;
7352 /* Auxiliary vector is stored in target process stack.
7353  * read in whole auxv vector and copy it to file
7355 ptr = lock_user(VERIFY_READ, auxv, len, 0);
7359 r = write(fd, ptr, len);
7366 lseek(fd, 0, SEEK_SET);
7367 unlock_user(ptr, auxv, len);
/*
 * Return nonzero if filename names /proc/<self>/<entry>, accepting both
 * the literal "self" component and the process's own numeric pid.
 */
7373 static int is_proc_myself(const char *filename, const char *entry)
7375 if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
7376 filename += strlen("/proc/");
7377 if (!strncmp(filename, "self/", strlen("self/"))) {
7378 filename += strlen("self/");
7379 } else if (*filename >= '1' && *filename <= '9') {
7381 snprintf(myself, sizeof(myself), "%d/", getpid());
7382 if (!strncmp(filename, myself, strlen(myself))) {
7383 filename += strlen(myself);
7390 if (!strcmp(filename, entry)) {
7397 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-path matcher used by the fake-open table when host and guest
 * endianness differ (so /proc files with binary-ish data need fixups). */
7398 static int is_proc(const char *filename, const char *entry)
7400 return strcmp(filename, entry) == 0;
/*
 * Fake /proc/net/route for cross-endian guests: pass the header line
 * through untouched, then byte-swap the dest/gateway/mask address
 * columns of each route entry so the guest sees them in its own order.
 */
7403 static int open_net_route(void *cpu_env, int fd)
7410 fp = fopen("/proc/net/route", "r");
/* first line is the column-header row; copy verbatim */
7417 read = getline(&line, &len, fp);
7418 dprintf(fd, "%s", line);
7422 while ((read = getline(&line, &len, fp)) != -1) {
7424 uint32_t dest, gw, mask;
7425 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7426 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7427        iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7428        &mask, &mtu, &window, &irtt);
7429 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7430         iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7431         metric, tswap32(mask), mtu, window, irtt);
/*
 * openat(2) front-end with /proc interception: certain /proc/self (and,
 * cross-endian, /proc/net/route) paths are replaced by synthesised
 * content written to a mkstemp temp file; "exe" resolves to the real
 * guest executable; everything else goes to safe_openat.
 */
7441 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
7444 const char *filename;
7445 int (*fill)(void *cpu_env, int fd);
7446 int (*cmp)(const char *s1, const char *s2);
7448 const struct fake_open *fake_open;
/* table of /proc entries that must be faked for the guest */
7449 static const struct fake_open fakes[] = {
7450     { "maps", open_self_maps, is_proc_myself },
7451     { "stat", open_self_stat, is_proc_myself },
7452     { "auxv", open_self_auxv, is_proc_myself },
7453     { "cmdline", open_self_cmdline, is_proc_myself },
7454 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7455     { "/proc/net/route", open_net_route, is_proc },
7457     { NULL, NULL, NULL }
/* /proc/self/exe: prefer the AT_EXECFD fd handed to us by the loader */
7460 if (is_proc_myself(pathname, "exe")) {
7461 int execfd = qemu_getauxval(AT_EXECFD);
7462 return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
7465 for (fake_open = fakes; fake_open->filename; fake_open++) {
7466 if (fake_open->cmp(pathname, fake_open->filename)) {
7471 if (fake_open->filename) {
7473 char filename[PATH_MAX];
7476 /* create temporary file to map stat to */
7477 tmpdir = getenv("TMPDIR");
7480 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
7481 fd = mkstemp(filename);
/* populate the temp file with synthesised contents, then rewind */
7487 if ((r = fake_open->fill(cpu_env, fd))) {
7493 lseek(fd, 0, SEEK_SET);
7498 return safe_openat(dirfd, path(pathname), flags, mode);
/* POSIX timer IDs handed to the guest carry a magic tag in the upper
 * 16 bits so stale/forged IDs can be rejected on the way back in. */
7501 #define TIMER_MAGIC 0x0caf0000
7502 #define TIMER_MAGIC_MASK 0xffff0000
7504 /* Convert QEMU provided timer ID back to internal 16bit index format */
/* Returns the g_posix_timers index, or -TARGET_EINVAL for a bad ID. */
7505 static target_timer_t get_timer_id(abi_long arg)
7507 target_timer_t timerid = arg;
7509 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7510 return -TARGET_EINVAL;
7515 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7516 return -TARGET_EINVAL;
7522 /* do_syscall() should always have a single exit point at the end so
7523 that actions, such as logging of syscall results, can be performed.
7524 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
7525 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
7526 abi_long arg2, abi_long arg3, abi_long arg4,
7527 abi_long arg5, abi_long arg6, abi_long arg7,
7530 CPUState *cpu = ENV_GET_CPU(cpu_env);
7536 #if defined(DEBUG_ERESTARTSYS)
7537 /* Debug-only code for exercising the syscall-restart code paths
7538 * in the per-architecture cpu main loops: restart every syscall
7539 * the guest makes once before letting it through.
7546 return -TARGET_ERESTARTSYS;
7552 gemu_log("syscall %d", num);
7554 trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
7556 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
7559 case TARGET_NR_exit:
7560 /* In old applications this may be used to implement _exit(2).
7561 However in threaded applictions it is used for thread termination,
7562 and _exit_group is used for application termination.
7563 Do thread termination if we have more then one thread. */
7565 if (block_signals()) {
7566 ret = -TARGET_ERESTARTSYS;
7572 if (CPU_NEXT(first_cpu)) {
7575 /* Remove the CPU from the list. */
7576 QTAILQ_REMOVE(&cpus, cpu, node);
7581 if (ts->child_tidptr) {
7582 put_user_u32(0, ts->child_tidptr);
7583 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7587 object_unref(OBJECT(cpu));
7589 rcu_unregister_thread();
7597 gdb_exit(cpu_env, arg1);
7599 ret = 0; /* avoid warning */
7601 case TARGET_NR_read:
7605 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7607 ret = get_errno(safe_read(arg1, p, arg3));
7609 fd_trans_host_to_target_data(arg1)) {
7610 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7612 unlock_user(p, arg2, ret);
7615 case TARGET_NR_write:
7616 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7618 ret = get_errno(safe_write(arg1, p, arg3));
7619 unlock_user(p, arg2, 0);
7621 #ifdef TARGET_NR_open
7622 case TARGET_NR_open:
7623 if (!(p = lock_user_string(arg1)))
7625 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7626 target_to_host_bitmask(arg2, fcntl_flags_tbl),
7628 fd_trans_unregister(ret);
7629 unlock_user(p, arg1, 0);
7632 case TARGET_NR_openat:
7633 if (!(p = lock_user_string(arg2)))
7635 ret = get_errno(do_openat(cpu_env, arg1, p,
7636 target_to_host_bitmask(arg3, fcntl_flags_tbl),
7638 fd_trans_unregister(ret);
7639 unlock_user(p, arg2, 0);
7641 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7642 case TARGET_NR_name_to_handle_at:
7643 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7646 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7647 case TARGET_NR_open_by_handle_at:
7648 ret = do_open_by_handle_at(arg1, arg2, arg3);
7649 fd_trans_unregister(ret);
7652 case TARGET_NR_close:
7653 fd_trans_unregister(arg1);
7654 ret = get_errno(close(arg1));
7659 #ifdef TARGET_NR_fork
7660 case TARGET_NR_fork:
7661 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
7664 #ifdef TARGET_NR_waitpid
7665 case TARGET_NR_waitpid:
7668 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7669 if (!is_error(ret) && arg2 && ret
7670 && put_user_s32(host_to_target_waitstatus(status), arg2))
7675 #ifdef TARGET_NR_waitid
7676 case TARGET_NR_waitid:
7680 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7681 if (!is_error(ret) && arg3 && info.si_pid != 0) {
7682 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7684 host_to_target_siginfo(p, &info);
7685 unlock_user(p, arg3, sizeof(target_siginfo_t));
7690 #ifdef TARGET_NR_creat /* not on alpha */
7691 case TARGET_NR_creat:
7692 if (!(p = lock_user_string(arg1)))
7694 ret = get_errno(creat(p, arg2));
7695 fd_trans_unregister(ret);
7696 unlock_user(p, arg1, 0);
7699 #ifdef TARGET_NR_link
7700 case TARGET_NR_link:
7703 p = lock_user_string(arg1);
7704 p2 = lock_user_string(arg2);
7706 ret = -TARGET_EFAULT;
7708 ret = get_errno(link(p, p2));
7709 unlock_user(p2, arg2, 0);
7710 unlock_user(p, arg1, 0);
7714 #if defined(TARGET_NR_linkat)
7715 case TARGET_NR_linkat:
7720 p = lock_user_string(arg2);
7721 p2 = lock_user_string(arg4);
7723 ret = -TARGET_EFAULT;
7725 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7726 unlock_user(p, arg2, 0);
7727 unlock_user(p2, arg4, 0);
7731 #ifdef TARGET_NR_unlink
7732 case TARGET_NR_unlink:
7733 if (!(p = lock_user_string(arg1)))
7735 ret = get_errno(unlink(p));
7736 unlock_user(p, arg1, 0);
7739 #if defined(TARGET_NR_unlinkat)
7740 case TARGET_NR_unlinkat:
7741 if (!(p = lock_user_string(arg2)))
7743 ret = get_errno(unlinkat(arg1, p, arg3));
7744 unlock_user(p, arg2, 0);
7747 case TARGET_NR_execve:
7749 char **argp, **envp;
7752 abi_ulong guest_argp;
7753 abi_ulong guest_envp;
7760 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7761 if (get_user_ual(addr, gp))
7769 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7770 if (get_user_ual(addr, gp))
7777 argp = alloca((argc + 1) * sizeof(void *));
7778 envp = alloca((envc + 1) * sizeof(void *));
7780 for (gp = guest_argp, q = argp; gp;
7781 gp += sizeof(abi_ulong), q++) {
7782 if (get_user_ual(addr, gp))
7786 if (!(*q = lock_user_string(addr)))
7788 total_size += strlen(*q) + 1;
7792 for (gp = guest_envp, q = envp; gp;
7793 gp += sizeof(abi_ulong), q++) {
7794 if (get_user_ual(addr, gp))
7798 if (!(*q = lock_user_string(addr)))
7800 total_size += strlen(*q) + 1;
7804 if (!(p = lock_user_string(arg1)))
7806 /* Although execve() is not an interruptible syscall it is
7807 * a special case where we must use the safe_syscall wrapper:
7808 * if we allow a signal to happen before we make the host
7809 * syscall then we will 'lose' it, because at the point of
7810 * execve the process leaves QEMU's control. So we use the
7811 * safe syscall wrapper to ensure that we either take the
7812 * signal as a guest signal, or else it does not happen
7813 * before the execve completes and makes it the other
7814 * program's problem.
7816 ret = get_errno(safe_execve(p, argp, envp));
7817 unlock_user(p, arg1, 0);
7822 ret = -TARGET_EFAULT;
7825 for (gp = guest_argp, q = argp; *q;
7826 gp += sizeof(abi_ulong), q++) {
7827 if (get_user_ual(addr, gp)
7830 unlock_user(*q, addr, 0);
7832 for (gp = guest_envp, q = envp; *q;
7833 gp += sizeof(abi_ulong), q++) {
7834 if (get_user_ual(addr, gp)
7837 unlock_user(*q, addr, 0);
7841 case TARGET_NR_chdir:
7842 if (!(p = lock_user_string(arg1)))
7844 ret = get_errno(chdir(p));
7845 unlock_user(p, arg1, 0);
7847 #ifdef TARGET_NR_time
7848 case TARGET_NR_time:
7851 ret = get_errno(time(&host_time));
7854 && put_user_sal(host_time, arg1))
7859 #ifdef TARGET_NR_mknod
7860 case TARGET_NR_mknod:
7861 if (!(p = lock_user_string(arg1)))
7863 ret = get_errno(mknod(p, arg2, arg3));
7864 unlock_user(p, arg1, 0);
7867 #if defined(TARGET_NR_mknodat)
7868 case TARGET_NR_mknodat:
7869 if (!(p = lock_user_string(arg2)))
7871 ret = get_errno(mknodat(arg1, p, arg3, arg4));
7872 unlock_user(p, arg2, 0);
7875 #ifdef TARGET_NR_chmod
7876 case TARGET_NR_chmod:
7877 if (!(p = lock_user_string(arg1)))
7879 ret = get_errno(chmod(p, arg2));
7880 unlock_user(p, arg1, 0);
7883 #ifdef TARGET_NR_break
7884 case TARGET_NR_break:
7887 #ifdef TARGET_NR_oldstat
7888 case TARGET_NR_oldstat:
7891 case TARGET_NR_lseek:
7892 ret = get_errno(lseek(arg1, arg2, arg3));
7894 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7895 /* Alpha specific */
7896 case TARGET_NR_getxpid:
7897 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7898 ret = get_errno(getpid());
7901 #ifdef TARGET_NR_getpid
7902 case TARGET_NR_getpid:
7903 ret = get_errno(getpid());
7906 case TARGET_NR_mount:
7908 /* need to look at the data field */
7912 p = lock_user_string(arg1);
7920 p2 = lock_user_string(arg2);
7923 unlock_user(p, arg1, 0);
7929 p3 = lock_user_string(arg3);
7932 unlock_user(p, arg1, 0);
7934 unlock_user(p2, arg2, 0);
7941 /* FIXME - arg5 should be locked, but it isn't clear how to
7942 * do that since it's not guaranteed to be a NULL-terminated
7946 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7948 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7950 ret = get_errno(ret);
7953 unlock_user(p, arg1, 0);
7955 unlock_user(p2, arg2, 0);
7957 unlock_user(p3, arg3, 0);
7961 #ifdef TARGET_NR_umount
7962 case TARGET_NR_umount:
7963 if (!(p = lock_user_string(arg1)))
7965 ret = get_errno(umount(p));
7966 unlock_user(p, arg1, 0);
7969 #ifdef TARGET_NR_stime /* not on alpha */
7970 case TARGET_NR_stime:
7973 if (get_user_sal(host_time, arg1))
7975 ret = get_errno(stime(&host_time));
7979 case TARGET_NR_ptrace:
7981 #ifdef TARGET_NR_alarm /* not on alpha */
7982 case TARGET_NR_alarm:
7986 #ifdef TARGET_NR_oldfstat
7987 case TARGET_NR_oldfstat:
7990 #ifdef TARGET_NR_pause /* not on alpha */
7991 case TARGET_NR_pause:
7992 if (!block_signals()) {
7993 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7995 ret = -TARGET_EINTR;
7998 #ifdef TARGET_NR_utime
7999 case TARGET_NR_utime:
8001 struct utimbuf tbuf, *host_tbuf;
8002 struct target_utimbuf *target_tbuf;
8004 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8006 tbuf.actime = tswapal(target_tbuf->actime);
8007 tbuf.modtime = tswapal(target_tbuf->modtime);
8008 unlock_user_struct(target_tbuf, arg2, 0);
8013 if (!(p = lock_user_string(arg1)))
8015 ret = get_errno(utime(p, host_tbuf));
8016 unlock_user(p, arg1, 0);
8020 #ifdef TARGET_NR_utimes
8021 case TARGET_NR_utimes:
8023 struct timeval *tvp, tv[2];
8025 if (copy_from_user_timeval(&tv[0], arg2)
8026 || copy_from_user_timeval(&tv[1],
8027 arg2 + sizeof(struct target_timeval)))
8033 if (!(p = lock_user_string(arg1)))
8035 ret = get_errno(utimes(p, tvp));
8036 unlock_user(p, arg1, 0);
8040 #if defined(TARGET_NR_futimesat)
8041 case TARGET_NR_futimesat:
8043 struct timeval *tvp, tv[2];
8045 if (copy_from_user_timeval(&tv[0], arg3)
8046 || copy_from_user_timeval(&tv[1],
8047 arg3 + sizeof(struct target_timeval)))
8053 if (!(p = lock_user_string(arg2)))
8055 ret = get_errno(futimesat(arg1, path(p), tvp));
8056 unlock_user(p, arg2, 0);
8060 #ifdef TARGET_NR_stty
8061 case TARGET_NR_stty:
8064 #ifdef TARGET_NR_gtty
8065 case TARGET_NR_gtty:
8068 #ifdef TARGET_NR_access
8069 case TARGET_NR_access:
8070 if (!(p = lock_user_string(arg1)))
8072 ret = get_errno(access(path(p), arg2));
8073 unlock_user(p, arg1, 0);
8076 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8077 case TARGET_NR_faccessat:
8078 if (!(p = lock_user_string(arg2)))
8080 ret = get_errno(faccessat(arg1, p, arg3, 0));
8081 unlock_user(p, arg2, 0);
8084 #ifdef TARGET_NR_nice /* not on alpha */
8085 case TARGET_NR_nice:
8086 ret = get_errno(nice(arg1));
8089 #ifdef TARGET_NR_ftime
8090 case TARGET_NR_ftime:
8093 case TARGET_NR_sync:
8097 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8098 case TARGET_NR_syncfs:
8099 ret = get_errno(syncfs(arg1));
8102 case TARGET_NR_kill:
8103 ret = get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8105 #ifdef TARGET_NR_rename
8106 case TARGET_NR_rename:
8109 p = lock_user_string(arg1);
8110 p2 = lock_user_string(arg2);
8112 ret = -TARGET_EFAULT;
8114 ret = get_errno(rename(p, p2));
8115 unlock_user(p2, arg2, 0);
8116 unlock_user(p, arg1, 0);
8120 #if defined(TARGET_NR_renameat)
8121 case TARGET_NR_renameat:
8124 p = lock_user_string(arg2);
8125 p2 = lock_user_string(arg4);
8127 ret = -TARGET_EFAULT;
8129 ret = get_errno(renameat(arg1, p, arg3, p2));
8130 unlock_user(p2, arg4, 0);
8131 unlock_user(p, arg2, 0);
8135 #ifdef TARGET_NR_mkdir
8136 case TARGET_NR_mkdir:
8137 if (!(p = lock_user_string(arg1)))
8139 ret = get_errno(mkdir(p, arg2));
8140 unlock_user(p, arg1, 0);
8143 #if defined(TARGET_NR_mkdirat)
8144 case TARGET_NR_mkdirat:
8145 if (!(p = lock_user_string(arg2)))
8147 ret = get_errno(mkdirat(arg1, p, arg3));
8148 unlock_user(p, arg2, 0);
8151 #ifdef TARGET_NR_rmdir
8152 case TARGET_NR_rmdir:
8153 if (!(p = lock_user_string(arg1)))
8155 ret = get_errno(rmdir(p));
8156 unlock_user(p, arg1, 0);
8160 ret = get_errno(dup(arg1));
8162 fd_trans_dup(arg1, ret);
8165 #ifdef TARGET_NR_pipe
8166 case TARGET_NR_pipe:
8167 ret = do_pipe(cpu_env, arg1, 0, 0);
8170 #ifdef TARGET_NR_pipe2
8171 case TARGET_NR_pipe2:
8172 ret = do_pipe(cpu_env, arg1,
8173 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8176 case TARGET_NR_times:
8178 struct target_tms *tmsp;
8180 ret = get_errno(times(&tms));
8182 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8185 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8186 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8187 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8188 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8191 ret = host_to_target_clock_t(ret);
8194 #ifdef TARGET_NR_prof
8195 case TARGET_NR_prof:
8198 #ifdef TARGET_NR_signal
8199 case TARGET_NR_signal:
8202 case TARGET_NR_acct:
8204 ret = get_errno(acct(NULL));
8206 if (!(p = lock_user_string(arg1)))
8208 ret = get_errno(acct(path(p)));
8209 unlock_user(p, arg1, 0);
8212 #ifdef TARGET_NR_umount2
8213 case TARGET_NR_umount2:
8214 if (!(p = lock_user_string(arg1)))
8216 ret = get_errno(umount2(p, arg2));
8217 unlock_user(p, arg1, 0);
8220 #ifdef TARGET_NR_lock
8221 case TARGET_NR_lock:
8224 case TARGET_NR_ioctl:
8225 ret = do_ioctl(arg1, arg2, arg3);
8227 case TARGET_NR_fcntl:
8228 ret = do_fcntl(arg1, arg2, arg3);
8230 #ifdef TARGET_NR_mpx
8234 case TARGET_NR_setpgid:
8235 ret = get_errno(setpgid(arg1, arg2));
8237 #ifdef TARGET_NR_ulimit
8238 case TARGET_NR_ulimit:
8241 #ifdef TARGET_NR_oldolduname
8242 case TARGET_NR_oldolduname:
8245 case TARGET_NR_umask:
8246 ret = get_errno(umask(arg1));
8248 case TARGET_NR_chroot:
8249 if (!(p = lock_user_string(arg1)))
8251 ret = get_errno(chroot(p));
8252 unlock_user(p, arg1, 0);
8254 #ifdef TARGET_NR_ustat
8255 case TARGET_NR_ustat:
8258 #ifdef TARGET_NR_dup2
8259 case TARGET_NR_dup2:
8260 ret = get_errno(dup2(arg1, arg2));
8262 fd_trans_dup(arg1, arg2);
8266 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8267 case TARGET_NR_dup3:
8268 ret = get_errno(dup3(arg1, arg2, arg3));
8270 fd_trans_dup(arg1, arg2);
8274 #ifdef TARGET_NR_getppid /* not on alpha */
8275 case TARGET_NR_getppid:
8276 ret = get_errno(getppid());
8279 #ifdef TARGET_NR_getpgrp
8280 case TARGET_NR_getpgrp:
8281 ret = get_errno(getpgrp());
8284 case TARGET_NR_setsid:
8285 ret = get_errno(setsid());
8287 #ifdef TARGET_NR_sigaction
8288 case TARGET_NR_sigaction:
8290 #if defined(TARGET_ALPHA)
8291 struct target_sigaction act, oact, *pact = 0;
8292 struct target_old_sigaction *old_act;
8294 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8296 act._sa_handler = old_act->_sa_handler;
8297 target_siginitset(&act.sa_mask, old_act->sa_mask);
8298 act.sa_flags = old_act->sa_flags;
8299 act.sa_restorer = 0;
8300 unlock_user_struct(old_act, arg2, 0);
8303 ret = get_errno(do_sigaction(arg1, pact, &oact));
8304 if (!is_error(ret) && arg3) {
8305 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8307 old_act->_sa_handler = oact._sa_handler;
8308 old_act->sa_mask = oact.sa_mask.sig[0];
8309 old_act->sa_flags = oact.sa_flags;
8310 unlock_user_struct(old_act, arg3, 1);
8312 #elif defined(TARGET_MIPS)
8313 struct target_sigaction act, oact, *pact, *old_act;
8316 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8318 act._sa_handler = old_act->_sa_handler;
8319 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8320 act.sa_flags = old_act->sa_flags;
8321 unlock_user_struct(old_act, arg2, 0);
8327 ret = get_errno(do_sigaction(arg1, pact, &oact));
8329 if (!is_error(ret) && arg3) {
8330 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8332 old_act->_sa_handler = oact._sa_handler;
8333 old_act->sa_flags = oact.sa_flags;
8334 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8335 old_act->sa_mask.sig[1] = 0;
8336 old_act->sa_mask.sig[2] = 0;
8337 old_act->sa_mask.sig[3] = 0;
8338 unlock_user_struct(old_act, arg3, 1);
8341 struct target_old_sigaction *old_act;
8342 struct target_sigaction act, oact, *pact;
8344 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8346 act._sa_handler = old_act->_sa_handler;
8347 target_siginitset(&act.sa_mask, old_act->sa_mask);
8348 act.sa_flags = old_act->sa_flags;
8349 act.sa_restorer = old_act->sa_restorer;
8350 unlock_user_struct(old_act, arg2, 0);
8355 ret = get_errno(do_sigaction(arg1, pact, &oact));
8356 if (!is_error(ret) && arg3) {
8357 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8359 old_act->_sa_handler = oact._sa_handler;
8360 old_act->sa_mask = oact.sa_mask.sig[0];
8361 old_act->sa_flags = oact.sa_flags;
8362 old_act->sa_restorer = oact.sa_restorer;
8363 unlock_user_struct(old_act, arg3, 1);
8369 case TARGET_NR_rt_sigaction:
8371 #if defined(TARGET_ALPHA)
8372 struct target_sigaction act, oact, *pact = 0;
8373 struct target_rt_sigaction *rt_act;
8375 if (arg4 != sizeof(target_sigset_t)) {
8376 ret = -TARGET_EINVAL;
8380 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8382 act._sa_handler = rt_act->_sa_handler;
8383 act.sa_mask = rt_act->sa_mask;
8384 act.sa_flags = rt_act->sa_flags;
8385 act.sa_restorer = arg5;
8386 unlock_user_struct(rt_act, arg2, 0);
8389 ret = get_errno(do_sigaction(arg1, pact, &oact));
8390 if (!is_error(ret) && arg3) {
8391 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8393 rt_act->_sa_handler = oact._sa_handler;
8394 rt_act->sa_mask = oact.sa_mask;
8395 rt_act->sa_flags = oact.sa_flags;
8396 unlock_user_struct(rt_act, arg3, 1);
8399 struct target_sigaction *act;
8400 struct target_sigaction *oact;
8402 if (arg4 != sizeof(target_sigset_t)) {
8403 ret = -TARGET_EINVAL;
8407 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
8412 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8413 ret = -TARGET_EFAULT;
8414 goto rt_sigaction_fail;
8418 ret = get_errno(do_sigaction(arg1, act, oact));
8421 unlock_user_struct(act, arg2, 0);
8423 unlock_user_struct(oact, arg3, 1);
8427 #ifdef TARGET_NR_sgetmask /* not on alpha */
8428 case TARGET_NR_sgetmask:
8431 abi_ulong target_set;
8432 ret = do_sigprocmask(0, NULL, &cur_set);
8434 host_to_target_old_sigset(&target_set, &cur_set);
8440 #ifdef TARGET_NR_ssetmask /* not on alpha */
8441 case TARGET_NR_ssetmask:
8443 sigset_t set, oset, cur_set;
8444 abi_ulong target_set = arg1;
8445 /* We only have one word of the new mask so we must read
8446 * the rest of it with do_sigprocmask() and OR in this word.
8447 * We are guaranteed that a do_sigprocmask() that only queries
8448 * the signal mask will not fail.
8450 ret = do_sigprocmask(0, NULL, &cur_set);
8452 target_to_host_old_sigset(&set, &target_set);
8453 sigorset(&set, &set, &cur_set);
8454 ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8456 host_to_target_old_sigset(&target_set, &oset);
8462 #ifdef TARGET_NR_sigprocmask
8463 case TARGET_NR_sigprocmask:
8465 #if defined(TARGET_ALPHA)
8466 sigset_t set, oldset;
8471 case TARGET_SIG_BLOCK:
8474 case TARGET_SIG_UNBLOCK:
8477 case TARGET_SIG_SETMASK:
8481 ret = -TARGET_EINVAL;
8485 target_to_host_old_sigset(&set, &mask);
8487 ret = do_sigprocmask(how, &set, &oldset);
8488 if (!is_error(ret)) {
8489 host_to_target_old_sigset(&mask, &oldset);
8491 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8494 sigset_t set, oldset, *set_ptr;
8499 case TARGET_SIG_BLOCK:
8502 case TARGET_SIG_UNBLOCK:
8505 case TARGET_SIG_SETMASK:
8509 ret = -TARGET_EINVAL;
8512 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8514 target_to_host_old_sigset(&set, p);
8515 unlock_user(p, arg2, 0);
8521 ret = do_sigprocmask(how, set_ptr, &oldset);
8522 if (!is_error(ret) && arg3) {
8523 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8525 host_to_target_old_sigset(p, &oldset);
8526 unlock_user(p, arg3, sizeof(target_sigset_t));
8532 case TARGET_NR_rt_sigprocmask:
8535 sigset_t set, oldset, *set_ptr;
8537 if (arg4 != sizeof(target_sigset_t)) {
8538 ret = -TARGET_EINVAL;
8544 case TARGET_SIG_BLOCK:
8547 case TARGET_SIG_UNBLOCK:
8550 case TARGET_SIG_SETMASK:
8554 ret = -TARGET_EINVAL;
8557 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8559 target_to_host_sigset(&set, p);
8560 unlock_user(p, arg2, 0);
8566 ret = do_sigprocmask(how, set_ptr, &oldset);
8567 if (!is_error(ret) && arg3) {
8568 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8570 host_to_target_sigset(p, &oldset);
8571 unlock_user(p, arg3, sizeof(target_sigset_t));
8575 #ifdef TARGET_NR_sigpending
8576 case TARGET_NR_sigpending:
8579 ret = get_errno(sigpending(&set));
8580 if (!is_error(ret)) {
8581 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8583 host_to_target_old_sigset(p, &set);
8584 unlock_user(p, arg1, sizeof(target_sigset_t));
8589 case TARGET_NR_rt_sigpending:
8593 /* Yes, this check is >, not != like most. We follow the kernel's
8594 * logic and it does it like this because it implements
8595 * NR_sigpending through the same code path, and in that case
8596 * the old_sigset_t is smaller in size.
8598 if (arg2 > sizeof(target_sigset_t)) {
8599 ret = -TARGET_EINVAL;
8603 ret = get_errno(sigpending(&set));
8604 if (!is_error(ret)) {
8605 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8607 host_to_target_sigset(p, &set);
8608 unlock_user(p, arg1, sizeof(target_sigset_t));
8612 #ifdef TARGET_NR_sigsuspend
8613 case TARGET_NR_sigsuspend:
8615 TaskState *ts = cpu->opaque;
8616 #if defined(TARGET_ALPHA)
8617 abi_ulong mask = arg1;
8618 target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8620 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8622 target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8623 unlock_user(p, arg1, 0);
8625 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8627 if (ret != -TARGET_ERESTARTSYS) {
8628 ts->in_sigsuspend = 1;
8633 case TARGET_NR_rt_sigsuspend:
8635 TaskState *ts = cpu->opaque;
8637 if (arg2 != sizeof(target_sigset_t)) {
8638 ret = -TARGET_EINVAL;
8641 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8643 target_to_host_sigset(&ts->sigsuspend_mask, p);
8644 unlock_user(p, arg1, 0);
8645 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8647 if (ret != -TARGET_ERESTARTSYS) {
8648 ts->in_sigsuspend = 1;
8652 case TARGET_NR_rt_sigtimedwait:
8655 struct timespec uts, *puts;
8658 if (arg4 != sizeof(target_sigset_t)) {
8659 ret = -TARGET_EINVAL;
8663 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8665 target_to_host_sigset(&set, p);
8666 unlock_user(p, arg1, 0);
8669 target_to_host_timespec(puts, arg3);
8673 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8675 if (!is_error(ret)) {
8677 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8682 host_to_target_siginfo(p, &uinfo);
8683 unlock_user(p, arg2, sizeof(target_siginfo_t));
8685 ret = host_to_target_signal(ret);
8689 case TARGET_NR_rt_sigqueueinfo:
8693 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8697 target_to_host_siginfo(&uinfo, p);
8698 unlock_user(p, arg1, 0);
8699 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8702 #ifdef TARGET_NR_sigreturn
8703 case TARGET_NR_sigreturn:
8704 if (block_signals()) {
8705 ret = -TARGET_ERESTARTSYS;
8707 ret = do_sigreturn(cpu_env);
8711 case TARGET_NR_rt_sigreturn:
8712 if (block_signals()) {
8713 ret = -TARGET_ERESTARTSYS;
8715 ret = do_rt_sigreturn(cpu_env);
8718 case TARGET_NR_sethostname:
8719 if (!(p = lock_user_string(arg1)))
8721 ret = get_errno(sethostname(p, arg2));
8722 unlock_user(p, arg1, 0);
8724 case TARGET_NR_setrlimit:
8726 int resource = target_to_host_resource(arg1);
8727 struct target_rlimit *target_rlim;
8729 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8731 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8732 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8733 unlock_user_struct(target_rlim, arg2, 0);
8734 ret = get_errno(setrlimit(resource, &rlim));
8737 case TARGET_NR_getrlimit:
8739 int resource = target_to_host_resource(arg1);
8740 struct target_rlimit *target_rlim;
8743 ret = get_errno(getrlimit(resource, &rlim));
8744 if (!is_error(ret)) {
8745 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8747 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8748 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8749 unlock_user_struct(target_rlim, arg2, 1);
8753 case TARGET_NR_getrusage:
8755 struct rusage rusage;
8756 ret = get_errno(getrusage(arg1, &rusage));
8757 if (!is_error(ret)) {
8758 ret = host_to_target_rusage(arg2, &rusage);
8762 case TARGET_NR_gettimeofday:
8765 ret = get_errno(gettimeofday(&tv, NULL));
8766 if (!is_error(ret)) {
8767 if (copy_to_user_timeval(arg1, &tv))
8772 case TARGET_NR_settimeofday:
8774 struct timeval tv, *ptv = NULL;
8775 struct timezone tz, *ptz = NULL;
8778 if (copy_from_user_timeval(&tv, arg1)) {
8785 if (copy_from_user_timezone(&tz, arg2)) {
8791 ret = get_errno(settimeofday(ptv, ptz));
8794 #if defined(TARGET_NR_select)
8795 case TARGET_NR_select:
8796 #if defined(TARGET_WANT_NI_OLD_SELECT)
8797 /* some architectures used to have old_select here
8798 * but now ENOSYS it.
8800 ret = -TARGET_ENOSYS;
8801 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8802 ret = do_old_select(arg1);
8804 ret = do_select(arg1, arg2, arg3, arg4, arg5);
8808 #ifdef TARGET_NR_pselect6
8809 case TARGET_NR_pselect6:
8811 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
8812 fd_set rfds, wfds, efds;
8813 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
8814 struct timespec ts, *ts_ptr;
8817 * The 6th arg is actually two args smashed together,
8818 * so we cannot use the C library.
8826 abi_ulong arg_sigset, arg_sigsize, *arg7;
8827 target_sigset_t *target_sigset;
8835 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
8839 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
8843 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
8849 * This takes a timespec, and not a timeval, so we cannot
8850 * use the do_select() helper ...
8853 if (target_to_host_timespec(&ts, ts_addr)) {
8861 /* Extract the two packed args for the sigset */
8864 sig.size = SIGSET_T_SIZE;
8866 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
8870 arg_sigset = tswapal(arg7[0]);
8871 arg_sigsize = tswapal(arg7[1]);
8872 unlock_user(arg7, arg6, 0);
8876 if (arg_sigsize != sizeof(*target_sigset)) {
8877 /* Like the kernel, we enforce correct size sigsets */
8878 ret = -TARGET_EINVAL;
8881 target_sigset = lock_user(VERIFY_READ, arg_sigset,
8882 sizeof(*target_sigset), 1);
8883 if (!target_sigset) {
8886 target_to_host_sigset(&set, target_sigset);
8887 unlock_user(target_sigset, arg_sigset, 0);
8895 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
8898 if (!is_error(ret)) {
8899 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
8901 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
8903 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
8906 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
8912 #ifdef TARGET_NR_symlink
8913 case TARGET_NR_symlink:
8916 p = lock_user_string(arg1);
8917 p2 = lock_user_string(arg2);
8919 ret = -TARGET_EFAULT;
8921 ret = get_errno(symlink(p, p2));
8922 unlock_user(p2, arg2, 0);
8923 unlock_user(p, arg1, 0);
8927 #if defined(TARGET_NR_symlinkat)
8928 case TARGET_NR_symlinkat:
8931 p = lock_user_string(arg1);
8932 p2 = lock_user_string(arg3);
8934 ret = -TARGET_EFAULT;
8936 ret = get_errno(symlinkat(p, arg2, p2));
8937 unlock_user(p2, arg3, 0);
8938 unlock_user(p, arg1, 0);
8942 #ifdef TARGET_NR_oldlstat
8943 case TARGET_NR_oldlstat:
8946 #ifdef TARGET_NR_readlink
8947 case TARGET_NR_readlink:
8950 p = lock_user_string(arg1);
8951 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8953 ret = -TARGET_EFAULT;
8955 /* Short circuit this for the magic exe check. */
8956 ret = -TARGET_EINVAL;
8957 } else if (is_proc_myself((const char *)p, "exe")) {
8958 char real[PATH_MAX], *temp;
8959 temp = realpath(exec_path, real);
8960 /* Return value is # of bytes that we wrote to the buffer. */
8962 ret = get_errno(-1);
8964 /* Don't worry about sign mismatch as earlier mapping
8965 * logic would have thrown a bad address error. */
8966 ret = MIN(strlen(real), arg3);
8967 /* We cannot NUL terminate the string. */
8968 memcpy(p2, real, ret);
8971 ret = get_errno(readlink(path(p), p2, arg3));
8973 unlock_user(p2, arg2, ret);
8974 unlock_user(p, arg1, 0);
8978 #if defined(TARGET_NR_readlinkat)
8979 case TARGET_NR_readlinkat:
8982 p = lock_user_string(arg2);
8983 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8985 ret = -TARGET_EFAULT;
8986 } else if (is_proc_myself((const char *)p, "exe")) {
8987 char real[PATH_MAX], *temp;
8988 temp = realpath(exec_path, real);
8989 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
8990 snprintf((char *)p2, arg4, "%s", real);
8992 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
8994 unlock_user(p2, arg3, ret);
8995 unlock_user(p, arg2, 0);
8999 #ifdef TARGET_NR_uselib
9000 case TARGET_NR_uselib:
9003 #ifdef TARGET_NR_swapon
9004 case TARGET_NR_swapon:
9005 if (!(p = lock_user_string(arg1)))
9007 ret = get_errno(swapon(p, arg2));
9008 unlock_user(p, arg1, 0);
9011 case TARGET_NR_reboot:
9012 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9013 /* arg4 must be ignored in all other cases */
9014 p = lock_user_string(arg4);
9018 ret = get_errno(reboot(arg1, arg2, arg3, p));
9019 unlock_user(p, arg4, 0);
9021 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9024 #ifdef TARGET_NR_readdir
9025 case TARGET_NR_readdir:
9028 #ifdef TARGET_NR_mmap
9029 case TARGET_NR_mmap:
9030 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9031 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9032 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9033 || defined(TARGET_S390X)
9036 abi_ulong v1, v2, v3, v4, v5, v6;
9037 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9045 unlock_user(v, arg1, 0);
9046 ret = get_errno(target_mmap(v1, v2, v3,
9047 target_to_host_bitmask(v4, mmap_flags_tbl),
9051 ret = get_errno(target_mmap(arg1, arg2, arg3,
9052 target_to_host_bitmask(arg4, mmap_flags_tbl),
9058 #ifdef TARGET_NR_mmap2
9059 case TARGET_NR_mmap2:
9061 #define MMAP_SHIFT 12
9063 ret = get_errno(target_mmap(arg1, arg2, arg3,
9064 target_to_host_bitmask(arg4, mmap_flags_tbl),
9066 arg6 << MMAP_SHIFT));
9069 case TARGET_NR_munmap:
9070 ret = get_errno(target_munmap(arg1, arg2));
9072 case TARGET_NR_mprotect:
9074 TaskState *ts = cpu->opaque;
9075 /* Special hack to detect libc making the stack executable. */
9076 if ((arg3 & PROT_GROWSDOWN)
9077 && arg1 >= ts->info->stack_limit
9078 && arg1 <= ts->info->start_stack) {
9079 arg3 &= ~PROT_GROWSDOWN;
9080 arg2 = arg2 + arg1 - ts->info->stack_limit;
9081 arg1 = ts->info->stack_limit;
9084 ret = get_errno(target_mprotect(arg1, arg2, arg3));
9086 #ifdef TARGET_NR_mremap
9087 case TARGET_NR_mremap:
9088 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9091 /* ??? msync/mlock/munlock are broken for softmmu. */
9092 #ifdef TARGET_NR_msync
9093 case TARGET_NR_msync:
9094 ret = get_errno(msync(g2h(arg1), arg2, arg3));
9097 #ifdef TARGET_NR_mlock
9098 case TARGET_NR_mlock:
9099 ret = get_errno(mlock(g2h(arg1), arg2));
9102 #ifdef TARGET_NR_munlock
9103 case TARGET_NR_munlock:
9104 ret = get_errno(munlock(g2h(arg1), arg2));
9107 #ifdef TARGET_NR_mlockall
9108 case TARGET_NR_mlockall:
9109 ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9112 #ifdef TARGET_NR_munlockall
9113 case TARGET_NR_munlockall:
9114 ret = get_errno(munlockall());
9117 case TARGET_NR_truncate:
9118 if (!(p = lock_user_string(arg1)))
9120 ret = get_errno(truncate(p, arg2));
9121 unlock_user(p, arg1, 0);
9123 case TARGET_NR_ftruncate:
9124 ret = get_errno(ftruncate(arg1, arg2));
9126 case TARGET_NR_fchmod:
9127 ret = get_errno(fchmod(arg1, arg2));
9129 #if defined(TARGET_NR_fchmodat)
9130 case TARGET_NR_fchmodat:
9131 if (!(p = lock_user_string(arg2)))
9133 ret = get_errno(fchmodat(arg1, p, arg3, 0));
9134 unlock_user(p, arg2, 0);
9137 case TARGET_NR_getpriority:
9138 /* Note that negative values are valid for getpriority, so we must
9139 differentiate based on errno settings. */
9141 ret = getpriority(arg1, arg2);
9142 if (ret == -1 && errno != 0) {
9143 ret = -host_to_target_errno(errno);
9147 /* Return value is the unbiased priority. Signal no error. */
9148 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9150 /* Return value is a biased priority to avoid negative numbers. */
9154 case TARGET_NR_setpriority:
9155 ret = get_errno(setpriority(arg1, arg2, arg3));
9157 #ifdef TARGET_NR_profil
9158 case TARGET_NR_profil:
9161 case TARGET_NR_statfs:
9162 if (!(p = lock_user_string(arg1)))
9164 ret = get_errno(statfs(path(p), &stfs));
9165 unlock_user(p, arg1, 0);
9167 if (!is_error(ret)) {
9168 struct target_statfs *target_stfs;
9170 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9172 __put_user(stfs.f_type, &target_stfs->f_type);
9173 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9174 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9175 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9176 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9177 __put_user(stfs.f_files, &target_stfs->f_files);
9178 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9179 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9180 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9181 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9182 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9183 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9184 unlock_user_struct(target_stfs, arg2, 1);
9187 case TARGET_NR_fstatfs:
9188 ret = get_errno(fstatfs(arg1, &stfs));
9189 goto convert_statfs;
9190 #ifdef TARGET_NR_statfs64
9191 case TARGET_NR_statfs64:
9192 if (!(p = lock_user_string(arg1)))
9194 ret = get_errno(statfs(path(p), &stfs));
9195 unlock_user(p, arg1, 0);
9197 if (!is_error(ret)) {
9198 struct target_statfs64 *target_stfs;
9200 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9202 __put_user(stfs.f_type, &target_stfs->f_type);
9203 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9204 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9205 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9206 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9207 __put_user(stfs.f_files, &target_stfs->f_files);
9208 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9209 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9210 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9211 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9212 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9213 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9214 unlock_user_struct(target_stfs, arg3, 1);
9217 case TARGET_NR_fstatfs64:
9218 ret = get_errno(fstatfs(arg1, &stfs));
9219 goto convert_statfs64;
9221 #ifdef TARGET_NR_ioperm
9222 case TARGET_NR_ioperm:
9225 #ifdef TARGET_NR_socketcall
9226 case TARGET_NR_socketcall:
9227 ret = do_socketcall(arg1, arg2);
9230 #ifdef TARGET_NR_accept
9231 case TARGET_NR_accept:
9232 ret = do_accept4(arg1, arg2, arg3, 0);
9235 #ifdef TARGET_NR_accept4
9236 case TARGET_NR_accept4:
9237 ret = do_accept4(arg1, arg2, arg3, arg4);
9240 #ifdef TARGET_NR_bind
9241 case TARGET_NR_bind:
9242 ret = do_bind(arg1, arg2, arg3);
9245 #ifdef TARGET_NR_connect
9246 case TARGET_NR_connect:
9247 ret = do_connect(arg1, arg2, arg3);
9250 #ifdef TARGET_NR_getpeername
9251 case TARGET_NR_getpeername:
9252 ret = do_getpeername(arg1, arg2, arg3);
9255 #ifdef TARGET_NR_getsockname
9256 case TARGET_NR_getsockname:
9257 ret = do_getsockname(arg1, arg2, arg3);
9260 #ifdef TARGET_NR_getsockopt
9261 case TARGET_NR_getsockopt:
9262 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9265 #ifdef TARGET_NR_listen
9266 case TARGET_NR_listen:
9267 ret = get_errno(listen(arg1, arg2));
9270 #ifdef TARGET_NR_recv
9271 case TARGET_NR_recv:
9272 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9275 #ifdef TARGET_NR_recvfrom
9276 case TARGET_NR_recvfrom:
9277 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9280 #ifdef TARGET_NR_recvmsg
9281 case TARGET_NR_recvmsg:
9282 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
9285 #ifdef TARGET_NR_send
9286 case TARGET_NR_send:
9287 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9290 #ifdef TARGET_NR_sendmsg
9291 case TARGET_NR_sendmsg:
9292 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
9295 #ifdef TARGET_NR_sendmmsg
9296 case TARGET_NR_sendmmsg:
9297 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9299 case TARGET_NR_recvmmsg:
9300 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9303 #ifdef TARGET_NR_sendto
9304 case TARGET_NR_sendto:
9305 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9308 #ifdef TARGET_NR_shutdown
9309 case TARGET_NR_shutdown:
9310 ret = get_errno(shutdown(arg1, arg2));
9313 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9314 case TARGET_NR_getrandom:
9315 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9319 ret = get_errno(getrandom(p, arg2, arg3));
9320 unlock_user(p, arg1, ret);
9323 #ifdef TARGET_NR_socket
9324 case TARGET_NR_socket:
9325 ret = do_socket(arg1, arg2, arg3);
9326 fd_trans_unregister(ret);
9329 #ifdef TARGET_NR_socketpair
9330 case TARGET_NR_socketpair:
9331 ret = do_socketpair(arg1, arg2, arg3, arg4);
9334 #ifdef TARGET_NR_setsockopt
9335 case TARGET_NR_setsockopt:
9336 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9339 #if defined(TARGET_NR_syslog)
9340 case TARGET_NR_syslog:
9345 case TARGET_SYSLOG_ACTION_CLOSE: /* Close log */
9346 case TARGET_SYSLOG_ACTION_OPEN: /* Open log */
9347 case TARGET_SYSLOG_ACTION_CLEAR: /* Clear ring buffer */
9348 case TARGET_SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging */
9349 case TARGET_SYSLOG_ACTION_CONSOLE_ON: /* Enable logging */
9350 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9351 case TARGET_SYSLOG_ACTION_SIZE_UNREAD: /* Number of chars */
9352 case TARGET_SYSLOG_ACTION_SIZE_BUFFER: /* Size of the buffer */
9354 ret = get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9357 case TARGET_SYSLOG_ACTION_READ: /* Read from log */
9358 case TARGET_SYSLOG_ACTION_READ_CLEAR: /* Read/clear msgs */
9359 case TARGET_SYSLOG_ACTION_READ_ALL: /* Read last messages */
9361 ret = -TARGET_EINVAL;
9369 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9371 ret = -TARGET_EFAULT;
9374 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9375 unlock_user(p, arg2, arg3);
9385 case TARGET_NR_setitimer:
9387 struct itimerval value, ovalue, *pvalue;
9391 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9392 || copy_from_user_timeval(&pvalue->it_value,
9393 arg2 + sizeof(struct target_timeval)))
9398 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9399 if (!is_error(ret) && arg3) {
9400 if (copy_to_user_timeval(arg3,
9401 &ovalue.it_interval)
9402 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9408 case TARGET_NR_getitimer:
9410 struct itimerval value;
9412 ret = get_errno(getitimer(arg1, &value));
9413 if (!is_error(ret) && arg2) {
9414 if (copy_to_user_timeval(arg2,
9416 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9422 #ifdef TARGET_NR_stat
9423 case TARGET_NR_stat:
9424 if (!(p = lock_user_string(arg1)))
9426 ret = get_errno(stat(path(p), &st));
9427 unlock_user(p, arg1, 0);
9430 #ifdef TARGET_NR_lstat
9431 case TARGET_NR_lstat:
9432 if (!(p = lock_user_string(arg1)))
9434 ret = get_errno(lstat(path(p), &st));
9435 unlock_user(p, arg1, 0);
9438 case TARGET_NR_fstat:
9440 ret = get_errno(fstat(arg1, &st));
9441 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9444 if (!is_error(ret)) {
9445 struct target_stat *target_st;
9447 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9449 memset(target_st, 0, sizeof(*target_st));
9450 __put_user(st.st_dev, &target_st->st_dev);
9451 __put_user(st.st_ino, &target_st->st_ino);
9452 __put_user(st.st_mode, &target_st->st_mode);
9453 __put_user(st.st_uid, &target_st->st_uid);
9454 __put_user(st.st_gid, &target_st->st_gid);
9455 __put_user(st.st_nlink, &target_st->st_nlink);
9456 __put_user(st.st_rdev, &target_st->st_rdev);
9457 __put_user(st.st_size, &target_st->st_size);
9458 __put_user(st.st_blksize, &target_st->st_blksize);
9459 __put_user(st.st_blocks, &target_st->st_blocks);
9460 __put_user(st.st_atime, &target_st->target_st_atime);
9461 __put_user(st.st_mtime, &target_st->target_st_mtime);
9462 __put_user(st.st_ctime, &target_st->target_st_ctime);
9463 unlock_user_struct(target_st, arg2, 1);
9467 #ifdef TARGET_NR_olduname
9468 case TARGET_NR_olduname:
9471 #ifdef TARGET_NR_iopl
9472 case TARGET_NR_iopl:
9475 case TARGET_NR_vhangup:
9476 ret = get_errno(vhangup());
9478 #ifdef TARGET_NR_idle
9479 case TARGET_NR_idle:
9482 #ifdef TARGET_NR_syscall
9483 case TARGET_NR_syscall:
9484 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9485 arg6, arg7, arg8, 0);
9488 case TARGET_NR_wait4:
9491 abi_long status_ptr = arg2;
9492 struct rusage rusage, *rusage_ptr;
9493 abi_ulong target_rusage = arg4;
9494 abi_long rusage_err;
9496 rusage_ptr = &rusage;
9499 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9500 if (!is_error(ret)) {
9501 if (status_ptr && ret) {
9502 status = host_to_target_waitstatus(status);
9503 if (put_user_s32(status, status_ptr))
9506 if (target_rusage) {
9507 rusage_err = host_to_target_rusage(target_rusage, &rusage);
9515 #ifdef TARGET_NR_swapoff
9516 case TARGET_NR_swapoff:
9517 if (!(p = lock_user_string(arg1)))
9519 ret = get_errno(swapoff(p));
9520 unlock_user(p, arg1, 0);
9523 case TARGET_NR_sysinfo:
9525 struct target_sysinfo *target_value;
9526 struct sysinfo value;
9527 ret = get_errno(sysinfo(&value));
9528 if (!is_error(ret) && arg1)
9530 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
9532 __put_user(value.uptime, &target_value->uptime);
9533 __put_user(value.loads[0], &target_value->loads[0]);
9534 __put_user(value.loads[1], &target_value->loads[1]);
9535 __put_user(value.loads[2], &target_value->loads[2]);
9536 __put_user(value.totalram, &target_value->totalram);
9537 __put_user(value.freeram, &target_value->freeram);
9538 __put_user(value.sharedram, &target_value->sharedram);
9539 __put_user(value.bufferram, &target_value->bufferram);
9540 __put_user(value.totalswap, &target_value->totalswap);
9541 __put_user(value.freeswap, &target_value->freeswap);
9542 __put_user(value.procs, &target_value->procs);
9543 __put_user(value.totalhigh, &target_value->totalhigh);
9544 __put_user(value.freehigh, &target_value->freehigh);
9545 __put_user(value.mem_unit, &target_value->mem_unit);
9546 unlock_user_struct(target_value, arg1, 1);
9550 #ifdef TARGET_NR_ipc
9552 ret = do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
9555 #ifdef TARGET_NR_semget
9556 case TARGET_NR_semget:
9557 ret = get_errno(semget(arg1, arg2, arg3));
9560 #ifdef TARGET_NR_semop
9561 case TARGET_NR_semop:
9562 ret = do_semop(arg1, arg2, arg3);
9565 #ifdef TARGET_NR_semctl
9566 case TARGET_NR_semctl:
9567 ret = do_semctl(arg1, arg2, arg3, arg4);
9570 #ifdef TARGET_NR_msgctl
9571 case TARGET_NR_msgctl:
9572 ret = do_msgctl(arg1, arg2, arg3);
9575 #ifdef TARGET_NR_msgget
9576 case TARGET_NR_msgget:
9577 ret = get_errno(msgget(arg1, arg2));
9580 #ifdef TARGET_NR_msgrcv
9581 case TARGET_NR_msgrcv:
9582 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
9585 #ifdef TARGET_NR_msgsnd
9586 case TARGET_NR_msgsnd:
9587 ret = do_msgsnd(arg1, arg2, arg3, arg4);
9590 #ifdef TARGET_NR_shmget
9591 case TARGET_NR_shmget:
9592 ret = get_errno(shmget(arg1, arg2, arg3));
9595 #ifdef TARGET_NR_shmctl
9596 case TARGET_NR_shmctl:
9597 ret = do_shmctl(arg1, arg2, arg3);
9600 #ifdef TARGET_NR_shmat
9601 case TARGET_NR_shmat:
9602 ret = do_shmat(cpu_env, arg1, arg2, arg3);
9605 #ifdef TARGET_NR_shmdt
9606 case TARGET_NR_shmdt:
9607 ret = do_shmdt(arg1);
9610 case TARGET_NR_fsync:
9611 ret = get_errno(fsync(arg1));
9613 case TARGET_NR_clone:
9614 /* Linux manages to have three different orderings for its
9615 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9616 * match the kernel's CONFIG_CLONE_* settings.
9617 * Microblaze is further special in that it uses a sixth
9618 * implicit argument to clone for the TLS pointer.
9620 #if defined(TARGET_MICROBLAZE)
9621 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
9622 #elif defined(TARGET_CLONE_BACKWARDS)
9623 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
9624 #elif defined(TARGET_CLONE_BACKWARDS2)
9625 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
9627 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
9630 #ifdef __NR_exit_group
9631 /* new thread calls */
9632 case TARGET_NR_exit_group:
9636 gdb_exit(cpu_env, arg1);
9637 ret = get_errno(exit_group(arg1));
9640 case TARGET_NR_setdomainname:
9641 if (!(p = lock_user_string(arg1)))
9643 ret = get_errno(setdomainname(p, arg2));
9644 unlock_user(p, arg1, 0);
9646 case TARGET_NR_uname:
9647 /* no need to transcode because we use the linux syscall */
9649 struct new_utsname * buf;
9651 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
9653 ret = get_errno(sys_uname(buf));
9654 if (!is_error(ret)) {
9655 /* Overwrite the native machine name with whatever is being
9657 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
9658 /* Allow the user to override the reported release. */
9659 if (qemu_uname_release && *qemu_uname_release) {
9660 g_strlcpy(buf->release, qemu_uname_release,
9661 sizeof(buf->release));
9664 unlock_user_struct(buf, arg1, 1);
9668 case TARGET_NR_modify_ldt:
9669 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
9671 #if !defined(TARGET_X86_64)
9672 case TARGET_NR_vm86old:
9674 case TARGET_NR_vm86:
9675 ret = do_vm86(cpu_env, arg1, arg2);
9679 case TARGET_NR_adjtimex:
9681 struct timex host_buf;
9683 if (target_to_host_timex(&host_buf, arg1) != 0) {
9686 ret = get_errno(adjtimex(&host_buf));
9687 if (!is_error(ret)) {
9688 if (host_to_target_timex(arg1, &host_buf) != 0) {
9694 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9695 case TARGET_NR_clock_adjtime:
9697 struct timex htx, *phtx = &htx;
9699 if (target_to_host_timex(phtx, arg2) != 0) {
9702 ret = get_errno(clock_adjtime(arg1, phtx));
9703 if (!is_error(ret) && phtx) {
9704 if (host_to_target_timex(arg2, phtx) != 0) {
9711 #ifdef TARGET_NR_create_module
9712 case TARGET_NR_create_module:
9714 case TARGET_NR_init_module:
9715 case TARGET_NR_delete_module:
9716 #ifdef TARGET_NR_get_kernel_syms
9717 case TARGET_NR_get_kernel_syms:
9720 case TARGET_NR_quotactl:
9722 case TARGET_NR_getpgid:
9723 ret = get_errno(getpgid(arg1));
9725 case TARGET_NR_fchdir:
9726 ret = get_errno(fchdir(arg1));
9728 #ifdef TARGET_NR_bdflush /* not on x86_64 */
9729 case TARGET_NR_bdflush:
9732 #ifdef TARGET_NR_sysfs
9733 case TARGET_NR_sysfs:
9736 case TARGET_NR_personality:
9737 ret = get_errno(personality(arg1));
9739 #ifdef TARGET_NR_afs_syscall
9740 case TARGET_NR_afs_syscall:
9743 #ifdef TARGET_NR__llseek /* Not on alpha */
9744 case TARGET_NR__llseek:
9747 #if !defined(__NR_llseek)
9748 res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
9750 ret = get_errno(res);
9755 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
9757 if ((ret == 0) && put_user_s64(res, arg4)) {
9763 #ifdef TARGET_NR_getdents
9764 case TARGET_NR_getdents:
9765 #ifdef __NR_getdents
9766 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9768 struct target_dirent *target_dirp;
9769 struct linux_dirent *dirp;
9770 abi_long count = arg3;
9772 dirp = g_try_malloc(count);
9774 ret = -TARGET_ENOMEM;
9778 ret = get_errno(sys_getdents(arg1, dirp, count));
9779 if (!is_error(ret)) {
9780 struct linux_dirent *de;
9781 struct target_dirent *tde;
9783 int reclen, treclen;
9784 int count1, tnamelen;
9788 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9792 reclen = de->d_reclen;
9793 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
9794 assert(tnamelen >= 0);
9795 treclen = tnamelen + offsetof(struct target_dirent, d_name);
9796 assert(count1 + treclen <= count);
9797 tde->d_reclen = tswap16(treclen);
9798 tde->d_ino = tswapal(de->d_ino);
9799 tde->d_off = tswapal(de->d_off);
9800 memcpy(tde->d_name, de->d_name, tnamelen);
9801 de = (struct linux_dirent *)((char *)de + reclen);
9803 tde = (struct target_dirent *)((char *)tde + treclen);
9807 unlock_user(target_dirp, arg2, ret);
9813 struct linux_dirent *dirp;
9814 abi_long count = arg3;
9816 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9818 ret = get_errno(sys_getdents(arg1, dirp, count));
9819 if (!is_error(ret)) {
9820 struct linux_dirent *de;
9825 reclen = de->d_reclen;
9828 de->d_reclen = tswap16(reclen);
9829 tswapls(&de->d_ino);
9830 tswapls(&de->d_off);
9831 de = (struct linux_dirent *)((char *)de + reclen);
9835 unlock_user(dirp, arg2, ret);
9839 /* Implement getdents in terms of getdents64 */
9841 struct linux_dirent64 *dirp;
9842 abi_long count = arg3;
9844 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
9848 ret = get_errno(sys_getdents64(arg1, dirp, count));
9849 if (!is_error(ret)) {
9850 /* Convert the dirent64 structs to target dirent. We do this
9851 * in-place, since we can guarantee that a target_dirent is no
9852 * larger than a dirent64; however this means we have to be
9853 * careful to read everything before writing in the new format.
9855 struct linux_dirent64 *de;
9856 struct target_dirent *tde;
9861 tde = (struct target_dirent *)dirp;
9863 int namelen, treclen;
9864 int reclen = de->d_reclen;
9865 uint64_t ino = de->d_ino;
9866 int64_t off = de->d_off;
9867 uint8_t type = de->d_type;
9869 namelen = strlen(de->d_name);
9870 treclen = offsetof(struct target_dirent, d_name)
9872 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
9874 memmove(tde->d_name, de->d_name, namelen + 1);
9875 tde->d_ino = tswapal(ino);
9876 tde->d_off = tswapal(off);
9877 tde->d_reclen = tswap16(treclen);
9878 /* The target_dirent type is in what was formerly a padding
9879 * byte at the end of the structure:
9881 *(((char *)tde) + treclen - 1) = type;
9883 de = (struct linux_dirent64 *)((char *)de + reclen);
9884 tde = (struct target_dirent *)((char *)tde + treclen);
9890 unlock_user(dirp, arg2, ret);
9894 #endif /* TARGET_NR_getdents */
9895 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9896 case TARGET_NR_getdents64:
9898 struct linux_dirent64 *dirp;
9899 abi_long count = arg3;
9900 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9902 ret = get_errno(sys_getdents64(arg1, dirp, count));
9903 if (!is_error(ret)) {
9904 struct linux_dirent64 *de;
9909 reclen = de->d_reclen;
9912 de->d_reclen = tswap16(reclen);
9913 tswap64s((uint64_t *)&de->d_ino);
9914 tswap64s((uint64_t *)&de->d_off);
9915 de = (struct linux_dirent64 *)((char *)de + reclen);
9919 unlock_user(dirp, arg2, ret);
9922 #endif /* TARGET_NR_getdents64 */
9923 #if defined(TARGET_NR__newselect)
9924 case TARGET_NR__newselect:
9925 ret = do_select(arg1, arg2, arg3, arg4, arg5);
9928 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9929 # ifdef TARGET_NR_poll
9930 case TARGET_NR_poll:
9932 # ifdef TARGET_NR_ppoll
9933 case TARGET_NR_ppoll:
9936 struct target_pollfd *target_pfd;
9937 unsigned int nfds = arg2;
9944 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
9945 ret = -TARGET_EINVAL;
9949 target_pfd = lock_user(VERIFY_WRITE, arg1,
9950 sizeof(struct target_pollfd) * nfds, 1);
9955 pfd = alloca(sizeof(struct pollfd) * nfds);
9956 for (i = 0; i < nfds; i++) {
9957 pfd[i].fd = tswap32(target_pfd[i].fd);
9958 pfd[i].events = tswap16(target_pfd[i].events);
9963 # ifdef TARGET_NR_ppoll
9964 case TARGET_NR_ppoll:
9966 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
9967 target_sigset_t *target_set;
9968 sigset_t _set, *set = &_set;
9971 if (target_to_host_timespec(timeout_ts, arg3)) {
9972 unlock_user(target_pfd, arg1, 0);
9980 if (arg5 != sizeof(target_sigset_t)) {
9981 unlock_user(target_pfd, arg1, 0);
9982 ret = -TARGET_EINVAL;
9986 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
9988 unlock_user(target_pfd, arg1, 0);
9991 target_to_host_sigset(set, target_set);
9996 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
9997 set, SIGSET_T_SIZE));
9999 if (!is_error(ret) && arg3) {
10000 host_to_target_timespec(arg3, timeout_ts);
10003 unlock_user(target_set, arg4, 0);
10008 # ifdef TARGET_NR_poll
10009 case TARGET_NR_poll:
10011 struct timespec ts, *pts;
10014 /* Convert ms to secs, ns */
10015 ts.tv_sec = arg3 / 1000;
10016 ts.tv_nsec = (arg3 % 1000) * 1000000LL;
10019 /* -ve poll() timeout means "infinite" */
10022 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
10027 g_assert_not_reached();
10030 if (!is_error(ret)) {
10031 for(i = 0; i < nfds; i++) {
10032 target_pfd[i].revents = tswap16(pfd[i].revents);
10035 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
10039 case TARGET_NR_flock:
10040 /* NOTE: the flock constant seems to be the same for every
10042 ret = get_errno(safe_flock(arg1, arg2));
10044 case TARGET_NR_readv:
10046 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10048 ret = get_errno(safe_readv(arg1, vec, arg3));
10049 unlock_iovec(vec, arg2, arg3, 1);
10051 ret = -host_to_target_errno(errno);
10055 case TARGET_NR_writev:
10057 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10059 ret = get_errno(safe_writev(arg1, vec, arg3));
10060 unlock_iovec(vec, arg2, arg3, 0);
10062 ret = -host_to_target_errno(errno);
10066 #if defined(TARGET_NR_preadv)
10067 case TARGET_NR_preadv:
10069 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10071 ret = get_errno(safe_preadv(arg1, vec, arg3, arg4, arg5));
10072 unlock_iovec(vec, arg2, arg3, 1);
10074 ret = -host_to_target_errno(errno);
10079 #if defined(TARGET_NR_pwritev)
10080 case TARGET_NR_pwritev:
10082 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10084 ret = get_errno(safe_pwritev(arg1, vec, arg3, arg4, arg5));
10085 unlock_iovec(vec, arg2, arg3, 0);
10087 ret = -host_to_target_errno(errno);
10092 case TARGET_NR_getsid:
10093 ret = get_errno(getsid(arg1));
10095 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10096 case TARGET_NR_fdatasync:
10097 ret = get_errno(fdatasync(arg1));
10100 #ifdef TARGET_NR__sysctl
10101 case TARGET_NR__sysctl:
10102 /* We don't implement this, but ENOTDIR is always a safe
10104 ret = -TARGET_ENOTDIR;
10107 case TARGET_NR_sched_getaffinity:
10109 unsigned int mask_size;
10110 unsigned long *mask;
10113 * sched_getaffinity needs multiples of ulong, so need to take
10114 * care of mismatches between target ulong and host ulong sizes.
10116 if (arg2 & (sizeof(abi_ulong) - 1)) {
10117 ret = -TARGET_EINVAL;
10120 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10122 mask = alloca(mask_size);
10123 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10125 if (!is_error(ret)) {
10127 /* More data returned than the caller's buffer will fit.
10128 * This only happens if sizeof(abi_long) < sizeof(long)
10129 * and the caller passed us a buffer holding an odd number
10130 * of abi_longs. If the host kernel is actually using the
10131 * extra 4 bytes then fail EINVAL; otherwise we can just
10132 * ignore them and only copy the interesting part.
10134 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10135 if (numcpus > arg2 * 8) {
10136 ret = -TARGET_EINVAL;
10142 if (copy_to_user(arg3, mask, ret)) {
10148 case TARGET_NR_sched_setaffinity:
10150 unsigned int mask_size;
10151 unsigned long *mask;
10154 * sched_setaffinity needs multiples of ulong, so need to take
10155 * care of mismatches between target ulong and host ulong sizes.
10157 if (arg2 & (sizeof(abi_ulong) - 1)) {
10158 ret = -TARGET_EINVAL;
10161 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10163 mask = alloca(mask_size);
10164 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
10167 memcpy(mask, p, arg2);
10168 unlock_user_struct(p, arg2, 0);
10170 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10173 case TARGET_NR_sched_setparam:
10175 struct sched_param *target_schp;
10176 struct sched_param schp;
10179 return -TARGET_EINVAL;
10181 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10183 schp.sched_priority = tswap32(target_schp->sched_priority);
10184 unlock_user_struct(target_schp, arg2, 0);
10185 ret = get_errno(sched_setparam(arg1, &schp));
10188 case TARGET_NR_sched_getparam:
10190 struct sched_param *target_schp;
10191 struct sched_param schp;
10194 return -TARGET_EINVAL;
10196 ret = get_errno(sched_getparam(arg1, &schp));
10197 if (!is_error(ret)) {
10198 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10200 target_schp->sched_priority = tswap32(schp.sched_priority);
10201 unlock_user_struct(target_schp, arg2, 1);
10205 case TARGET_NR_sched_setscheduler:
10207 struct sched_param *target_schp;
10208 struct sched_param schp;
10210 return -TARGET_EINVAL;
10212 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10214 schp.sched_priority = tswap32(target_schp->sched_priority);
10215 unlock_user_struct(target_schp, arg3, 0);
10216 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
10219 case TARGET_NR_sched_getscheduler:
10220 ret = get_errno(sched_getscheduler(arg1));
10222 case TARGET_NR_sched_yield:
10223 ret = get_errno(sched_yield());
10225 case TARGET_NR_sched_get_priority_max:
10226 ret = get_errno(sched_get_priority_max(arg1));
10228 case TARGET_NR_sched_get_priority_min:
10229 ret = get_errno(sched_get_priority_min(arg1));
10231 case TARGET_NR_sched_rr_get_interval:
10233 struct timespec ts;
10234 ret = get_errno(sched_rr_get_interval(arg1, &ts));
10235 if (!is_error(ret)) {
10236 ret = host_to_target_timespec(arg2, &ts);
10240 case TARGET_NR_nanosleep:
10242 struct timespec req, rem;
10243 target_to_host_timespec(&req, arg1);
10244 ret = get_errno(safe_nanosleep(&req, &rem));
10245 if (is_error(ret) && arg2) {
10246 host_to_target_timespec(arg2, &rem);
10250 #ifdef TARGET_NR_query_module
10251 case TARGET_NR_query_module:
10252 goto unimplemented;
10254 #ifdef TARGET_NR_nfsservctl
10255 case TARGET_NR_nfsservctl:
10256 goto unimplemented;
10258 case TARGET_NR_prctl:
10260 case PR_GET_PDEATHSIG:
10263 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10264 if (!is_error(ret) && arg2
10265 && put_user_ual(deathsig, arg2)) {
10273 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10277 ret = get_errno(prctl(arg1, (unsigned long)name,
10278 arg3, arg4, arg5));
10279 unlock_user(name, arg2, 16);
10284 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10288 ret = get_errno(prctl(arg1, (unsigned long)name,
10289 arg3, arg4, arg5));
10290 unlock_user(name, arg2, 0);
10295 /* Most prctl options have no pointer arguments */
10296 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10300 #ifdef TARGET_NR_arch_prctl
10301 case TARGET_NR_arch_prctl:
10302 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10303 ret = do_arch_prctl(cpu_env, arg1, arg2);
10306 goto unimplemented;
10309 #ifdef TARGET_NR_pread64
10310 case TARGET_NR_pread64:
10311 if (regpairs_aligned(cpu_env)) {
10315 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
10317 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10318 unlock_user(p, arg2, ret);
10320 case TARGET_NR_pwrite64:
10321 if (regpairs_aligned(cpu_env)) {
10325 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
10327 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10328 unlock_user(p, arg2, 0);
10331 case TARGET_NR_getcwd:
10332 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10334 ret = get_errno(sys_getcwd1(p, arg2));
10335 unlock_user(p, arg1, ret);
10337 case TARGET_NR_capget:
10338 case TARGET_NR_capset:
10340 struct target_user_cap_header *target_header;
10341 struct target_user_cap_data *target_data = NULL;
10342 struct __user_cap_header_struct header;
10343 struct __user_cap_data_struct data[2];
10344 struct __user_cap_data_struct *dataptr = NULL;
10345 int i, target_datalen;
10346 int data_items = 1;
10348 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10351 header.version = tswap32(target_header->version);
10352 header.pid = tswap32(target_header->pid);
10354 if (header.version != _LINUX_CAPABILITY_VERSION) {
10355 /* Version 2 and up takes pointer to two user_data structs */
10359 target_datalen = sizeof(*target_data) * data_items;
10362 if (num == TARGET_NR_capget) {
10363 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10365 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10367 if (!target_data) {
10368 unlock_user_struct(target_header, arg1, 0);
10372 if (num == TARGET_NR_capset) {
10373 for (i = 0; i < data_items; i++) {
10374 data[i].effective = tswap32(target_data[i].effective);
10375 data[i].permitted = tswap32(target_data[i].permitted);
10376 data[i].inheritable = tswap32(target_data[i].inheritable);
10383 if (num == TARGET_NR_capget) {
10384 ret = get_errno(capget(&header, dataptr));
10386 ret = get_errno(capset(&header, dataptr));
10389 /* The kernel always updates version for both capget and capset */
10390 target_header->version = tswap32(header.version);
10391 unlock_user_struct(target_header, arg1, 1);
10394 if (num == TARGET_NR_capget) {
10395 for (i = 0; i < data_items; i++) {
10396 target_data[i].effective = tswap32(data[i].effective);
10397 target_data[i].permitted = tswap32(data[i].permitted);
10398 target_data[i].inheritable = tswap32(data[i].inheritable);
10400 unlock_user(target_data, arg2, target_datalen);
10402 unlock_user(target_data, arg2, 0);
10407 case TARGET_NR_sigaltstack:
10408 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
10411 #ifdef CONFIG_SENDFILE
10412 case TARGET_NR_sendfile:
10414 off_t *offp = NULL;
10417 ret = get_user_sal(off, arg3);
10418 if (is_error(ret)) {
10423 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10424 if (!is_error(ret) && arg3) {
10425 abi_long ret2 = put_user_sal(off, arg3);
10426 if (is_error(ret2)) {
10432 #ifdef TARGET_NR_sendfile64
10433 case TARGET_NR_sendfile64:
10435 off_t *offp = NULL;
10438 ret = get_user_s64(off, arg3);
10439 if (is_error(ret)) {
10444 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10445 if (!is_error(ret) && arg3) {
10446 abi_long ret2 = put_user_s64(off, arg3);
10447 if (is_error(ret2)) {
10455 case TARGET_NR_sendfile:
10456 #ifdef TARGET_NR_sendfile64
10457 case TARGET_NR_sendfile64:
10459 goto unimplemented;
10462 #ifdef TARGET_NR_getpmsg
10463 case TARGET_NR_getpmsg:
10464 goto unimplemented;
10466 #ifdef TARGET_NR_putpmsg
10467 case TARGET_NR_putpmsg:
10468 goto unimplemented;
10470 #ifdef TARGET_NR_vfork
10471 case TARGET_NR_vfork:
10472 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
10476 #ifdef TARGET_NR_ugetrlimit
10477 case TARGET_NR_ugetrlimit:
10479 struct rlimit rlim;
10480 int resource = target_to_host_resource(arg1);
10481 ret = get_errno(getrlimit(resource, &rlim));
10482 if (!is_error(ret)) {
10483 struct target_rlimit *target_rlim;
10484 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10486 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10487 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10488 unlock_user_struct(target_rlim, arg2, 1);
10493 #ifdef TARGET_NR_truncate64
10494 case TARGET_NR_truncate64:
10495 if (!(p = lock_user_string(arg1)))
10497 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
10498 unlock_user(p, arg1, 0);
10501 #ifdef TARGET_NR_ftruncate64
10502 case TARGET_NR_ftruncate64:
10503 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
10506 #ifdef TARGET_NR_stat64
10507 case TARGET_NR_stat64:
10508 if (!(p = lock_user_string(arg1)))
10510 ret = get_errno(stat(path(p), &st));
10511 unlock_user(p, arg1, 0);
10512 if (!is_error(ret))
10513 ret = host_to_target_stat64(cpu_env, arg2, &st);
10516 #ifdef TARGET_NR_lstat64
10517 case TARGET_NR_lstat64:
10518 if (!(p = lock_user_string(arg1)))
10520 ret = get_errno(lstat(path(p), &st));
10521 unlock_user(p, arg1, 0);
10522 if (!is_error(ret))
10523 ret = host_to_target_stat64(cpu_env, arg2, &st);
10526 #ifdef TARGET_NR_fstat64
10527 case TARGET_NR_fstat64:
10528 ret = get_errno(fstat(arg1, &st));
10529 if (!is_error(ret))
10530 ret = host_to_target_stat64(cpu_env, arg2, &st);
10533 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10534 #ifdef TARGET_NR_fstatat64
10535 case TARGET_NR_fstatat64:
10537 #ifdef TARGET_NR_newfstatat
10538 case TARGET_NR_newfstatat:
10540 if (!(p = lock_user_string(arg2)))
10542 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
10543 if (!is_error(ret))
10544 ret = host_to_target_stat64(cpu_env, arg3, &st);
10547 #ifdef TARGET_NR_lchown
10548 case TARGET_NR_lchown:
10549 if (!(p = lock_user_string(arg1)))
10551 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10552 unlock_user(p, arg1, 0);
10555 #ifdef TARGET_NR_getuid
10556 case TARGET_NR_getuid:
10557 ret = get_errno(high2lowuid(getuid()));
10560 #ifdef TARGET_NR_getgid
10561 case TARGET_NR_getgid:
10562 ret = get_errno(high2lowgid(getgid()));
10565 #ifdef TARGET_NR_geteuid
10566 case TARGET_NR_geteuid:
10567 ret = get_errno(high2lowuid(geteuid()));
10570 #ifdef TARGET_NR_getegid
10571 case TARGET_NR_getegid:
10572 ret = get_errno(high2lowgid(getegid()));
10575 case TARGET_NR_setreuid:
10576 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
10578 case TARGET_NR_setregid:
10579 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
10581 case TARGET_NR_getgroups:
10583 int gidsetsize = arg1;
10584 target_id *target_grouplist;
10588 grouplist = alloca(gidsetsize * sizeof(gid_t));
10589 ret = get_errno(getgroups(gidsetsize, grouplist));
10590 if (gidsetsize == 0)
10592 if (!is_error(ret)) {
10593 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
10594 if (!target_grouplist)
10596 for(i = 0;i < ret; i++)
10597 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
10598 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
10602 case TARGET_NR_setgroups:
10604 int gidsetsize = arg1;
10605 target_id *target_grouplist;
10606 gid_t *grouplist = NULL;
10609 grouplist = alloca(gidsetsize * sizeof(gid_t));
10610 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
10611 if (!target_grouplist) {
10612 ret = -TARGET_EFAULT;
10615 for (i = 0; i < gidsetsize; i++) {
10616 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
10618 unlock_user(target_grouplist, arg2, 0);
10620 ret = get_errno(setgroups(gidsetsize, grouplist));
10623 case TARGET_NR_fchown:
10624 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
10626 #if defined(TARGET_NR_fchownat)
10627 case TARGET_NR_fchownat:
10628 if (!(p = lock_user_string(arg2)))
10630 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
10631 low2highgid(arg4), arg5));
10632 unlock_user(p, arg2, 0);
10635 #ifdef TARGET_NR_setresuid
10636 case TARGET_NR_setresuid:
10637 ret = get_errno(sys_setresuid(low2highuid(arg1),
10639 low2highuid(arg3)));
10642 #ifdef TARGET_NR_getresuid
10643 case TARGET_NR_getresuid:
10645 uid_t ruid, euid, suid;
10646 ret = get_errno(getresuid(&ruid, &euid, &suid));
10647 if (!is_error(ret)) {
10648 if (put_user_id(high2lowuid(ruid), arg1)
10649 || put_user_id(high2lowuid(euid), arg2)
10650 || put_user_id(high2lowuid(suid), arg3))
10656 #ifdef TARGET_NR_getresgid
10657 case TARGET_NR_setresgid:
10658 ret = get_errno(sys_setresgid(low2highgid(arg1),
10660 low2highgid(arg3)));
10663 #ifdef TARGET_NR_getresgid
10664 case TARGET_NR_getresgid:
10666 gid_t rgid, egid, sgid;
10667 ret = get_errno(getresgid(&rgid, &egid, &sgid));
10668 if (!is_error(ret)) {
10669 if (put_user_id(high2lowgid(rgid), arg1)
10670 || put_user_id(high2lowgid(egid), arg2)
10671 || put_user_id(high2lowgid(sgid), arg3))
10677 #ifdef TARGET_NR_chown
10678 case TARGET_NR_chown:
10679 if (!(p = lock_user_string(arg1)))
10681 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
10682 unlock_user(p, arg1, 0);
10685 case TARGET_NR_setuid:
10686 ret = get_errno(sys_setuid(low2highuid(arg1)));
10688 case TARGET_NR_setgid:
10689 ret = get_errno(sys_setgid(low2highgid(arg1)));
10691 case TARGET_NR_setfsuid:
10692 ret = get_errno(setfsuid(arg1));
10694 case TARGET_NR_setfsgid:
10695 ret = get_errno(setfsgid(arg1));
10698 #ifdef TARGET_NR_lchown32
10699 case TARGET_NR_lchown32:
10700 if (!(p = lock_user_string(arg1)))
10702 ret = get_errno(lchown(p, arg2, arg3));
10703 unlock_user(p, arg1, 0);
10706 #ifdef TARGET_NR_getuid32
10707 case TARGET_NR_getuid32:
10708 ret = get_errno(getuid());
10712 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10713 /* Alpha specific */
10714 case TARGET_NR_getxuid:
10718 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
10720 ret = get_errno(getuid());
10723 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10724 /* Alpha specific */
10725 case TARGET_NR_getxgid:
10729 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
10731 ret = get_errno(getgid());
10734 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10735 /* Alpha specific */
10736 case TARGET_NR_osf_getsysinfo:
10737 ret = -TARGET_EOPNOTSUPP;
10739 case TARGET_GSI_IEEE_FP_CONTROL:
10741 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
10743 /* Copied from linux ieee_fpcr_to_swcr. */
10744 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
10745 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
10746 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
10747 | SWCR_TRAP_ENABLE_DZE
10748 | SWCR_TRAP_ENABLE_OVF);
10749 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
10750 | SWCR_TRAP_ENABLE_INE);
10751 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
10752 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
10754 if (put_user_u64 (swcr, arg2))
10760 /* case GSI_IEEE_STATE_AT_SIGNAL:
10761 -- Not implemented in linux kernel.
10763 -- Retrieves current unaligned access state; not much used.
10764 case GSI_PROC_TYPE:
10765 -- Retrieves implver information; surely not used.
10766 case GSI_GET_HWRPB:
10767 -- Grabs a copy of the HWRPB; surely not used.
10772 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10773 /* Alpha specific */
10774 case TARGET_NR_osf_setsysinfo:
10775 ret = -TARGET_EOPNOTSUPP;
10777 case TARGET_SSI_IEEE_FP_CONTROL:
10779 uint64_t swcr, fpcr, orig_fpcr;
10781 if (get_user_u64 (swcr, arg2)) {
10784 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
10785 fpcr = orig_fpcr & FPCR_DYN_MASK;
10787 /* Copied from linux ieee_swcr_to_fpcr. */
10788 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
10789 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
10790 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
10791 | SWCR_TRAP_ENABLE_DZE
10792 | SWCR_TRAP_ENABLE_OVF)) << 48;
10793 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
10794 | SWCR_TRAP_ENABLE_INE)) << 57;
10795 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
10796 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
10798 cpu_alpha_store_fpcr(cpu_env, fpcr);
10803 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
10805 uint64_t exc, fpcr, orig_fpcr;
10808 if (get_user_u64(exc, arg2)) {
10812 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
10814 /* We only add to the exception status here. */
10815 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
10817 cpu_alpha_store_fpcr(cpu_env, fpcr);
10820 /* Old exceptions are not signaled. */
10821 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
10823 /* If any exceptions set by this call,
10824 and are unmasked, send a signal. */
10826 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
10827 si_code = TARGET_FPE_FLTRES;
10829 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
10830 si_code = TARGET_FPE_FLTUND;
10832 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
10833 si_code = TARGET_FPE_FLTOVF;
10835 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
10836 si_code = TARGET_FPE_FLTDIV;
10838 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
10839 si_code = TARGET_FPE_FLTINV;
10841 if (si_code != 0) {
10842 target_siginfo_t info;
10843 info.si_signo = SIGFPE;
10845 info.si_code = si_code;
10846 info._sifields._sigfault._addr
10847 = ((CPUArchState *)cpu_env)->pc;
10848 queue_signal((CPUArchState *)cpu_env, info.si_signo,
10849 QEMU_SI_FAULT, &info);
10854 /* case SSI_NVPAIRS:
10855 -- Used with SSIN_UACPROC to enable unaligned accesses.
10856 case SSI_IEEE_STATE_AT_SIGNAL:
10857 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10858 -- Not implemented in linux kernel
10863 #ifdef TARGET_NR_osf_sigprocmask
10864 /* Alpha specific. */
10865 case TARGET_NR_osf_sigprocmask:
10869 sigset_t set, oldset;
10872 case TARGET_SIG_BLOCK:
10875 case TARGET_SIG_UNBLOCK:
10878 case TARGET_SIG_SETMASK:
10882 ret = -TARGET_EINVAL;
10886 target_to_host_old_sigset(&set, &mask);
10887 ret = do_sigprocmask(how, &set, &oldset);
10889 host_to_target_old_sigset(&mask, &oldset);
10896 #ifdef TARGET_NR_getgid32
10897 case TARGET_NR_getgid32:
10898 ret = get_errno(getgid());
10901 #ifdef TARGET_NR_geteuid32
10902 case TARGET_NR_geteuid32:
10903 ret = get_errno(geteuid());
10906 #ifdef TARGET_NR_getegid32
10907 case TARGET_NR_getegid32:
10908 ret = get_errno(getegid());
10911 #ifdef TARGET_NR_setreuid32
10912 case TARGET_NR_setreuid32:
10913 ret = get_errno(setreuid(arg1, arg2));
10916 #ifdef TARGET_NR_setregid32
10917 case TARGET_NR_setregid32:
10918 ret = get_errno(setregid(arg1, arg2));
10921 #ifdef TARGET_NR_getgroups32
10922 case TARGET_NR_getgroups32:
10924 int gidsetsize = arg1;
10925 uint32_t *target_grouplist;
10929 grouplist = alloca(gidsetsize * sizeof(gid_t));
10930 ret = get_errno(getgroups(gidsetsize, grouplist));
10931 if (gidsetsize == 0)
10933 if (!is_error(ret)) {
10934 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
10935 if (!target_grouplist) {
10936 ret = -TARGET_EFAULT;
10939 for(i = 0;i < ret; i++)
10940 target_grouplist[i] = tswap32(grouplist[i]);
10941 unlock_user(target_grouplist, arg2, gidsetsize * 4);
10946 #ifdef TARGET_NR_setgroups32
10947 case TARGET_NR_setgroups32:
10949 int gidsetsize = arg1;
10950 uint32_t *target_grouplist;
10954 grouplist = alloca(gidsetsize * sizeof(gid_t));
10955 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
10956 if (!target_grouplist) {
10957 ret = -TARGET_EFAULT;
10960 for(i = 0;i < gidsetsize; i++)
10961 grouplist[i] = tswap32(target_grouplist[i]);
10962 unlock_user(target_grouplist, arg2, 0);
10963 ret = get_errno(setgroups(gidsetsize, grouplist));
10967 #ifdef TARGET_NR_fchown32
10968 case TARGET_NR_fchown32:
10969 ret = get_errno(fchown(arg1, arg2, arg3));
10972 #ifdef TARGET_NR_setresuid32
10973 case TARGET_NR_setresuid32:
10974 ret = get_errno(sys_setresuid(arg1, arg2, arg3));
10977 #ifdef TARGET_NR_getresuid32
10978 case TARGET_NR_getresuid32:
10980 uid_t ruid, euid, suid;
10981 ret = get_errno(getresuid(&ruid, &euid, &suid));
10982 if (!is_error(ret)) {
10983 if (put_user_u32(ruid, arg1)
10984 || put_user_u32(euid, arg2)
10985 || put_user_u32(suid, arg3))
10991 #ifdef TARGET_NR_setresgid32
10992 case TARGET_NR_setresgid32:
10993 ret = get_errno(sys_setresgid(arg1, arg2, arg3));
10996 #ifdef TARGET_NR_getresgid32
10997 case TARGET_NR_getresgid32:
10999 gid_t rgid, egid, sgid;
11000 ret = get_errno(getresgid(&rgid, &egid, &sgid));
11001 if (!is_error(ret)) {
11002 if (put_user_u32(rgid, arg1)
11003 || put_user_u32(egid, arg2)
11004 || put_user_u32(sgid, arg3))
11010 #ifdef TARGET_NR_chown32
11011 case TARGET_NR_chown32:
11012 if (!(p = lock_user_string(arg1)))
11014 ret = get_errno(chown(p, arg2, arg3));
11015 unlock_user(p, arg1, 0);
11018 #ifdef TARGET_NR_setuid32
11019 case TARGET_NR_setuid32:
11020 ret = get_errno(sys_setuid(arg1));
11023 #ifdef TARGET_NR_setgid32
11024 case TARGET_NR_setgid32:
11025 ret = get_errno(sys_setgid(arg1));
11028 #ifdef TARGET_NR_setfsuid32
11029 case TARGET_NR_setfsuid32:
11030 ret = get_errno(setfsuid(arg1));
11033 #ifdef TARGET_NR_setfsgid32
11034 case TARGET_NR_setfsgid32:
11035 ret = get_errno(setfsgid(arg1));
11039 case TARGET_NR_pivot_root:
11040 goto unimplemented;
11041 #ifdef TARGET_NR_mincore
11042 case TARGET_NR_mincore:
11045 ret = -TARGET_EFAULT;
11046 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
11048 if (!(p = lock_user_string(arg3)))
11050 ret = get_errno(mincore(a, arg2, p));
11051 unlock_user(p, arg3, ret);
11053 unlock_user(a, arg1, 0);
11057 #ifdef TARGET_NR_arm_fadvise64_64
11058 case TARGET_NR_arm_fadvise64_64:
11059 /* arm_fadvise64_64 looks like fadvise64_64 but
11060 * with different argument order: fd, advice, offset, len
11061 * rather than the usual fd, offset, len, advice.
11062 * Note that offset and len are both 64-bit so appear as
11063 * pairs of 32-bit registers.
11065 ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11066 target_offset64(arg5, arg6), arg2);
11067 ret = -host_to_target_errno(ret);
11071 #if TARGET_ABI_BITS == 32
11073 #ifdef TARGET_NR_fadvise64_64
11074 case TARGET_NR_fadvise64_64:
11075 /* 6 args: fd, offset (high, low), len (high, low), advice */
11076 if (regpairs_aligned(cpu_env)) {
11077 /* offset is in (3,4), len in (5,6) and advice in 7 */
11084 ret = -host_to_target_errno(posix_fadvise(arg1,
11085 target_offset64(arg2, arg3),
11086 target_offset64(arg4, arg5),
11091 #ifdef TARGET_NR_fadvise64
11092 case TARGET_NR_fadvise64:
11093 /* 5 args: fd, offset (high, low), len, advice */
11094 if (regpairs_aligned(cpu_env)) {
11095 /* offset is in (3,4), len in 5 and advice in 6 */
11101 ret = -host_to_target_errno(posix_fadvise(arg1,
11102 target_offset64(arg2, arg3),
11107 #else /* not a 32-bit ABI */
11108 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11109 #ifdef TARGET_NR_fadvise64_64
11110 case TARGET_NR_fadvise64_64:
11112 #ifdef TARGET_NR_fadvise64
11113 case TARGET_NR_fadvise64:
11115 #ifdef TARGET_S390X
11117 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11118 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11119 case 6: arg4 = POSIX_FADV_DONTNEED; break;
11120 case 7: arg4 = POSIX_FADV_NOREUSE; break;
11124 ret = -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11127 #endif /* end of 64-bit ABI fadvise handling */
11129 #ifdef TARGET_NR_madvise
11130 case TARGET_NR_madvise:
11131 /* A straight passthrough may not be safe because qemu sometimes
11132 turns private file-backed mappings into anonymous mappings.
11133 This will break MADV_DONTNEED.
11134 This is a hint, so ignoring and returning success is ok. */
11135 ret = get_errno(0);
11138 #if TARGET_ABI_BITS == 32
11139 case TARGET_NR_fcntl64:
11143 from_flock64_fn *copyfrom = copy_from_user_flock64;
11144 to_flock64_fn *copyto = copy_to_user_flock64;
11147 if (((CPUARMState *)cpu_env)->eabi) {
11148 copyfrom = copy_from_user_eabi_flock64;
11149 copyto = copy_to_user_eabi_flock64;
11153 cmd = target_to_host_fcntl_cmd(arg2);
11154 if (cmd == -TARGET_EINVAL) {
11160 case TARGET_F_GETLK64:
11161 ret = copyfrom(&fl, arg3);
11165 ret = get_errno(fcntl(arg1, cmd, &fl));
11167 ret = copyto(arg3, &fl);
11171 case TARGET_F_SETLK64:
11172 case TARGET_F_SETLKW64:
11173 ret = copyfrom(&fl, arg3);
11177 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11180 ret = do_fcntl(arg1, arg2, arg3);
11186 #ifdef TARGET_NR_cacheflush
11187 case TARGET_NR_cacheflush:
11188 /* self-modifying code is handled automatically, so nothing needed */
11192 #ifdef TARGET_NR_security
11193 case TARGET_NR_security:
11194 goto unimplemented;
11196 #ifdef TARGET_NR_getpagesize
11197 case TARGET_NR_getpagesize:
11198 ret = TARGET_PAGE_SIZE;
11201 case TARGET_NR_gettid:
11202 ret = get_errno(gettid());
11204 #ifdef TARGET_NR_readahead
11205 case TARGET_NR_readahead:
11206 #if TARGET_ABI_BITS == 32
11207 if (regpairs_aligned(cpu_env)) {
11212 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
11214 ret = get_errno(readahead(arg1, arg2, arg3));
11219 #ifdef TARGET_NR_setxattr
11220 case TARGET_NR_listxattr:
11221 case TARGET_NR_llistxattr:
11225 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11227 ret = -TARGET_EFAULT;
11231 p = lock_user_string(arg1);
11233 if (num == TARGET_NR_listxattr) {
11234 ret = get_errno(listxattr(p, b, arg3));
11236 ret = get_errno(llistxattr(p, b, arg3));
11239 ret = -TARGET_EFAULT;
11241 unlock_user(p, arg1, 0);
11242 unlock_user(b, arg2, arg3);
11245 case TARGET_NR_flistxattr:
11249 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11251 ret = -TARGET_EFAULT;
11255 ret = get_errno(flistxattr(arg1, b, arg3));
11256 unlock_user(b, arg2, arg3);
11259 case TARGET_NR_setxattr:
11260 case TARGET_NR_lsetxattr:
11262 void *p, *n, *v = 0;
11264 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11266 ret = -TARGET_EFAULT;
11270 p = lock_user_string(arg1);
11271 n = lock_user_string(arg2);
11273 if (num == TARGET_NR_setxattr) {
11274 ret = get_errno(setxattr(p, n, v, arg4, arg5));
11276 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11279 ret = -TARGET_EFAULT;
11281 unlock_user(p, arg1, 0);
11282 unlock_user(n, arg2, 0);
11283 unlock_user(v, arg3, 0);
11286 case TARGET_NR_fsetxattr:
11290 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11292 ret = -TARGET_EFAULT;
11296 n = lock_user_string(arg2);
11298 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11300 ret = -TARGET_EFAULT;
11302 unlock_user(n, arg2, 0);
11303 unlock_user(v, arg3, 0);
11306 case TARGET_NR_getxattr:
11307 case TARGET_NR_lgetxattr:
11309 void *p, *n, *v = 0;
11311 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11313 ret = -TARGET_EFAULT;
11317 p = lock_user_string(arg1);
11318 n = lock_user_string(arg2);
11320 if (num == TARGET_NR_getxattr) {
11321 ret = get_errno(getxattr(p, n, v, arg4));
11323 ret = get_errno(lgetxattr(p, n, v, arg4));
11326 ret = -TARGET_EFAULT;
11328 unlock_user(p, arg1, 0);
11329 unlock_user(n, arg2, 0);
11330 unlock_user(v, arg3, arg4);
11333 case TARGET_NR_fgetxattr:
11337 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11339 ret = -TARGET_EFAULT;
11343 n = lock_user_string(arg2);
11345 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11347 ret = -TARGET_EFAULT;
11349 unlock_user(n, arg2, 0);
11350 unlock_user(v, arg3, arg4);
11353 case TARGET_NR_removexattr:
11354 case TARGET_NR_lremovexattr:
11357 p = lock_user_string(arg1);
11358 n = lock_user_string(arg2);
11360 if (num == TARGET_NR_removexattr) {
11361 ret = get_errno(removexattr(p, n));
11363 ret = get_errno(lremovexattr(p, n));
11366 ret = -TARGET_EFAULT;
11368 unlock_user(p, arg1, 0);
11369 unlock_user(n, arg2, 0);
11372 case TARGET_NR_fremovexattr:
11375 n = lock_user_string(arg2);
11377 ret = get_errno(fremovexattr(arg1, n));
11379 ret = -TARGET_EFAULT;
11381 unlock_user(n, arg2, 0);
11385 #endif /* CONFIG_ATTR */
11386 #ifdef TARGET_NR_set_thread_area
11387 case TARGET_NR_set_thread_area:
11388 #if defined(TARGET_MIPS)
11389 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11392 #elif defined(TARGET_CRIS)
11394 ret = -TARGET_EINVAL;
11396 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11400 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11401 ret = do_set_thread_area(cpu_env, arg1);
11403 #elif defined(TARGET_M68K)
11405 TaskState *ts = cpu->opaque;
11406 ts->tp_value = arg1;
11411 goto unimplemented_nowarn;
11414 #ifdef TARGET_NR_get_thread_area
11415 case TARGET_NR_get_thread_area:
11416 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11417 ret = do_get_thread_area(cpu_env, arg1);
11419 #elif defined(TARGET_M68K)
11421 TaskState *ts = cpu->opaque;
11422 ret = ts->tp_value;
11426 goto unimplemented_nowarn;
11429 #ifdef TARGET_NR_getdomainname
11430 case TARGET_NR_getdomainname:
11431 goto unimplemented_nowarn;
11434 #ifdef TARGET_NR_clock_gettime
11435 case TARGET_NR_clock_gettime:
11437 struct timespec ts;
11438 ret = get_errno(clock_gettime(arg1, &ts));
11439 if (!is_error(ret)) {
11440 host_to_target_timespec(arg2, &ts);
11445 #ifdef TARGET_NR_clock_getres
11446 case TARGET_NR_clock_getres:
11448 struct timespec ts;
11449 ret = get_errno(clock_getres(arg1, &ts));
11450 if (!is_error(ret)) {
11451 host_to_target_timespec(arg2, &ts);
11456 #ifdef TARGET_NR_clock_nanosleep
11457 case TARGET_NR_clock_nanosleep:
11459 struct timespec ts;
11460 target_to_host_timespec(&ts, arg3);
11461 ret = get_errno(safe_clock_nanosleep(arg1, arg2,
11462 &ts, arg4 ? &ts : NULL));
11464 host_to_target_timespec(arg4, &ts);
11466 #if defined(TARGET_PPC)
11467 /* clock_nanosleep is odd in that it returns positive errno values.
11468 * On PPC, CR0 bit 3 should be set in such a situation. */
11469 if (ret && ret != -TARGET_ERESTARTSYS) {
11470 ((CPUPPCState *)cpu_env)->crf[0] |= 1;
11477 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11478 case TARGET_NR_set_tid_address:
11479 ret = get_errno(set_tid_address((int *)g2h(arg1)));
11483 case TARGET_NR_tkill:
11484 ret = get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
11487 case TARGET_NR_tgkill:
11488 ret = get_errno(safe_tgkill((int)arg1, (int)arg2,
11489 target_to_host_signal(arg3)));
11492 #ifdef TARGET_NR_set_robust_list
11493 case TARGET_NR_set_robust_list:
11494 case TARGET_NR_get_robust_list:
11495 /* The ABI for supporting robust futexes has userspace pass
11496 * the kernel a pointer to a linked list which is updated by
11497 * userspace after the syscall; the list is walked by the kernel
11498 * when the thread exits. Since the linked list in QEMU guest
11499 * memory isn't a valid linked list for the host and we have
11500 * no way to reliably intercept the thread-death event, we can't
11501 * support these. Silently return ENOSYS so that guest userspace
11502 * falls back to a non-robust futex implementation (which should
11503 * be OK except in the corner case of the guest crashing while
11504 * holding a mutex that is shared with another process via
11507 goto unimplemented_nowarn;
11510 #if defined(TARGET_NR_utimensat)
11511 case TARGET_NR_utimensat:
11513 struct timespec *tsp, ts[2];
11517 target_to_host_timespec(ts, arg3);
11518 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
11522 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
11524 if (!(p = lock_user_string(arg2))) {
11525 ret = -TARGET_EFAULT;
11528 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
11529 unlock_user(p, arg2, 0);
11534 case TARGET_NR_futex:
11535 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
11537 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11538 case TARGET_NR_inotify_init:
11539 ret = get_errno(sys_inotify_init());
11542 #ifdef CONFIG_INOTIFY1
11543 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11544 case TARGET_NR_inotify_init1:
11545 ret = get_errno(sys_inotify_init1(arg1));
11549 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11550 case TARGET_NR_inotify_add_watch:
11551 p = lock_user_string(arg2);
11552 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
11553 unlock_user(p, arg2, 0);
11556 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11557 case TARGET_NR_inotify_rm_watch:
11558 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
11562 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11563 case TARGET_NR_mq_open:
11565 struct mq_attr posix_mq_attr;
11568 host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
11569 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
11572 p = lock_user_string(arg1 - 1);
11576 ret = get_errno(mq_open(p, host_flags, arg3, &posix_mq_attr));
11577 unlock_user (p, arg1, 0);
11581 case TARGET_NR_mq_unlink:
11582 p = lock_user_string(arg1 - 1);
11584 ret = -TARGET_EFAULT;
11587 ret = get_errno(mq_unlink(p));
11588 unlock_user (p, arg1, 0);
11591 case TARGET_NR_mq_timedsend:
11593 struct timespec ts;
11595 p = lock_user (VERIFY_READ, arg2, arg3, 1);
11597 target_to_host_timespec(&ts, arg5);
11598 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
11599 host_to_target_timespec(arg5, &ts);
11601 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
11603 unlock_user (p, arg2, arg3);
11607 case TARGET_NR_mq_timedreceive:
11609 struct timespec ts;
11612 p = lock_user (VERIFY_READ, arg2, arg3, 1);
11614 target_to_host_timespec(&ts, arg5);
11615 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11617 host_to_target_timespec(arg5, &ts);
11619 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11622 unlock_user (p, arg2, arg3);
11624 put_user_u32(prio, arg4);
11628 /* Not implemented for now... */
11629 /* case TARGET_NR_mq_notify: */
11632 case TARGET_NR_mq_getsetattr:
11634 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
11637 ret = mq_getattr(arg1, &posix_mq_attr_out);
11638 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
11641 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
11642 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
11649 #ifdef CONFIG_SPLICE
11650 #ifdef TARGET_NR_tee
11651 case TARGET_NR_tee:
11653 ret = get_errno(tee(arg1,arg2,arg3,arg4));
11657 #ifdef TARGET_NR_splice
11658 case TARGET_NR_splice:
11660 loff_t loff_in, loff_out;
11661 loff_t *ploff_in = NULL, *ploff_out = NULL;
11663 if (get_user_u64(loff_in, arg2)) {
11666 ploff_in = &loff_in;
11669 if (get_user_u64(loff_out, arg4)) {
11672 ploff_out = &loff_out;
11674 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
11676 if (put_user_u64(loff_in, arg2)) {
11681 if (put_user_u64(loff_out, arg4)) {
11688 #ifdef TARGET_NR_vmsplice
11689 case TARGET_NR_vmsplice:
11691 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11693 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
11694 unlock_iovec(vec, arg2, arg3, 0);
11696 ret = -host_to_target_errno(errno);
11701 #endif /* CONFIG_SPLICE */
11702 #ifdef CONFIG_EVENTFD
11703 #if defined(TARGET_NR_eventfd)
11704 case TARGET_NR_eventfd:
11705 ret = get_errno(eventfd(arg1, 0));
11706 fd_trans_unregister(ret);
11709 #if defined(TARGET_NR_eventfd2)
11710 case TARGET_NR_eventfd2:
11712 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
11713 if (arg2 & TARGET_O_NONBLOCK) {
11714 host_flags |= O_NONBLOCK;
11716 if (arg2 & TARGET_O_CLOEXEC) {
11717 host_flags |= O_CLOEXEC;
11719 ret = get_errno(eventfd(arg1, host_flags));
11720 fd_trans_unregister(ret);
11724 #endif /* CONFIG_EVENTFD */
11725 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11726 case TARGET_NR_fallocate:
11727 #if TARGET_ABI_BITS == 32
11728 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
11729 target_offset64(arg5, arg6)));
11731 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
11735 #if defined(CONFIG_SYNC_FILE_RANGE)
11736 #if defined(TARGET_NR_sync_file_range)
11737 case TARGET_NR_sync_file_range:
11738 #if TARGET_ABI_BITS == 32
11739 #if defined(TARGET_MIPS)
11740 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11741 target_offset64(arg5, arg6), arg7));
11743 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
11744 target_offset64(arg4, arg5), arg6));
11745 #endif /* !TARGET_MIPS */
11747 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
11751 #if defined(TARGET_NR_sync_file_range2)
11752 case TARGET_NR_sync_file_range2:
11753 /* This is like sync_file_range but the arguments are reordered */
11754 #if TARGET_ABI_BITS == 32
11755 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11756 target_offset64(arg5, arg6), arg2));
11758 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
11763 #if defined(TARGET_NR_signalfd4)
11764 case TARGET_NR_signalfd4:
11765 ret = do_signalfd4(arg1, arg2, arg4);
11768 #if defined(TARGET_NR_signalfd)
11769 case TARGET_NR_signalfd:
11770 ret = do_signalfd4(arg1, arg2, 0);
11773 #if defined(CONFIG_EPOLL)
11774 #if defined(TARGET_NR_epoll_create)
11775 case TARGET_NR_epoll_create:
11776 ret = get_errno(epoll_create(arg1));
11779 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11780 case TARGET_NR_epoll_create1:
11781 ret = get_errno(epoll_create1(arg1));
11784 #if defined(TARGET_NR_epoll_ctl)
11785 case TARGET_NR_epoll_ctl:
11787 struct epoll_event ep;
11788 struct epoll_event *epp = 0;
11790 struct target_epoll_event *target_ep;
11791 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
11794 ep.events = tswap32(target_ep->events);
11795 /* The epoll_data_t union is just opaque data to the kernel,
11796 * so we transfer all 64 bits across and need not worry what
11797 * actual data type it is.
11799 ep.data.u64 = tswap64(target_ep->data.u64);
11800 unlock_user_struct(target_ep, arg4, 0);
11803 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
11808 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11809 #if defined(TARGET_NR_epoll_wait)
11810 case TARGET_NR_epoll_wait:
11812 #if defined(TARGET_NR_epoll_pwait)
11813 case TARGET_NR_epoll_pwait:
11816 struct target_epoll_event *target_ep;
11817 struct epoll_event *ep;
11819 int maxevents = arg3;
11820 int timeout = arg4;
11822 if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
11823 ret = -TARGET_EINVAL;
11827 target_ep = lock_user(VERIFY_WRITE, arg2,
11828 maxevents * sizeof(struct target_epoll_event), 1);
11833 ep = g_try_new(struct epoll_event, maxevents);
11835 unlock_user(target_ep, arg2, 0);
11836 ret = -TARGET_ENOMEM;
11841 #if defined(TARGET_NR_epoll_pwait)
11842 case TARGET_NR_epoll_pwait:
11844 target_sigset_t *target_set;
11845 sigset_t _set, *set = &_set;
11848 if (arg6 != sizeof(target_sigset_t)) {
11849 ret = -TARGET_EINVAL;
11853 target_set = lock_user(VERIFY_READ, arg5,
11854 sizeof(target_sigset_t), 1);
11856 ret = -TARGET_EFAULT;
11859 target_to_host_sigset(set, target_set);
11860 unlock_user(target_set, arg5, 0);
11865 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11866 set, SIGSET_T_SIZE));
11870 #if defined(TARGET_NR_epoll_wait)
11871 case TARGET_NR_epoll_wait:
11872 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11877 ret = -TARGET_ENOSYS;
11879 if (!is_error(ret)) {
11881 for (i = 0; i < ret; i++) {
11882 target_ep[i].events = tswap32(ep[i].events);
11883 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
11885 unlock_user(target_ep, arg2,
11886 ret * sizeof(struct target_epoll_event));
11888 unlock_user(target_ep, arg2, 0);
11895 #ifdef TARGET_NR_prlimit64
11896 case TARGET_NR_prlimit64:
11898 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
11899 struct target_rlimit64 *target_rnew, *target_rold;
11900 struct host_rlimit64 rnew, rold, *rnewp = 0;
11901 int resource = target_to_host_resource(arg2);
11903 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
11906 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
11907 rnew.rlim_max = tswap64(target_rnew->rlim_max);
11908 unlock_user_struct(target_rnew, arg3, 0);
11912 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
11913 if (!is_error(ret) && arg4) {
11914 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
11917 target_rold->rlim_cur = tswap64(rold.rlim_cur);
11918 target_rold->rlim_max = tswap64(rold.rlim_max);
11919 unlock_user_struct(target_rold, arg4, 1);
11924 #ifdef TARGET_NR_gethostname
11925 case TARGET_NR_gethostname:
11927 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
11929 ret = get_errno(gethostname(name, arg2));
11930 unlock_user(name, arg1, arg2);
11932 ret = -TARGET_EFAULT;
11937 #ifdef TARGET_NR_atomic_cmpxchg_32
11938 case TARGET_NR_atomic_cmpxchg_32:
11940 /* should use start_exclusive from main.c */
11941 abi_ulong mem_value;
11942 if (get_user_u32(mem_value, arg6)) {
11943 target_siginfo_t info;
11944 info.si_signo = SIGSEGV;
11946 info.si_code = TARGET_SEGV_MAPERR;
11947 info._sifields._sigfault._addr = arg6;
11948 queue_signal((CPUArchState *)cpu_env, info.si_signo,
11949 QEMU_SI_FAULT, &info);
11953 if (mem_value == arg2)
11954 put_user_u32(arg1, arg6);
11959 #ifdef TARGET_NR_atomic_barrier
11960 case TARGET_NR_atomic_barrier:
11962 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
11968 #ifdef TARGET_NR_timer_create
11969 case TARGET_NR_timer_create:
11971 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
11973 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
11976 int timer_index = next_free_host_timer();
11978 if (timer_index < 0) {
11979 ret = -TARGET_EAGAIN;
11981 timer_t *phtimer = g_posix_timers + timer_index;
11984 phost_sevp = &host_sevp;
11985 ret = target_to_host_sigevent(phost_sevp, arg2);
11991 ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
11995 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12004 #ifdef TARGET_NR_timer_settime
12005 case TARGET_NR_timer_settime:
12007 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12008 * struct itimerspec * old_value */
12009 target_timer_t timerid = get_timer_id(arg1);
12013 } else if (arg3 == 0) {
12014 ret = -TARGET_EINVAL;
12016 timer_t htimer = g_posix_timers[timerid];
12017 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12019 target_to_host_itimerspec(&hspec_new, arg3);
12021 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12022 host_to_target_itimerspec(arg2, &hspec_old);
12028 #ifdef TARGET_NR_timer_gettime
12029 case TARGET_NR_timer_gettime:
12031 /* args: timer_t timerid, struct itimerspec *curr_value */
12032 target_timer_t timerid = get_timer_id(arg1);
12036 } else if (!arg2) {
12037 ret = -TARGET_EFAULT;
12039 timer_t htimer = g_posix_timers[timerid];
12040 struct itimerspec hspec;
12041 ret = get_errno(timer_gettime(htimer, &hspec));
12043 if (host_to_target_itimerspec(arg2, &hspec)) {
12044 ret = -TARGET_EFAULT;
12051 #ifdef TARGET_NR_timer_getoverrun
12052 case TARGET_NR_timer_getoverrun:
12054 /* args: timer_t timerid */
12055 target_timer_t timerid = get_timer_id(arg1);
12060 timer_t htimer = g_posix_timers[timerid];
12061 ret = get_errno(timer_getoverrun(htimer));
12063 fd_trans_unregister(ret);
12068 #ifdef TARGET_NR_timer_delete
12069 case TARGET_NR_timer_delete:
12071 /* args: timer_t timerid */
12072 target_timer_t timerid = get_timer_id(arg1);
12077 timer_t htimer = g_posix_timers[timerid];
12078 ret = get_errno(timer_delete(htimer));
12079 g_posix_timers[timerid] = 0;
12085 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12086 case TARGET_NR_timerfd_create:
12087 ret = get_errno(timerfd_create(arg1,
12088 target_to_host_bitmask(arg2, fcntl_flags_tbl)));
12092 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12093 case TARGET_NR_timerfd_gettime:
12095 struct itimerspec its_curr;
12097 ret = get_errno(timerfd_gettime(arg1, &its_curr));
12099 if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
12106 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12107 case TARGET_NR_timerfd_settime:
12109 struct itimerspec its_new, its_old, *p_new;
12112 if (target_to_host_itimerspec(&its_new, arg3)) {
12120 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12122 if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
12129 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12130 case TARGET_NR_ioprio_get:
12131 ret = get_errno(ioprio_get(arg1, arg2));
12135 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12136 case TARGET_NR_ioprio_set:
12137 ret = get_errno(ioprio_set(arg1, arg2, arg3));
12141 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12142 case TARGET_NR_setns:
12143 ret = get_errno(setns(arg1, arg2));
12146 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12147 case TARGET_NR_unshare:
12148 ret = get_errno(unshare(arg1));
12151 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12152 case TARGET_NR_kcmp:
12153 ret = get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
12159 gemu_log("qemu: Unsupported syscall: %d\n", num);
12160 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
12161 unimplemented_nowarn:
12163 ret = -TARGET_ENOSYS;
12168 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
12171 print_syscall_ret(num, ret);
12172 trace_guest_user_syscall_ret(cpu, num, ret);
12175 ret = -TARGET_EFAULT;