4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
29 #include <sys/mount.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
36 #include <linux/capability.h>
38 #include <sys/timex.h>
40 int __clone2(int (*fn)(void *), void *child_stack_base,
41 size_t stack_size, int flags, void *arg, ...);
43 #include <sys/socket.h>
47 #include <sys/times.h>
50 #include <sys/statfs.h>
52 #include <sys/sysinfo.h>
53 #include <sys/signalfd.h>
54 //#include <sys/user.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <linux/wireless.h>
58 #include <linux/icmp.h>
59 #include "qemu-common.h"
61 #include <sys/timerfd.h>
67 #include <sys/eventfd.h>
70 #include <sys/epoll.h>
73 #include "qemu/xattr.h"
75 #ifdef CONFIG_SENDFILE
76 #include <sys/sendfile.h>
79 #define termios host_termios
80 #define winsize host_winsize
81 #define termio host_termio
82 #define sgttyb host_sgttyb /* same as target */
83 #define tchars host_tchars /* same as target */
84 #define ltchars host_ltchars /* same as target */
86 #include <linux/termios.h>
87 #include <linux/unistd.h>
88 #include <linux/cdrom.h>
89 #include <linux/hdreg.h>
90 #include <linux/soundcard.h>
92 #include <linux/mtio.h>
94 #if defined(CONFIG_FIEMAP)
95 #include <linux/fiemap.h>
99 #include <linux/dm-ioctl.h>
100 #include <linux/reboot.h>
101 #include <linux/route.h>
102 #include <linux/filter.h>
103 #include <linux/blkpg.h>
104 #include <netpacket/packet.h>
105 #include <linux/netlink.h>
106 #ifdef CONFIG_RTNETLINK
107 #include <linux/rtnetlink.h>
108 #include <linux/if_bridge.h>
110 #include <linux/audit.h>
111 #include "linux_loop.h"
/* CLONE_IO may be absent from older host headers; provide it locally. */
117 #define CLONE_IO 0x80000000 /* Clone io context */
120 /* We can't directly call the host clone syscall, because this will
121 * badly confuse libc (breaking mutexes, for example). So we must
122 * divide clone flags into:
123 * * flag combinations that look like pthread_create()
124 * * flag combinations that look like fork()
125 * * flags we can implement within QEMU itself
126 * * flags we can't support and will return an error for
128 /* For thread creation, all these flags must be present; for
129 * fork, none must be present.
131 #define CLONE_THREAD_FLAGS \
132 (CLONE_VM | CLONE_FS | CLONE_FILES | \
133 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
135 /* These flags are ignored:
136 * CLONE_DETACHED is now ignored by the kernel;
137 * CLONE_IO is just an optimisation hint to the I/O scheduler
139 #define CLONE_IGNORED_FLAGS \
140 (CLONE_DETACHED | CLONE_IO)
142 /* Flags for fork which we can implement within QEMU itself */
143 #define CLONE_OPTIONAL_FORK_FLAGS \
144 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
145 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
147 /* Flags for thread creation which we can implement within QEMU itself */
148 #define CLONE_OPTIONAL_THREAD_FLAGS \
149 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
150 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
/* Any bit NOT in (exit-signal mask | optional | ignored) makes a fork request invalid. */
152 #define CLONE_INVALID_FORK_FLAGS \
153 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
/* Same idea for thread creation: the mandatory thread flags are also allowed. */
155 #define CLONE_INVALID_THREAD_FLAGS \
156 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
157 CLONE_IGNORED_FLAGS))
159 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
160 * have almost all been allocated. We cannot support any of
161 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
162 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
163 * The checks against the invalid thread masks above will catch these.
164 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
168 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
169 * once. This exercises the codepaths for restart.
171 //#define DEBUG_ERESTARTSYS
/* msdos_fs.h is left un-included (presumably it conflicts with other headers
 * — TODO confirm); the two VFAT readdir ioctl numbers are re-derived here. */
173 //#include <linux/msdos_fs.h>
174 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
175 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
/* Generator macros: _syscallN(type, name, ...) expands to a static function
 * `name` taking N typed arguments and invoking the raw host syscall(2)
 * with __NR_<name>.  Used below to call syscalls glibc does not wrap
 * (or wraps with different semantics). */
185 #define _syscall0(type,name) \
186 static type name (void) \
188 return syscall(__NR_##name); \
191 #define _syscall1(type,name,type1,arg1) \
192 static type name (type1 arg1) \
194 return syscall(__NR_##name, arg1); \
197 #define _syscall2(type,name,type1,arg1,type2,arg2) \
198 static type name (type1 arg1,type2 arg2) \
200 return syscall(__NR_##name, arg1, arg2); \
203 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
204 static type name (type1 arg1,type2 arg2,type3 arg3) \
206 return syscall(__NR_##name, arg1, arg2, arg3); \
209 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
210 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
212 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
215 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
217 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
219 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
223 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
224 type5,arg5,type6,arg6) \
225 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
228 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
/* Map the local sys_* wrapper names onto the real host syscall numbers so the
 * _syscallN macros above can generate them without clashing with libc names. */
232 #define __NR_sys_uname __NR_uname
233 #define __NR_sys_getcwd1 __NR_getcwd
234 #define __NR_sys_getdents __NR_getdents
235 #define __NR_sys_getdents64 __NR_getdents64
236 #define __NR_sys_getpriority __NR_getpriority
237 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
238 #define __NR_sys_syslog __NR_syslog
239 #define __NR_sys_futex __NR_futex
240 #define __NR_sys_inotify_init __NR_inotify_init
241 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
242 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
/* Some 64-bit hosts only have lseek; alias _llseek onto it there. */
244 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
246 #define __NR__llseek __NR_lseek
249 /* Newer kernel ports have llseek() instead of _llseek() */
250 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
251 #define TARGET_NR__llseek TARGET_NR_llseek
/* Instantiate direct syscall wrappers for calls the host libc may not expose.
 * Each is guarded so it is only emitted when both the target needs it
 * (TARGET_NR_*) and the host kernel provides it (__NR_*). */
255 _syscall0(int, gettid)
257 /* This is a replacement for the host gettid() and must return a host
259 static int gettid(void) {
263 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
264 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
266 #if !defined(__NR_getdents) || \
267 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
268 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
270 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
271 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
272 loff_t *, res, uint, wh);
274 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
275 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
276 #ifdef __NR_exit_group
277 _syscall1(int,exit_group,int,error_code)
279 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
280 _syscall1(int,set_tid_address,int *,tidptr)
282 #if defined(TARGET_NR_futex) && defined(__NR_futex)
283 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
284 const struct timespec *,timeout,int *,uaddr2,int,val3)
/* Raw affinity syscalls: the glibc wrappers use cpu_set_t, but the guest
 * passes its own mask layout, so we go straight to the kernel. */
286 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
287 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
288 unsigned long *, user_mask_ptr);
289 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
290 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
291 unsigned long *, user_mask_ptr);
292 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
294 _syscall2(int, capget, struct __user_cap_header_struct *, header,
295 struct __user_cap_data_struct *, data);
296 _syscall2(int, capset, struct __user_cap_header_struct *, header,
297 struct __user_cap_data_struct *, data);
298 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
299 _syscall2(int, ioprio_get, int, which, int, who)
301 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
302 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
304 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
305 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
/* Bidirectional translation table between target and host open(2)/fcntl(2)
 * flag bits.  Each row is { target_mask, target_bits, host_mask, host_bits }.
 * Flags that may be missing on some hosts are conditionally included. */
308 static bitmask_transtbl fcntl_flags_tbl[] = {
309 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
310 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
311 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
312 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
313 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
314 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
315 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
316 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
317 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
318 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
319 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
320 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
321 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
322 #if defined(O_DIRECT)
323 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
325 #if defined(O_NOATIME)
326 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
328 #if defined(O_CLOEXEC)
329 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
332 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
334 /* Don't terminate the list prematurely on 64-bit host+guest. */
335 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
336 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
/* Local copies of the kernel rtnetlink IFLA_* attribute enumerations
 * (bridge, link, bridge-port, link-info, inet, inet6).  They carry a
 * QEMU_ prefix, presumably so newer attribute values can be handled even
 * when the host's <linux/if_bridge.h>/<linux/rtnetlink.h> headers are too
 * old to define them — TODO confirm against the enum headers elided here.
 * Ordering must match the kernel ABI exactly, since the enumerator values
 * are the on-the-wire netlink attribute types. */
343 QEMU_IFLA_BR_FORWARD_DELAY,
344 QEMU_IFLA_BR_HELLO_TIME,
345 QEMU_IFLA_BR_MAX_AGE,
346 QEMU_IFLA_BR_AGEING_TIME,
347 QEMU_IFLA_BR_STP_STATE,
348 QEMU_IFLA_BR_PRIORITY,
349 QEMU_IFLA_BR_VLAN_FILTERING,
350 QEMU_IFLA_BR_VLAN_PROTOCOL,
351 QEMU_IFLA_BR_GROUP_FWD_MASK,
352 QEMU_IFLA_BR_ROOT_ID,
353 QEMU_IFLA_BR_BRIDGE_ID,
354 QEMU_IFLA_BR_ROOT_PORT,
355 QEMU_IFLA_BR_ROOT_PATH_COST,
356 QEMU_IFLA_BR_TOPOLOGY_CHANGE,
357 QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
358 QEMU_IFLA_BR_HELLO_TIMER,
359 QEMU_IFLA_BR_TCN_TIMER,
360 QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER,
361 QEMU_IFLA_BR_GC_TIMER,
362 QEMU_IFLA_BR_GROUP_ADDR,
363 QEMU_IFLA_BR_FDB_FLUSH,
364 QEMU_IFLA_BR_MCAST_ROUTER,
365 QEMU_IFLA_BR_MCAST_SNOOPING,
366 QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR,
367 QEMU_IFLA_BR_MCAST_QUERIER,
368 QEMU_IFLA_BR_MCAST_HASH_ELASTICITY,
369 QEMU_IFLA_BR_MCAST_HASH_MAX,
370 QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT,
371 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT,
372 QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL,
373 QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL,
374 QEMU_IFLA_BR_MCAST_QUERIER_INTVL,
375 QEMU_IFLA_BR_MCAST_QUERY_INTVL,
376 QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL,
377 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL,
378 QEMU_IFLA_BR_NF_CALL_IPTABLES,
379 QEMU_IFLA_BR_NF_CALL_IP6TABLES,
380 QEMU_IFLA_BR_NF_CALL_ARPTABLES,
381 QEMU_IFLA_BR_VLAN_DEFAULT_PVID,
383 QEMU_IFLA_BR_VLAN_STATS_ENABLED,
384 QEMU_IFLA_BR_MCAST_STATS_ENABLED,
/* IFLA_* per-link attributes. */
408 QEMU_IFLA_NET_NS_PID,
411 QEMU_IFLA_VFINFO_LIST,
419 QEMU_IFLA_PROMISCUITY,
420 QEMU_IFLA_NUM_TX_QUEUES,
421 QEMU_IFLA_NUM_RX_QUEUES,
423 QEMU_IFLA_PHYS_PORT_ID,
424 QEMU_IFLA_CARRIER_CHANGES,
425 QEMU_IFLA_PHYS_SWITCH_ID,
426 QEMU_IFLA_LINK_NETNSID,
427 QEMU_IFLA_PHYS_PORT_NAME,
428 QEMU_IFLA_PROTO_DOWN,
429 QEMU_IFLA_GSO_MAX_SEGS,
430 QEMU_IFLA_GSO_MAX_SIZE,
/* IFLA_BRPORT_* bridge-port attributes. */
437 QEMU_IFLA_BRPORT_UNSPEC,
438 QEMU_IFLA_BRPORT_STATE,
439 QEMU_IFLA_BRPORT_PRIORITY,
440 QEMU_IFLA_BRPORT_COST,
441 QEMU_IFLA_BRPORT_MODE,
442 QEMU_IFLA_BRPORT_GUARD,
443 QEMU_IFLA_BRPORT_PROTECT,
444 QEMU_IFLA_BRPORT_FAST_LEAVE,
445 QEMU_IFLA_BRPORT_LEARNING,
446 QEMU_IFLA_BRPORT_UNICAST_FLOOD,
447 QEMU_IFLA_BRPORT_PROXYARP,
448 QEMU_IFLA_BRPORT_LEARNING_SYNC,
449 QEMU_IFLA_BRPORT_PROXYARP_WIFI,
450 QEMU_IFLA_BRPORT_ROOT_ID,
451 QEMU_IFLA_BRPORT_BRIDGE_ID,
452 QEMU_IFLA_BRPORT_DESIGNATED_PORT,
453 QEMU_IFLA_BRPORT_DESIGNATED_COST,
456 QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
457 QEMU_IFLA_BRPORT_CONFIG_PENDING,
458 QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER,
459 QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER,
460 QEMU_IFLA_BRPORT_HOLD_TIMER,
461 QEMU_IFLA_BRPORT_FLUSH,
462 QEMU_IFLA_BRPORT_MULTICAST_ROUTER,
463 QEMU_IFLA_BRPORT_PAD,
464 QEMU___IFLA_BRPORT_MAX
/* IFLA_INFO_* nested link-info attributes. */
468 QEMU_IFLA_INFO_UNSPEC,
471 QEMU_IFLA_INFO_XSTATS,
472 QEMU_IFLA_INFO_SLAVE_KIND,
473 QEMU_IFLA_INFO_SLAVE_DATA,
474 QEMU___IFLA_INFO_MAX,
/* IFLA_INET_* (IPv4) attributes. */
478 QEMU_IFLA_INET_UNSPEC,
480 QEMU___IFLA_INET_MAX,
/* IFLA_INET6_* (IPv6) attributes. */
484 QEMU_IFLA_INET6_UNSPEC,
485 QEMU_IFLA_INET6_FLAGS,
486 QEMU_IFLA_INET6_CONF,
487 QEMU_IFLA_INET6_STATS,
488 QEMU_IFLA_INET6_MCAST,
489 QEMU_IFLA_INET6_CACHEINFO,
490 QEMU_IFLA_INET6_ICMP6STATS,
491 QEMU_IFLA_INET6_TOKEN,
492 QEMU_IFLA_INET6_ADDR_GEN_MODE,
493 QEMU___IFLA_INET6_MAX
/* Per-fd data/address translators.  Certain host fds (netlink sockets,
 * signalfds, ...) return host-format data that must be converted to the
 * target's layout, and vice versa; a TargetFdTrans registered against an
 * fd supplies the conversion callbacks. */
496 typedef abi_long (*TargetFdDataFunc)(void *, size_t);
497 typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t);
498 typedef struct TargetFdTrans {
499 TargetFdDataFunc host_to_target_data;
500 TargetFdDataFunc target_to_host_data;
501 TargetFdAddrFunc target_to_host_addr;
/* Lookup table indexed by fd; grown on demand in fd_trans_register(). */
504 static TargetFdTrans **target_fd_trans;
506 static unsigned int target_fd_max;
/* Return the target->host data converter for fd, if one is registered. */
508 static TargetFdDataFunc fd_trans_target_to_host_data(int fd)
510 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
511 return target_fd_trans[fd]->target_to_host_data;
/* Return the host->target data converter for fd, if one is registered. */
516 static TargetFdDataFunc fd_trans_host_to_target_data(int fd)
518 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
519 return target_fd_trans[fd]->host_to_target_data;
/* Return the target->host sockaddr converter for fd, if one is registered. */
524 static TargetFdAddrFunc fd_trans_target_to_host_addr(int fd)
526 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
527 return target_fd_trans[fd]->target_to_host_addr;
/* Attach a translator to fd, growing the table in 64-entry slices and
 * zero-filling the newly added slots. */
532 static void fd_trans_register(int fd, TargetFdTrans *trans)
536 if (fd >= target_fd_max) {
537 oldmax = target_fd_max;
538 target_fd_max = ((fd >> 6) + 1) << 6; /* by slice of 64 entries */
539 target_fd_trans = g_renew(TargetFdTrans *,
540 target_fd_trans, target_fd_max);
541 memset((void *)(target_fd_trans + oldmax), 0,
542 (target_fd_max - oldmax) * sizeof(TargetFdTrans *));
544 target_fd_trans[fd] = trans;
/* Drop any translator registered for fd (e.g. when the fd is closed). */
547 static void fd_trans_unregister(int fd)
549 if (fd >= 0 && fd < target_fd_max) {
550 target_fd_trans[fd] = NULL;
/* On dup: clear newfd's translator, then copy oldfd's if it has one. */
554 static void fd_trans_dup(int oldfd, int newfd)
556 fd_trans_unregister(newfd);
557 if (oldfd < target_fd_max && target_fd_trans[oldfd]) {
558 fd_trans_register(newfd, target_fd_trans[oldfd]);
/* getcwd wrapper matching the kernel syscall convention: on success it
 * returns the string length including the NUL, not a pointer. */
562 static int sys_getcwd1(char *buf, size_t size)
564 if (getcwd(buf, size) == NULL) {
565 /* getcwd() sets errno */
568 return strlen(buf)+1;
/* utimensat: use the raw syscall when the host kernel has it; otherwise a
 * fallback sys_utimensat() (body elided here) is provided. */
571 #ifdef TARGET_NR_utimensat
572 #if defined(__NR_utimensat)
573 #define __NR_sys_utimensat __NR_utimensat
574 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
575 const struct timespec *,tsp,int,flags)
577 static int sys_utimensat(int dirfd, const char *pathname,
578 const struct timespec times[2], int flags)
584 #endif /* TARGET_NR_utimensat */
/* inotify: prefer the libc wrappers when CONFIG_INOTIFY detected them. */
586 #ifdef CONFIG_INOTIFY
587 #include <sys/inotify.h>
589 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
590 static int sys_inotify_init(void)
592 return (inotify_init());
595 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
596 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
598 return (inotify_add_watch(fd, pathname, mask));
601 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
602 static int sys_inotify_rm_watch(int fd, int32_t wd)
604 return (inotify_rm_watch(fd, wd))
607 #ifdef CONFIG_INOTIFY1
608 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
609 static int sys_inotify_init1(int flags)
611 return (inotify_init1(flags));
616 /* Userspace can usually survive runtime without inotify */
617 #undef TARGET_NR_inotify_init
618 #undef TARGET_NR_inotify_init1
619 #undef TARGET_NR_inotify_add_watch
620 #undef TARGET_NR_inotify_rm_watch
621 #endif /* CONFIG_INOTIFY */
/* prlimit64: __NR_prlimit64 is forced to -1 when absent so the _syscall4
 * stub still compiles; the call then fails at runtime with ENOSYS. */
623 #if defined(TARGET_NR_prlimit64)
624 #ifndef __NR_prlimit64
625 # define __NR_prlimit64 -1
627 #define __NR_sys_prlimit64 __NR_prlimit64
628 /* The glibc rlimit structure may not be that used by the underlying syscall */
629 struct host_rlimit64 {
633 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
634 const struct host_rlimit64 *, new_limit,
635 struct host_rlimit64 *, old_limit)
639 #if defined(TARGET_NR_timer_create)
640 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
641 static timer_t g_posix_timers[32] = { 0, } ;
/* Find and claim a free slot in g_posix_timers; marks it with the dummy
 * value (timer_t)1 to reserve it before the real timer id is stored. */
643 static inline int next_free_host_timer(void)
646 /* FIXME: Does finding the next free slot require a lock? */
647 for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
648 if (g_posix_timers[k] == 0) {
649 g_posix_timers[k] = (timer_t) 1;
657 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
/* regpairs_aligned(): 1 if the target ABI passes 64-bit syscall args in
 * aligned register pairs (so a padding register may precede them). */
659 static inline int regpairs_aligned(void *cpu_env) {
660 return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
662 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
663 static inline int regpairs_aligned(void *cpu_env) { return 1; }
664 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
665 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
666 * of registers which translates to the same as ARM/MIPS, because we start with
668 static inline int regpairs_aligned(void *cpu_env) { return 1; }
670 static inline int regpairs_aligned(void *cpu_env) { return 0; }
673 #define ERRNO_TABLE_SIZE 1200
675 /* target_to_host_errno_table[] is initialized from
676 * host_to_target_errno_table[] in syscall_init(). */
677 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
681 * This list is the union of errno values overridden in asm-<arch>/errno.h
682 * minus the errnos that are not actually generic to all archs.
/* Sparse designated-initializer map host errno -> target errno.  Entries
 * whose value would be 0 are absent, and 0 means "no translation" to the
 * helpers below (the errno is then passed through unchanged). */
684 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
685 [EAGAIN] = TARGET_EAGAIN,
686 [EIDRM] = TARGET_EIDRM,
687 [ECHRNG] = TARGET_ECHRNG,
688 [EL2NSYNC] = TARGET_EL2NSYNC,
689 [EL3HLT] = TARGET_EL3HLT,
690 [EL3RST] = TARGET_EL3RST,
691 [ELNRNG] = TARGET_ELNRNG,
692 [EUNATCH] = TARGET_EUNATCH,
693 [ENOCSI] = TARGET_ENOCSI,
694 [EL2HLT] = TARGET_EL2HLT,
695 [EDEADLK] = TARGET_EDEADLK,
696 [ENOLCK] = TARGET_ENOLCK,
697 [EBADE] = TARGET_EBADE,
698 [EBADR] = TARGET_EBADR,
699 [EXFULL] = TARGET_EXFULL,
700 [ENOANO] = TARGET_ENOANO,
701 [EBADRQC] = TARGET_EBADRQC,
702 [EBADSLT] = TARGET_EBADSLT,
703 [EBFONT] = TARGET_EBFONT,
704 [ENOSTR] = TARGET_ENOSTR,
705 [ENODATA] = TARGET_ENODATA,
706 [ETIME] = TARGET_ETIME,
707 [ENOSR] = TARGET_ENOSR,
708 [ENONET] = TARGET_ENONET,
709 [ENOPKG] = TARGET_ENOPKG,
710 [EREMOTE] = TARGET_EREMOTE,
711 [ENOLINK] = TARGET_ENOLINK,
712 [EADV] = TARGET_EADV,
713 [ESRMNT] = TARGET_ESRMNT,
714 [ECOMM] = TARGET_ECOMM,
715 [EPROTO] = TARGET_EPROTO,
716 [EDOTDOT] = TARGET_EDOTDOT,
717 [EMULTIHOP] = TARGET_EMULTIHOP,
718 [EBADMSG] = TARGET_EBADMSG,
719 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
720 [EOVERFLOW] = TARGET_EOVERFLOW,
721 [ENOTUNIQ] = TARGET_ENOTUNIQ,
722 [EBADFD] = TARGET_EBADFD,
723 [EREMCHG] = TARGET_EREMCHG,
724 [ELIBACC] = TARGET_ELIBACC,
725 [ELIBBAD] = TARGET_ELIBBAD,
726 [ELIBSCN] = TARGET_ELIBSCN,
727 [ELIBMAX] = TARGET_ELIBMAX,
728 [ELIBEXEC] = TARGET_ELIBEXEC,
729 [EILSEQ] = TARGET_EILSEQ,
730 [ENOSYS] = TARGET_ENOSYS,
731 [ELOOP] = TARGET_ELOOP,
732 [ERESTART] = TARGET_ERESTART,
733 [ESTRPIPE] = TARGET_ESTRPIPE,
734 [ENOTEMPTY] = TARGET_ENOTEMPTY,
735 [EUSERS] = TARGET_EUSERS,
736 [ENOTSOCK] = TARGET_ENOTSOCK,
737 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
738 [EMSGSIZE] = TARGET_EMSGSIZE,
739 [EPROTOTYPE] = TARGET_EPROTOTYPE,
740 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
741 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
742 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
743 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
744 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
745 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
746 [EADDRINUSE] = TARGET_EADDRINUSE,
747 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
748 [ENETDOWN] = TARGET_ENETDOWN,
749 [ENETUNREACH] = TARGET_ENETUNREACH,
750 [ENETRESET] = TARGET_ENETRESET,
751 [ECONNABORTED] = TARGET_ECONNABORTED,
752 [ECONNRESET] = TARGET_ECONNRESET,
753 [ENOBUFS] = TARGET_ENOBUFS,
754 [EISCONN] = TARGET_EISCONN,
755 [ENOTCONN] = TARGET_ENOTCONN,
756 [EUCLEAN] = TARGET_EUCLEAN,
757 [ENOTNAM] = TARGET_ENOTNAM,
758 [ENAVAIL] = TARGET_ENAVAIL,
759 [EISNAM] = TARGET_EISNAM,
760 [EREMOTEIO] = TARGET_EREMOTEIO,
761 [EDQUOT] = TARGET_EDQUOT,
762 [ESHUTDOWN] = TARGET_ESHUTDOWN,
763 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
764 [ETIMEDOUT] = TARGET_ETIMEDOUT,
765 [ECONNREFUSED] = TARGET_ECONNREFUSED,
766 [EHOSTDOWN] = TARGET_EHOSTDOWN,
767 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
768 [EALREADY] = TARGET_EALREADY,
769 [EINPROGRESS] = TARGET_EINPROGRESS,
770 [ESTALE] = TARGET_ESTALE,
771 [ECANCELED] = TARGET_ECANCELED,
772 [ENOMEDIUM] = TARGET_ENOMEDIUM,
773 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
775 [ENOKEY] = TARGET_ENOKEY,
778 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
781 [EKEYREVOKED] = TARGET_EKEYREVOKED,
784 [EKEYREJECTED] = TARGET_EKEYREJECTED,
787 [EOWNERDEAD] = TARGET_EOWNERDEAD,
789 #ifdef ENOTRECOVERABLE
790 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
793 [ENOMSG] = TARGET_ENOMSG,
/* Translate a host errno to the target's value; unmapped or out-of-range
 * values fall through (returned unchanged by the elided tail). */
797 static inline int host_to_target_errno(int err)
799 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
800 host_to_target_errno_table[err]) {
801 return host_to_target_errno_table[err];
/* Inverse mapping, using the table filled in by syscall_init(). */
806 static inline int target_to_host_errno(int err)
808 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
809 target_to_host_errno_table[err]) {
810 return target_to_host_errno_table[err];
/* Convert a host syscall result: on failure return -target_errno. */
815 static inline abi_long get_errno(abi_long ret)
818 return -host_to_target_errno(errno);
/* A return value in the top 4096 values of the address space is an error. */
823 static inline int is_error(abi_long ret)
825 return (abi_ulong)ret >= (abi_ulong)(-4096);
/* strerror() for target errno values, with QEMU-internal pseudo-errnos
 * (ERESTARTSYS / QEMU_ESIGRETURN) handled specially. */
828 const char *target_strerror(int err)
830 if (err == TARGET_ERESTARTSYS) {
831 return "To be restarted";
833 if (err == TARGET_QEMU_ESIGRETURN) {
834 return "Successful exit from sigreturn";
837 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
840 return strerror(target_to_host_errno(err));
/* safe_syscallN: like _syscallN but routed through safe_syscall(), which
 * cooperates with the signal handling code so guest signals can interrupt
 * a blocked syscall and be delivered with correct -ERESTARTSYS semantics. */
843 #define safe_syscall0(type, name) \
844 static type safe_##name(void) \
846 return safe_syscall(__NR_##name); \
849 #define safe_syscall1(type, name, type1, arg1) \
850 static type safe_##name(type1 arg1) \
852 return safe_syscall(__NR_##name, arg1); \
855 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
856 static type safe_##name(type1 arg1, type2 arg2) \
858 return safe_syscall(__NR_##name, arg1, arg2); \
861 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
862 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
864 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
867 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
869 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
871 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
874 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
875 type4, arg4, type5, arg5) \
876 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
879 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
882 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
883 type4, arg4, type5, arg5, type6, arg6) \
884 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
885 type5 arg5, type6 arg6) \
887 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
/* Instantiate safe_* wrappers for every potentially-blocking syscall the
 * syscall dispatcher needs. */
890 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
891 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
892 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
893 int, flags, mode_t, mode)
894 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
895 struct rusage *, rusage)
896 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
897 int, options, struct rusage *, rusage)
898 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
899 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
900 fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
901 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
902 struct timespec *, tsp, const sigset_t *, sigmask,
904 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
905 int, maxevents, int, timeout, const sigset_t *, sigmask,
907 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
908 const struct timespec *,timeout,int *,uaddr2,int,val3)
909 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
910 safe_syscall2(int, kill, pid_t, pid, int, sig)
911 safe_syscall2(int, tkill, int, tid, int, sig)
912 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
913 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
914 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
915 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
917 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
918 int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
919 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
920 int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
921 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
922 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
923 safe_syscall2(int, flock, int, fd, int, operation)
924 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
925 const struct timespec *, uts, size_t, sigsetsize)
926 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
928 safe_syscall2(int, nanosleep, const struct timespec *, req,
929 struct timespec *, rem)
930 #ifdef TARGET_NR_clock_nanosleep
931 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
932 const struct timespec *, req, struct timespec *, rem)
935 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
937 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
938 long, msgtype, int, flags)
939 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
940 unsigned, nsops, const struct timespec *, timeout)
942 /* This host kernel architecture uses a single ipc syscall; fake up
943 * wrappers for the sub-operations to hide this implementation detail.
944 * Annoyingly we can't include linux/ipc.h to get the constant definitions
945 * for the call parameter because some structs in there conflict with the
946 * sys/ipc.h ones. So we just define them here, and rely on them being
947 * the same for all host architectures.
949 #define Q_SEMTIMEDOP 4
952 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
954 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
955 void *, ptr, long, fifth)
/* msgsnd via the multiplexed ipc(2) entry point. */
956 static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
958 return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
/* msgrcv via ipc(2); note version 1 of the call encoding is used here. */
960 static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
962 return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
/* semtimedop via ipc(2). */
964 static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
965 const struct timespec *timeout)
967 return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
/* POSIX message queue timed send/receive, when the host provides them. */
971 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
972 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
973 size_t, len, unsigned, prio, const struct timespec *, timeout)
974 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
975 size_t, len, unsigned *, prio, const struct timespec *, timeout)
977 /* We do ioctl like this rather than via safe_syscall3 to preserve the
978 * "third argument might be integer or pointer or not present" behaviour of
981 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
982 /* Similarly for fcntl. Note that callers must always:
983 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
984 * use the flock64 struct rather than unsuffixed flock
985 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
988 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
990 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
/* Convert a host socket type (base type plus SOCK_CLOEXEC/SOCK_NONBLOCK
 * modifier bits) to the target's encoding.  Unrecognised base types are
 * passed through unchanged. */
993 static inline int host_to_target_sock_type(int host_type)
997 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
999 target_type = TARGET_SOCK_DGRAM;
1002 target_type = TARGET_SOCK_STREAM;
1005 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
1009 #if defined(SOCK_CLOEXEC)
1010 if (host_type & SOCK_CLOEXEC) {
1011 target_type |= TARGET_SOCK_CLOEXEC;
1015 #if defined(SOCK_NONBLOCK)
1016 if (host_type & SOCK_NONBLOCK) {
1017 target_type |= TARGET_SOCK_NONBLOCK;
/* State for emulating the guest's program break. */
1024 static abi_ulong target_brk;
1025 static abi_ulong target_original_brk;
/* brk_page: first host-page-aligned address above the mapped heap. */
1026 static abi_ulong brk_page;
/* Record the initial break set up by the loader. */
1028 void target_set_brk(abi_ulong new_brk)
1030 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
1031 brk_page = HOST_PAGE_ALIGN(target_brk);
1034 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
1035 #define DEBUGF_BRK(message, args...)
1037 /* do_brk() must return target values and target errnos. */
1038 abi_long do_brk(abi_ulong new_brk)
1040 abi_long mapped_addr;
1041 abi_ulong new_alloc_size;
1043 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
1046 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
/* Shrinking below the original break is rejected (current break returned). */
1049 if (new_brk < target_original_brk) {
1050 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
1055 /* If the new brk is less than the highest page reserved to the
1056 * target heap allocation, set it and we're almost done... */
1057 if (new_brk <= brk_page) {
1058 /* Heap contents are initialized to zero, as for anonymous
1060 if (new_brk > target_brk) {
1061 memset(g2h(target_brk), 0, new_brk - target_brk);
1063 target_brk = new_brk;
1064 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
1068 /* We need to allocate more memory after the brk... Note that
1069 * we don't use MAP_FIXED because that will map over the top of
1070 * any existing mapping (like the one with the host libc or qemu
1071 * itself); instead we treat "mapped but at wrong address" as
1072 * a failure and unmap again.
1074 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
1075 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
1076 PROT_READ|PROT_WRITE,
1077 MAP_ANON|MAP_PRIVATE, 0, 0));
1079 if (mapped_addr == brk_page) {
1080 /* Heap contents are initialized to zero, as for anonymous
1081 * mapped pages. Technically the new pages are already
1082 * initialized to zero since they *are* anonymous mapped
1083 * pages, however we have to take care with the contents that
1084 * come from the remaining part of the previous page: it may
1085 * contains garbage data due to a previous heap usage (grown
1086 * then shrunken). */
1087 memset(g2h(target_brk), 0, brk_page - target_brk);
1089 target_brk = new_brk;
1090 brk_page = HOST_PAGE_ALIGN(target_brk);
1091 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
1094 } else if (mapped_addr != -1) {
1095 /* Mapped but at wrong address, meaning there wasn't actually
1096 * enough space for this brk.
1098 target_munmap(mapped_addr, new_alloc_size);
1100 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
1103 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
1106 #if defined(TARGET_ALPHA)
1107 /* We (partially) emulate OSF/1 on Alpha, which requires we
1108 return a proper errno, not an unchanged brk value. */
1109 return -TARGET_ENOMEM;
1111 /* For everything else, return the previous break. */
/*
 * Copy a guest fd_set for up to n descriptors from guest memory at
 * target_fds_addr into the host fd_set *fds.  The guest layout is an
 * array of abi_ulong bitmask words (TARGET_ABI_BITS bits each), which
 * are fetched with __get_user and unpacked bit by bit.
 * Returns 0 on success, -TARGET_EFAULT if the guest buffer cannot be
 * locked for reading.
 */
1115 static inline abi_long copy_from_user_fdset(fd_set *fds,
1116 abi_ulong target_fds_addr,
1120 abi_ulong b, *target_fds;
1122 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1123 if (!(target_fds = lock_user(VERIFY_READ,
1125 sizeof(abi_ulong) * nw,
1127 return -TARGET_EFAULT;
1131 for (i = 0; i < nw; i++) {
1132 /* grab the abi_ulong */
1133 __get_user(b, &target_fds[i]);
1134 for (j = 0; j < TARGET_ABI_BITS; j++) {
1135 /* check the bit inside the abi_ulong */
/* Guest pages are only read here, so unlock with 0 bytes dirtied. */
1142 unlock_user(target_fds, target_fds_addr, 0);
/*
 * Wrapper around copy_from_user_fdset() that also handles a NULL guest
 * pointer: when target_fds_addr is non-zero the set is copied in and
 * *fds_ptr is expected to point at *fds; a zero address presumably
 * leaves *fds_ptr NULL so the caller can pass it straight to select().
 * NOTE(review): the NULL-address branch is not visible here -- confirm.
 */
1147 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
1148 abi_ulong target_fds_addr,
1151 if (target_fds_addr) {
1152 if (copy_from_user_fdset(fds, target_fds_addr, n))
1153 return -TARGET_EFAULT;
/*
 * Copy a host fd_set for up to n descriptors back out to guest memory
 * at target_fds_addr, packing FD_ISSET() results into abi_ulong words
 * of TARGET_ABI_BITS bits and storing them with __put_user.
 * Returns 0 on success, -TARGET_EFAULT if the guest buffer cannot be
 * locked for writing.
 */
1161 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1167 abi_ulong *target_fds;
1169 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1170 if (!(target_fds = lock_user(VERIFY_WRITE,
1172 sizeof(abi_ulong) * nw,
1174 return -TARGET_EFAULT;
1177 for (i = 0; i < nw; i++) {
1179 for (j = 0; j < TARGET_ABI_BITS; j++) {
/* FD_ISSET != 0 normalises the result to a single bit before shifting. */
1180 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1183 __put_user(v, &target_fds[i]);
/* All nw words were written, so mark the whole region dirty. */
1186 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1191 #if defined(__alpha__)
1192 #define HOST_HZ 1024
/*
 * Rescale a clock-tick count from the host HZ to the target HZ.
 * When both rates match this is presumably a pass-through (that branch
 * is not visible here); otherwise the value is rescaled in 64-bit
 * arithmetic to avoid intermediate overflow.
 */
1197 static inline abi_long host_to_target_clock_t(long ticks)
1199 #if HOST_HZ == TARGET_HZ
1202 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
/*
 * Copy a host struct rusage out to the guest at target_addr,
 * byte-swapping every field with tswapal for the target endianness.
 * Returns 0 on success, -TARGET_EFAULT if the guest struct cannot be
 * locked for writing.
 */
1206 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1207 const struct rusage *rusage)
1209 struct target_rusage *target_rusage;
1211 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1212 return -TARGET_EFAULT;
1213 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1214 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1215 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1216 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1217 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1218 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1219 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1220 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1221 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1222 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1223 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1224 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1225 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1226 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1227 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1228 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1229 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1230 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1231 unlock_user_struct(target_rusage, target_addr, 1);
1236 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1238 abi_ulong target_rlim_swap;
1241 target_rlim_swap = tswapal(target_rlim);
1242 if (target_rlim_swap == TARGET_RLIM_INFINITY)
1243 return RLIM_INFINITY;
1245 result = target_rlim_swap;
1246 if (target_rlim_swap != (rlim_t)result)
1247 return RLIM_INFINITY;
1252 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1254 abi_ulong target_rlim_swap;
1257 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1258 target_rlim_swap = TARGET_RLIM_INFINITY;
1260 target_rlim_swap = rlim;
1261 result = tswapal(target_rlim_swap);
/*
 * Map a guest RLIMIT_* resource code onto the host's RLIMIT_* value.
 * Several case arms' return statements are not visible here; each
 * presumably returns the host constant matching its TARGET_ name,
 * and unknown codes are presumably returned unchanged -- confirm.
 */
1266 static inline int target_to_host_resource(int code)
1269 case TARGET_RLIMIT_AS:
1271 case TARGET_RLIMIT_CORE:
1273 case TARGET_RLIMIT_CPU:
1275 case TARGET_RLIMIT_DATA:
1277 case TARGET_RLIMIT_FSIZE:
1278 return RLIMIT_FSIZE;
1279 case TARGET_RLIMIT_LOCKS:
1280 return RLIMIT_LOCKS;
1281 case TARGET_RLIMIT_MEMLOCK:
1282 return RLIMIT_MEMLOCK;
1283 case TARGET_RLIMIT_MSGQUEUE:
1284 return RLIMIT_MSGQUEUE;
1285 case TARGET_RLIMIT_NICE:
1287 case TARGET_RLIMIT_NOFILE:
1288 return RLIMIT_NOFILE;
1289 case TARGET_RLIMIT_NPROC:
1290 return RLIMIT_NPROC;
1291 case TARGET_RLIMIT_RSS:
1293 case TARGET_RLIMIT_RTPRIO:
1294 return RLIMIT_RTPRIO;
1295 case TARGET_RLIMIT_SIGPENDING:
1296 return RLIMIT_SIGPENDING;
1297 case TARGET_RLIMIT_STACK:
1298 return RLIMIT_STACK;
/*
 * Read a struct target_timeval from guest memory into the host
 * struct timeval *tv, byte-swapping via __get_user.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
1304 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1305 abi_ulong target_tv_addr)
1307 struct target_timeval *target_tv;
1309 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1310 return -TARGET_EFAULT;
1312 __get_user(tv->tv_sec, &target_tv->tv_sec);
1313 __get_user(tv->tv_usec, &target_tv->tv_usec);
1315 unlock_user_struct(target_tv, target_tv_addr, 0);
/*
 * Write a host struct timeval out to guest memory as a
 * struct target_timeval, byte-swapping via __put_user.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
1320 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1321 const struct timeval *tv)
1323 struct target_timeval *target_tv;
1325 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1326 return -TARGET_EFAULT;
1328 __put_user(tv->tv_sec, &target_tv->tv_sec);
1329 __put_user(tv->tv_usec, &target_tv->tv_usec);
1331 unlock_user_struct(target_tv, target_tv_addr, 1);
/*
 * Read a struct target_timezone from guest memory into the host
 * struct timezone *tz, byte-swapping via __get_user.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
1336 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1337 abi_ulong target_tz_addr)
1339 struct target_timezone *target_tz;
1341 if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1342 return -TARGET_EFAULT;
1345 __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1346 __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1348 unlock_user_struct(target_tz, target_tz_addr, 0);
1353 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
/*
 * Read a struct target_mq_attr (POSIX message-queue attributes) from
 * guest memory into the host struct mq_attr, byte-swapping each field
 * via __get_user.  Returns 0 on success, -TARGET_EFAULT on a bad
 * guest address.
 */
1356 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1357 abi_ulong target_mq_attr_addr)
1359 struct target_mq_attr *target_mq_attr;
1361 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1362 target_mq_attr_addr, 1))
1363 return -TARGET_EFAULT;
1365 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1366 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1367 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1368 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1370 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
/*
 * Write a host struct mq_attr out to guest memory as a
 * struct target_mq_attr, byte-swapping each field via __put_user.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
1375 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1376 const struct mq_attr *attr)
1378 struct target_mq_attr *target_mq_attr;
1380 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1381 target_mq_attr_addr, 0))
1382 return -TARGET_EFAULT;
1384 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1385 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1386 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1387 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1389 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1395 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1396 /* do_select() must return target values and target errnos. */
/*
 * Emulate the guest select() syscall.  The three fd sets and the
 * timeout are copied in from guest memory, the wait is performed via
 * safe_pselect6() (using a timespec converted from the guest
 * timeval), and on success the possibly-modified sets and remaining
 * timeout are copied back out.  Returns the (target-errno) result.
 */
1397 static abi_long do_select(int n,
1398 abi_ulong rfd_addr, abi_ulong wfd_addr,
1399 abi_ulong efd_addr, abi_ulong target_tv_addr)
1401 fd_set rfds, wfds, efds;
1402 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1404 struct timespec ts, *ts_ptr;
1407 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1411 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1415 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1420 if (target_tv_addr) {
1421 if (copy_from_user_timeval(&tv, target_tv_addr))
1422 return -TARGET_EFAULT;
/* pselect6 takes a timespec, so convert the guest's timeval. */
1423 ts.tv_sec = tv.tv_sec;
1424 ts.tv_nsec = tv.tv_usec * 1000;
1430 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1433 if (!is_error(ret)) {
1434 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1435 return -TARGET_EFAULT;
1436 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1437 return -TARGET_EFAULT;
1438 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1439 return -TARGET_EFAULT;
/* Linux select() updates the timeout; mirror that back to the guest. */
1441 if (target_tv_addr) {
1442 tv.tv_sec = ts.tv_sec;
1443 tv.tv_usec = ts.tv_nsec / 1000;
1444 if (copy_to_user_timeval(target_tv_addr, &tv)) {
1445 return -TARGET_EFAULT;
1453 #if defined(TARGET_WANT_OLD_SYS_SELECT)
/*
 * Emulate the old single-argument select() ABI: arg1 points at a
 * struct target_sel_arg_struct holding n and the four pointers.
 * The fields are byte-swapped out of the struct, then dispatched to
 * the regular do_select().
 */
1454 static abi_long do_old_select(abi_ulong arg1)
1456 struct target_sel_arg_struct *sel;
1457 abi_ulong inp, outp, exp, tvp;
1460 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1461 return -TARGET_EFAULT;
1464 nsel = tswapal(sel->n);
1465 inp = tswapal(sel->inp);
1466 outp = tswapal(sel->outp);
1467 exp = tswapal(sel->exp);
1468 tvp = tswapal(sel->tvp);
1470 unlock_user_struct(sel, arg1, 0);
1472 return do_select(nsel, inp, outp, exp, tvp);
/*
 * Thin wrapper around the host pipe2() syscall.  Presumably guarded
 * by a CONFIG_PIPE2-style conditional with an ENOSYS fallback on
 * hosts without pipe2 (the alternative branch is not visible here) --
 * confirm against the full file.
 */
1477 static abi_long do_pipe2(int host_pipe[], int flags)
1480 return pipe2(host_pipe, flags);
/*
 * Emulate the guest pipe()/pipe2() syscalls.  The host pipe is
 * created first; then, because several targets return the two fds in
 * registers for the original pipe syscall (but not for pipe2), the
 * result is delivered either via CPU registers or by storing both
 * descriptors into the guest array at pipedes.
 */
1486 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1487 int flags, int is_pipe2)
1491 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1494 return get_errno(ret);
1496 /* Several targets have special calling conventions for the original
1497 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1499 #if defined(TARGET_ALPHA)
1500 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1501 return host_pipe[0];
1502 #elif defined(TARGET_MIPS)
1503 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1504 return host_pipe[0];
1505 #elif defined(TARGET_SH4)
1506 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1507 return host_pipe[0];
1508 #elif defined(TARGET_SPARC)
1509 ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1510 return host_pipe[0];
/* Generic path: write both fds as s32 values into the guest array. */
1514 if (put_user_s32(host_pipe[0], pipedes)
1515 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1516 return -TARGET_EFAULT;
1517 return get_errno(ret);
/*
 * Convert a guest ip_mreq/ip_mreqn (multicast membership request)
 * into the host struct ip_mreqn.  The two in_addr fields are already
 * in network byte order and are copied verbatim; imr_ifindex is only
 * present (and swapped) when the guest passed the larger ip_mreqn
 * variant, as determined by len.
 */
1520 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1521 abi_ulong target_addr,
1524 struct target_ip_mreqn *target_smreqn;
1526 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1528 return -TARGET_EFAULT;
1529 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1530 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1531 if (len == sizeof(struct target_ip_mreqn))
1532 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1533 unlock_user(target_smreqn, target_addr, 0);
/*
 * Convert a guest struct sockaddr at target_addr into the host
 * struct sockaddr *addr.  If the fd has a registered address
 * translator (fd_trans), that takes over entirely.  Otherwise the
 * family is byte-swapped, AF_UNIX sun_path termination is fixed up,
 * and family-specific fields (AF_NETLINK pid/groups, AF_PACKET
 * ifindex/hatype) are swapped in place after the bulk copy.
 */
1538 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1539 abi_ulong target_addr,
1542 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1543 sa_family_t sa_family;
1544 struct target_sockaddr *target_saddr;
1546 if (fd_trans_target_to_host_addr(fd)) {
1547 return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1550 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1552 return -TARGET_EFAULT;
1554 sa_family = tswap16(target_saddr->sa_family);
1556 /* Oops. The caller might send a incomplete sun_path; sun_path
1557 * must be terminated by \0 (see the manual page), but
1558 * unfortunately it is quite common to specify sockaddr_un
1559 * length as "strlen(x->sun_path)" while it should be
1560 * "strlen(...) + 1". We'll fix that here if needed.
1561 * Linux kernel has a similar feature.
1564 if (sa_family == AF_UNIX) {
1565 if (len < unix_maxlen && len > 0) {
1566 char *cp = (char*)target_saddr;
/* Only extend len when the last byte is non-NUL but the next one is. */
1568 if ( cp[len-1] && !cp[len] )
1571 if (len > unix_maxlen)
1575 memcpy(addr, target_saddr, len);
1576 addr->sa_family = sa_family;
1577 if (sa_family == AF_NETLINK) {
1578 struct sockaddr_nl *nladdr;
1580 nladdr = (struct sockaddr_nl *)addr;
1581 nladdr->nl_pid = tswap32(nladdr->nl_pid);
1582 nladdr->nl_groups = tswap32(nladdr->nl_groups);
1583 } else if (sa_family == AF_PACKET) {
1584 struct target_sockaddr_ll *lladdr;
1586 lladdr = (struct target_sockaddr_ll *)addr;
1587 lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1588 lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1590 unlock_user(target_saddr, target_addr, 0);
/*
 * Convert a host struct sockaddr out to guest memory at target_addr.
 * The raw bytes are copied first; sa_family is then byte-swapped only
 * if len actually covers that field, and AF_NETLINK / AF_PACKET
 * family-specific fields are swapped in place afterwards.
 */
1595 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1596 struct sockaddr *addr,
1599 struct target_sockaddr *target_saddr;
1605 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1607 return -TARGET_EFAULT;
1608 memcpy(target_saddr, addr, len);
/* Guard against a truncated buffer that ends inside sa_family. */
1609 if (len >= offsetof(struct target_sockaddr, sa_family) +
1610 sizeof(target_saddr->sa_family)) {
1611 target_saddr->sa_family = tswap16(addr->sa_family);
1613 if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
1614 struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
1615 target_nl->nl_pid = tswap32(target_nl->nl_pid);
1616 target_nl->nl_groups = tswap32(target_nl->nl_groups);
1617 } else if (addr->sa_family == AF_PACKET) {
1618 struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1619 target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1620 target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1622 unlock_user(target_saddr, target_addr, len);
/*
 * Convert the ancillary-data (control message) chain of a guest
 * msghdr into the host msghdr's pre-allocated control buffer.
 * Walks the target cmsg list and the host cmsg list in lockstep,
 * converting SCM_RIGHTS fd arrays and SCM_CREDENTIALS explicitly and
 * copying any other payload verbatim (with a log message).  Overflow
 * of the host buffer is a QEMU allocation bug, not a guest error.
 * On return msgh->msg_controllen reflects the space actually used.
 */
1627 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1628 struct target_msghdr *target_msgh)
1630 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1631 abi_long msg_controllen;
1632 abi_ulong target_cmsg_addr;
1633 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1634 socklen_t space = 0;
1636 msg_controllen = tswapal(target_msgh->msg_controllen);
1637 if (msg_controllen < sizeof (struct target_cmsghdr))
1639 target_cmsg_addr = tswapal(target_msgh->msg_control);
1640 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1641 target_cmsg_start = target_cmsg;
1643 return -TARGET_EFAULT;
1645 while (cmsg && target_cmsg) {
1646 void *data = CMSG_DATA(cmsg);
1647 void *target_data = TARGET_CMSG_DATA(target_cmsg);
/* Payload length = guest cmsg_len minus the (aligned) header size. */
1649 int len = tswapal(target_cmsg->cmsg_len)
1650 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
1652 space += CMSG_SPACE(len);
1653 if (space > msgh->msg_controllen) {
1654 space -= CMSG_SPACE(len);
1655 /* This is a QEMU bug, since we allocated the payload
1656 * area ourselves (unlike overflow in host-to-target
1657 * conversion, which is just the guest giving us a buffer
1658 * that's too small). It can't happen for the payload types
1659 * we currently support; if it becomes an issue in future
1660 * we would need to improve our allocation strategy to
1661 * something more intelligent than "twice the size of the
1662 * target buffer we're reading from".
1664 gemu_log("Host cmsg overflow\n");
1668 if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1669 cmsg->cmsg_level = SOL_SOCKET;
1671 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1673 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1674 cmsg->cmsg_len = CMSG_LEN(len);
1676 if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1677 int *fd = (int *)data;
1678 int *target_fd = (int *)target_data;
1679 int i, numfds = len / sizeof(int);
1681 for (i = 0; i < numfds; i++) {
1682 __get_user(fd[i], target_fd + i);
1684 } else if (cmsg->cmsg_level == SOL_SOCKET
1685 && cmsg->cmsg_type == SCM_CREDENTIALS) {
1686 struct ucred *cred = (struct ucred *)data;
1687 struct target_ucred *target_cred =
1688 (struct target_ucred *)target_data;
1690 __get_user(cred->pid, &target_cred->pid);
1691 __get_user(cred->uid, &target_cred->uid);
1692 __get_user(cred->gid, &target_cred->gid);
/* Unknown type: pass the payload through unconverted. */
1694 gemu_log("Unsupported ancillary data: %d/%d\n",
1695 cmsg->cmsg_level, cmsg->cmsg_type);
1696 memcpy(data, target_data, len);
1699 cmsg = CMSG_NXTHDR(msgh, cmsg);
1700 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1703 unlock_user(target_cmsg, target_cmsg_addr, 0);
1705 msgh->msg_controllen = space;
/*
 * Convert the ancillary-data chain of a host msghdr back into the
 * guest msghdr's control buffer.  Mirrors target_to_host_cmsg but in
 * the opposite direction: payload truncation here is a *guest*
 * problem and is reported via MSG_CTRUNC rather than treated as a
 * QEMU bug.  Handles SCM_RIGHTS, SCM_TIMESTAMP (timeval resize) and
 * SCM_CREDENTIALS explicitly; other payloads are copied verbatim and
 * zero-padded if the target representation is larger.
 */
1709 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1710 struct msghdr *msgh)
1712 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1713 abi_long msg_controllen;
1714 abi_ulong target_cmsg_addr;
1715 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1716 socklen_t space = 0;
1718 msg_controllen = tswapal(target_msgh->msg_controllen);
1719 if (msg_controllen < sizeof (struct target_cmsghdr))
1721 target_cmsg_addr = tswapal(target_msgh->msg_control);
1722 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1723 target_cmsg_start = target_cmsg;
1725 return -TARGET_EFAULT;
1727 while (cmsg && target_cmsg) {
1728 void *data = CMSG_DATA(cmsg);
1729 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1731 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
1732 int tgt_len, tgt_space;
1734 /* We never copy a half-header but may copy half-data;
1735 * this is Linux's behaviour in put_cmsg(). Note that
1736 * truncation here is a guest problem (which we report
1737 * to the guest via the CTRUNC bit), unlike truncation
1738 * in target_to_host_cmsg, which is a QEMU bug.
1740 if (msg_controllen < sizeof(struct cmsghdr)) {
1741 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1745 if (cmsg->cmsg_level == SOL_SOCKET) {
1746 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1748 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1750 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1752 tgt_len = TARGET_CMSG_LEN(len);
1754 /* Payload types which need a different size of payload on
1755 * the target must adjust tgt_len here.
1757 switch (cmsg->cmsg_level) {
1759 switch (cmsg->cmsg_type) {
1761 tgt_len = sizeof(struct target_timeval);
/* Truncate the target length if the guest buffer is too small. */
1770 if (msg_controllen < tgt_len) {
1771 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1772 tgt_len = msg_controllen;
1775 /* We must now copy-and-convert len bytes of payload
1776 * into tgt_len bytes of destination space. Bear in mind
1777 * that in both source and destination we may be dealing
1778 * with a truncated value!
1780 switch (cmsg->cmsg_level) {
1782 switch (cmsg->cmsg_type) {
1785 int *fd = (int *)data;
1786 int *target_fd = (int *)target_data;
1787 int i, numfds = tgt_len / sizeof(int);
1789 for (i = 0; i < numfds; i++) {
1790 __put_user(fd[i], target_fd + i);
1796 struct timeval *tv = (struct timeval *)data;
1797 struct target_timeval *target_tv =
1798 (struct target_timeval *)target_data;
1800 if (len != sizeof(struct timeval) ||
1801 tgt_len != sizeof(struct target_timeval)) {
1805 /* copy struct timeval to target */
1806 __put_user(tv->tv_sec, &target_tv->tv_sec);
1807 __put_user(tv->tv_usec, &target_tv->tv_usec);
1810 case SCM_CREDENTIALS:
1812 struct ucred *cred = (struct ucred *)data;
1813 struct target_ucred *target_cred =
1814 (struct target_ucred *)target_data;
1816 __put_user(cred->pid, &target_cred->pid);
1817 __put_user(cred->uid, &target_cred->uid);
1818 __put_user(cred->gid, &target_cred->gid);
/* Unknown type: raw copy, zero-padding any size difference. */
1828 gemu_log("Unsupported ancillary data: %d/%d\n",
1829 cmsg->cmsg_level, cmsg->cmsg_type);
1830 memcpy(target_data, data, MIN(len, tgt_len));
1831 if (tgt_len > len) {
1832 memset(target_data + len, 0, tgt_len - len);
1836 target_cmsg->cmsg_len = tswapal(tgt_len);
1837 tgt_space = TARGET_CMSG_SPACE(len);
1838 if (msg_controllen < tgt_space) {
1839 tgt_space = msg_controllen;
1841 msg_controllen -= tgt_space;
1843 cmsg = CMSG_NXTHDR(msgh, cmsg);
1844 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1847 unlock_user(target_cmsg, target_cmsg_addr, space);
1849 target_msgh->msg_controllen = tswapal(space);
/*
 * Byte-swap every field of a netlink message header in place.
 * The swap is its own inverse, so the same helper serves both the
 * host-to-target and target-to-host conversion paths.
 */
static void tswap_nlmsghdr(struct nlmsghdr *nlh)
{
    /* The fields are independent; swap the 32-bit ones, then the
     * 16-bit ones. */
    nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
    nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
    nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
    nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
    nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
}
/*
 * Walk a buffer of host netlink messages, converting each to target
 * byte order.  NLMSG_ERROR payloads (struct nlmsgerr, including the
 * embedded original header) are swapped inline; other message types
 * are handed to the supplied host_to_target_nlmsg callback before
 * their header is swapped.  Malformed lengths stop the walk.
 */
1862 static abi_long host_to_target_for_each_nlmsg(struct nlmsghdr *nlh,
1864 abi_long (*host_to_target_nlmsg)
1865 (struct nlmsghdr *))
1870 while (len > sizeof(struct nlmsghdr)) {
/* nlmsg_len is still host order here; validate before swapping. */
1872 nlmsg_len = nlh->nlmsg_len;
1873 if (nlmsg_len < sizeof(struct nlmsghdr) ||
1878 switch (nlh->nlmsg_type) {
1880 tswap_nlmsghdr(nlh);
1886 struct nlmsgerr *e = NLMSG_DATA(nlh);
1887 e->error = tswap32(e->error);
1888 tswap_nlmsghdr(&e->msg);
1889 tswap_nlmsghdr(nlh);
1893 ret = host_to_target_nlmsg(nlh);
1895 tswap_nlmsghdr(nlh);
1900 tswap_nlmsghdr(nlh);
1901 len -= NLMSG_ALIGN(nlmsg_len);
1902 nlh = (struct nlmsghdr *)(((char*)nlh) + NLMSG_ALIGN(nlmsg_len));
/*
 * Walk a buffer of guest netlink messages, converting each to host
 * byte order.  Lengths are validated on the still-swapped values,
 * then the header is swapped and NLMSG_ERROR payloads are converted
 * inline; other message types go to the target_to_host_nlmsg
 * callback.
 */
1907 static abi_long target_to_host_for_each_nlmsg(struct nlmsghdr *nlh,
1909 abi_long (*target_to_host_nlmsg)
1910 (struct nlmsghdr *))
1914 while (len > sizeof(struct nlmsghdr)) {
/* nlmsg_len is target order until tswap_nlmsghdr() below. */
1915 if (tswap32(nlh->nlmsg_len) < sizeof(struct nlmsghdr) ||
1916 tswap32(nlh->nlmsg_len) > len) {
1919 tswap_nlmsghdr(nlh);
1920 switch (nlh->nlmsg_type) {
1927 struct nlmsgerr *e = NLMSG_DATA(nlh);
1928 e->error = tswap32(e->error);
1929 tswap_nlmsghdr(&e->msg);
1933 ret = target_to_host_nlmsg(nlh);
1938 len -= NLMSG_ALIGN(nlh->nlmsg_len);
1939 nlh = (struct nlmsghdr *)(((char *)nlh) + NLMSG_ALIGN(nlh->nlmsg_len));
1944 #ifdef CONFIG_RTNETLINK
/*
 * Walk a chain of host netlink attributes (struct nlattr), calling
 * the supplied conversion callback on each payload (with an opaque
 * context pointer) and then byte-swapping the attribute header.
 * Malformed lengths stop the walk.
 */
1945 static abi_long host_to_target_for_each_nlattr(struct nlattr *nlattr,
1946 size_t len, void *context,
1947 abi_long (*host_to_target_nlattr)
1951 unsigned short nla_len;
1954 while (len > sizeof(struct nlattr)) {
/* Keep the host-order length for the stride computation below. */
1955 nla_len = nlattr->nla_len;
1956 if (nla_len < sizeof(struct nlattr) ||
1960 ret = host_to_target_nlattr(nlattr, context);
1961 nlattr->nla_len = tswap16(nlattr->nla_len);
1962 nlattr->nla_type = tswap16(nlattr->nla_type);
1966 len -= NLA_ALIGN(nla_len);
1967 nlattr = (struct nlattr *)(((char *)nlattr) + NLA_ALIGN(nla_len));
/*
 * Walk a chain of host rtnetlink attributes (struct rtattr), calling
 * the supplied conversion callback on each payload and then
 * byte-swapping the attribute header.  Same structure as the nlattr
 * walker above, but for the rtattr header layout.
 */
1972 static abi_long host_to_target_for_each_rtattr(struct rtattr *rtattr,
1974 abi_long (*host_to_target_rtattr)
1977 unsigned short rta_len;
1980 while (len > sizeof(struct rtattr)) {
/* Keep the host-order length for the stride computation below. */
1981 rta_len = rtattr->rta_len;
1982 if (rta_len < sizeof(struct rtattr) ||
1986 ret = host_to_target_rtattr(rtattr);
1987 rtattr->rta_len = tswap16(rtattr->rta_len);
1988 rtattr->rta_type = tswap16(rtattr->rta_type);
1992 len -= RTA_ALIGN(rta_len);
1993 rtattr = (struct rtattr *)(((char *)rtattr) + RTA_ALIGN(rta_len));
/* Address of a netlink attribute's payload (header skipped). */
1998 #define NLA_DATA(nla) ((void *)((char *)(nla)) + NLA_HDRLEN)
/*
 * Byte-swap the payload of one IFLA_BR_* bridge attribute in place,
 * dispatching on the attribute type's payload width: u8 payloads need
 * no swap, u16/u32/u64 payloads are swapped, and bridge-id blobs are
 * left untouched.  Unknown types are logged and passed through.
 */
2000 static abi_long host_to_target_data_bridge_nlattr(struct nlattr *nlattr,
2007 switch (nlattr->nla_type) {
2009 case QEMU_IFLA_BR_FDB_FLUSH:
2012 case QEMU_IFLA_BR_GROUP_ADDR:
2015 case QEMU_IFLA_BR_VLAN_FILTERING:
2016 case QEMU_IFLA_BR_TOPOLOGY_CHANGE:
2017 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED:
2018 case QEMU_IFLA_BR_MCAST_ROUTER:
2019 case QEMU_IFLA_BR_MCAST_SNOOPING:
2020 case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR:
2021 case QEMU_IFLA_BR_MCAST_QUERIER:
2022 case QEMU_IFLA_BR_NF_CALL_IPTABLES:
2023 case QEMU_IFLA_BR_NF_CALL_IP6TABLES:
2024 case QEMU_IFLA_BR_NF_CALL_ARPTABLES:
2027 case QEMU_IFLA_BR_PRIORITY:
2028 case QEMU_IFLA_BR_VLAN_PROTOCOL:
2029 case QEMU_IFLA_BR_GROUP_FWD_MASK:
2030 case QEMU_IFLA_BR_ROOT_PORT:
2031 case QEMU_IFLA_BR_VLAN_DEFAULT_PVID:
2032 u16 = NLA_DATA(nlattr);
2033 *u16 = tswap16(*u16);
2036 case QEMU_IFLA_BR_FORWARD_DELAY:
2037 case QEMU_IFLA_BR_HELLO_TIME:
2038 case QEMU_IFLA_BR_MAX_AGE:
2039 case QEMU_IFLA_BR_AGEING_TIME:
2040 case QEMU_IFLA_BR_STP_STATE:
2041 case QEMU_IFLA_BR_ROOT_PATH_COST:
2042 case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY:
2043 case QEMU_IFLA_BR_MCAST_HASH_MAX:
2044 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT:
2045 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT:
2046 u32 = NLA_DATA(nlattr);
2047 *u32 = tswap32(*u32);
2050 case QEMU_IFLA_BR_HELLO_TIMER:
2051 case QEMU_IFLA_BR_TCN_TIMER:
2052 case QEMU_IFLA_BR_GC_TIMER:
2053 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER:
2054 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL:
2055 case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL:
2056 case QEMU_IFLA_BR_MCAST_QUERIER_INTVL:
2057 case QEMU_IFLA_BR_MCAST_QUERY_INTVL:
2058 case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL:
2059 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL:
2060 u64 = NLA_DATA(nlattr);
2061 *u64 = tswap64(*u64);
2063 /* ifla_bridge_id: uin8_t[] */
2064 case QEMU_IFLA_BR_ROOT_ID:
2065 case QEMU_IFLA_BR_BRIDGE_ID:
2068 gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr->nla_type);
/*
 * Byte-swap the payload of one IFLA_BRPORT_* bridge-port attribute in
 * place, dispatching on the attribute type's payload width (u8: no
 * swap; u16/u32/u64: swapped; bridge-id blobs untouched).  Unknown
 * types are logged and passed through.
 */
2074 static abi_long host_to_target_slave_data_bridge_nlattr(struct nlattr *nlattr,
2081 switch (nlattr->nla_type) {
2083 case QEMU_IFLA_BRPORT_STATE:
2084 case QEMU_IFLA_BRPORT_MODE:
2085 case QEMU_IFLA_BRPORT_GUARD:
2086 case QEMU_IFLA_BRPORT_PROTECT:
2087 case QEMU_IFLA_BRPORT_FAST_LEAVE:
2088 case QEMU_IFLA_BRPORT_LEARNING:
2089 case QEMU_IFLA_BRPORT_UNICAST_FLOOD:
2090 case QEMU_IFLA_BRPORT_PROXYARP:
2091 case QEMU_IFLA_BRPORT_LEARNING_SYNC:
2092 case QEMU_IFLA_BRPORT_PROXYARP_WIFI:
2093 case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK:
2094 case QEMU_IFLA_BRPORT_CONFIG_PENDING:
2095 case QEMU_IFLA_BRPORT_MULTICAST_ROUTER:
2098 case QEMU_IFLA_BRPORT_PRIORITY:
2099 case QEMU_IFLA_BRPORT_DESIGNATED_PORT:
2100 case QEMU_IFLA_BRPORT_DESIGNATED_COST:
2101 case QEMU_IFLA_BRPORT_ID:
2102 case QEMU_IFLA_BRPORT_NO:
2103 u16 = NLA_DATA(nlattr);
2104 *u16 = tswap16(*u16);
2107 case QEMU_IFLA_BRPORT_COST:
2108 u32 = NLA_DATA(nlattr);
2109 *u32 = tswap32(*u32);
2112 case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER:
2113 case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER:
2114 case QEMU_IFLA_BRPORT_HOLD_TIMER:
2115 u64 = NLA_DATA(nlattr);
2116 *u64 = tswap64(*u64);
2118 /* ifla_bridge_id: uint8_t[] */
2119 case QEMU_IFLA_BRPORT_ROOT_ID:
2120 case QEMU_IFLA_BRPORT_BRIDGE_ID:
2123 gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr->nla_type);
/* Context threaded through the IFLA_LINKINFO attribute walk: records
 * the link kind (and slave kind) so the nested INFO_DATA attributes
 * can be dispatched to the right per-kind converter. */
2129 struct linkinfo_context {
/*
 * Convert one IFLA_INFO_* attribute of an IFLA_LINKINFO nest.
 * INFO_KIND / INFO_SLAVE_KIND are remembered in the context;
 * INFO_DATA / INFO_SLAVE_DATA are recursed into when the recorded
 * kind is "bridge" (the only kind currently supported -- others are
 * logged and skipped).
 */
2136 static abi_long host_to_target_data_linkinfo_nlattr(struct nlattr *nlattr,
2139 struct linkinfo_context *li_context = context;
2141 switch (nlattr->nla_type) {
2143 case QEMU_IFLA_INFO_KIND:
2144 li_context->name = NLA_DATA(nlattr);
2145 li_context->len = nlattr->nla_len - NLA_HDRLEN;
2147 case QEMU_IFLA_INFO_SLAVE_KIND:
2148 li_context->slave_name = NLA_DATA(nlattr);
2149 li_context->slave_len = nlattr->nla_len - NLA_HDRLEN;
2152 case QEMU_IFLA_INFO_XSTATS:
2153 /* FIXME: only used by CAN */
2156 case QEMU_IFLA_INFO_DATA:
2157 if (strncmp(li_context->name, "bridge",
2158 li_context->len) == 0) {
2159 return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2162 host_to_target_data_bridge_nlattr);
2164 gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context->name);
2167 case QEMU_IFLA_INFO_SLAVE_DATA:
2168 if (strncmp(li_context->slave_name, "bridge",
2169 li_context->slave_len) == 0) {
2170 return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2173 host_to_target_slave_data_bridge_nlattr);
2175 gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
2176 li_context->slave_name);
2180 gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr->nla_type);
/*
 * Convert one AF_INET attribute of an IFLA_AF_SPEC nest.
 * IFLA_INET_CONF is an array of u32 config values, each byte-swapped
 * in place; other types are logged and passed through.
 */
2187 static abi_long host_to_target_data_inet_nlattr(struct nlattr *nlattr,
2193 switch (nlattr->nla_type) {
2194 case QEMU_IFLA_INET_CONF:
2195 u32 = NLA_DATA(nlattr);
2196 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
2198 u32[i] = tswap32(u32[i]);
2202 gemu_log("Unknown host AF_INET type: %d\n", nlattr->nla_type);
/*
 * Convert one AF_INET6 attribute of an IFLA_AF_SPEC nest, dispatching
 * on payload type: raw token / u8 payloads need no swap, u32 scalars
 * and arrays are swapped, ifla_cacheinfo fields are swapped
 * individually, and the stats payloads are u64 arrays.  Unknown types
 * are logged and passed through.
 */
2207 static abi_long host_to_target_data_inet6_nlattr(struct nlattr *nlattr,
2212 struct ifla_cacheinfo *ci;
2215 switch (nlattr->nla_type) {
2217 case QEMU_IFLA_INET6_TOKEN:
2220 case QEMU_IFLA_INET6_ADDR_GEN_MODE:
2223 case QEMU_IFLA_INET6_FLAGS:
2224 u32 = NLA_DATA(nlattr);
2225 *u32 = tswap32(*u32);
2228 case QEMU_IFLA_INET6_CONF:
2229 u32 = NLA_DATA(nlattr);
2230 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
2232 u32[i] = tswap32(u32[i]);
2235 /* ifla_cacheinfo */
2236 case QEMU_IFLA_INET6_CACHEINFO:
2237 ci = NLA_DATA(nlattr);
2238 ci->max_reasm_len = tswap32(ci->max_reasm_len);
2239 ci->tstamp = tswap32(ci->tstamp);
2240 ci->reachable_time = tswap32(ci->reachable_time);
2241 ci->retrans_time = tswap32(ci->retrans_time);
2244 case QEMU_IFLA_INET6_STATS:
2245 case QEMU_IFLA_INET6_ICMP6STATS:
2246 u64 = NLA_DATA(nlattr);
2247 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u64);
2249 u64[i] = tswap64(u64[i]);
2253 gemu_log("Unknown host AF_INET6 type: %d\n", nlattr->nla_type);
/*
 * Dispatch one IFLA_AF_SPEC sub-nest by address family: recurse into
 * the AF_INET or AF_INET6 attribute converters (the case labels for
 * the families are not visible here -- presumably AF_INET/AF_INET6).
 * Unknown families are logged and passed through.
 */
2258 static abi_long host_to_target_data_spec_nlattr(struct nlattr *nlattr,
2261 switch (nlattr->nla_type) {
2263 return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2265 host_to_target_data_inet_nlattr);
2267 return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2269 host_to_target_data_inet6_nlattr);
2271 gemu_log("Unknown host AF_SPEC type: %d\n", nlattr->nla_type);
/*
 * Convert one RTM link (IFLA_*) attribute payload to target byte
 * order, dispatching on the attribute type's payload kind: binary
 * blobs and strings untouched, u8/u32 scalars swapped, the
 * rtnl_link_stats / rtnl_link_stats64 / rtnl_link_ifmap structs
 * swapped field by field, and IFLA_LINKINFO / IFLA_AF_SPEC nests
 * recursed into with the appropriate sub-converters.  Unknown types
 * are logged and passed through.
 */
2277 static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
2280 struct rtnl_link_stats *st;
2281 struct rtnl_link_stats64 *st64;
2282 struct rtnl_link_ifmap *map;
2283 struct linkinfo_context li_context;
2285 switch (rtattr->rta_type) {
2287 case QEMU_IFLA_ADDRESS:
2288 case QEMU_IFLA_BROADCAST:
2290 case QEMU_IFLA_IFNAME:
2291 case QEMU_IFLA_QDISC:
2294 case QEMU_IFLA_OPERSTATE:
2295 case QEMU_IFLA_LINKMODE:
2296 case QEMU_IFLA_CARRIER:
2297 case QEMU_IFLA_PROTO_DOWN:
2301 case QEMU_IFLA_LINK:
2302 case QEMU_IFLA_WEIGHT:
2303 case QEMU_IFLA_TXQLEN:
2304 case QEMU_IFLA_CARRIER_CHANGES:
2305 case QEMU_IFLA_NUM_RX_QUEUES:
2306 case QEMU_IFLA_NUM_TX_QUEUES:
2307 case QEMU_IFLA_PROMISCUITY:
2308 case QEMU_IFLA_EXT_MASK:
2309 case QEMU_IFLA_LINK_NETNSID:
2310 case QEMU_IFLA_GROUP:
2311 case QEMU_IFLA_MASTER:
2312 case QEMU_IFLA_NUM_VF:
2313 u32 = RTA_DATA(rtattr);
2314 *u32 = tswap32(*u32);
2316 /* struct rtnl_link_stats */
2317 case QEMU_IFLA_STATS:
2318 st = RTA_DATA(rtattr);
2319 st->rx_packets = tswap32(st->rx_packets);
2320 st->tx_packets = tswap32(st->tx_packets);
2321 st->rx_bytes = tswap32(st->rx_bytes);
2322 st->tx_bytes = tswap32(st->tx_bytes);
2323 st->rx_errors = tswap32(st->rx_errors);
2324 st->tx_errors = tswap32(st->tx_errors);
2325 st->rx_dropped = tswap32(st->rx_dropped);
2326 st->tx_dropped = tswap32(st->tx_dropped);
2327 st->multicast = tswap32(st->multicast);
2328 st->collisions = tswap32(st->collisions);
2330 /* detailed rx_errors: */
2331 st->rx_length_errors = tswap32(st->rx_length_errors);
2332 st->rx_over_errors = tswap32(st->rx_over_errors);
2333 st->rx_crc_errors = tswap32(st->rx_crc_errors);
2334 st->rx_frame_errors = tswap32(st->rx_frame_errors);
2335 st->rx_fifo_errors = tswap32(st->rx_fifo_errors);
2336 st->rx_missed_errors = tswap32(st->rx_missed_errors);
2338 /* detailed tx_errors */
2339 st->tx_aborted_errors = tswap32(st->tx_aborted_errors);
2340 st->tx_carrier_errors = tswap32(st->tx_carrier_errors);
2341 st->tx_fifo_errors = tswap32(st->tx_fifo_errors);
2342 st->tx_heartbeat_errors = tswap32(st->tx_heartbeat_errors);
2343 st->tx_window_errors = tswap32(st->tx_window_errors);
2346 st->rx_compressed = tswap32(st->rx_compressed);
2347 st->tx_compressed = tswap32(st->tx_compressed);
2349 /* struct rtnl_link_stats64 */
2350 case QEMU_IFLA_STATS64:
2351 st64 = RTA_DATA(rtattr);
2352 st64->rx_packets = tswap64(st64->rx_packets);
2353 st64->tx_packets = tswap64(st64->tx_packets);
2354 st64->rx_bytes = tswap64(st64->rx_bytes);
2355 st64->tx_bytes = tswap64(st64->tx_bytes);
2356 st64->rx_errors = tswap64(st64->rx_errors);
2357 st64->tx_errors = tswap64(st64->tx_errors);
2358 st64->rx_dropped = tswap64(st64->rx_dropped);
2359 st64->tx_dropped = tswap64(st64->tx_dropped);
2360 st64->multicast = tswap64(st64->multicast);
2361 st64->collisions = tswap64(st64->collisions);
2363 /* detailed rx_errors: */
2364 st64->rx_length_errors = tswap64(st64->rx_length_errors);
2365 st64->rx_over_errors = tswap64(st64->rx_over_errors);
2366 st64->rx_crc_errors = tswap64(st64->rx_crc_errors);
2367 st64->rx_frame_errors = tswap64(st64->rx_frame_errors);
2368 st64->rx_fifo_errors = tswap64(st64->rx_fifo_errors);
2369 st64->rx_missed_errors = tswap64(st64->rx_missed_errors);
2371 /* detailed tx_errors */
2372 st64->tx_aborted_errors = tswap64(st64->tx_aborted_errors);
2373 st64->tx_carrier_errors = tswap64(st64->tx_carrier_errors);
2374 st64->tx_fifo_errors = tswap64(st64->tx_fifo_errors);
2375 st64->tx_heartbeat_errors = tswap64(st64->tx_heartbeat_errors);
2376 st64->tx_window_errors = tswap64(st64->tx_window_errors);
2379 st64->rx_compressed = tswap64(st64->rx_compressed);
2380 st64->tx_compressed = tswap64(st64->tx_compressed);
2382 /* struct rtnl_link_ifmap */
2384 map = RTA_DATA(rtattr);
2385 map->mem_start = tswap64(map->mem_start);
2386 map->mem_end = tswap64(map->mem_end);
2387 map->base_addr = tswap64(map->base_addr);
2388 map->irq = tswap16(map->irq);
/* Nested attribute sets: recurse with the matching sub-converter. */
2391 case QEMU_IFLA_LINKINFO:
2392 memset(&li_context, 0, sizeof(li_context));
2393 return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
2395 host_to_target_data_linkinfo_nlattr);
2396 case QEMU_IFLA_AF_SPEC:
2397 return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
2399 host_to_target_data_spec_nlattr);
2401 gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr->rta_type);
/*
 * Byte-swap one IFA_* rtattr payload from host to target byte order.
 * NOTE(review): interior lines (case labels, braces, returns) are elided
 * in this listing, so the switch structure is only partially visible.
 */
2407 static abi_long host_to_target_data_addr_rtattr(struct rtattr *rtattr)
2410     struct ifa_cacheinfo *ci;
2412     switch (rtattr->rta_type) {
2413     /* binary: depends on family type */
/* 32-bit scalar attribute: swap in place */
2423         u32 = RTA_DATA(rtattr);
2424         *u32 = tswap32(*u32);
2426     /* struct ifa_cacheinfo */
/* all four ifa_cacheinfo fields are 32-bit values */
2428         ci = RTA_DATA(rtattr);
2429         ci->ifa_prefered = tswap32(ci->ifa_prefered);
2430         ci->ifa_valid = tswap32(ci->ifa_valid);
2431         ci->cstamp = tswap32(ci->cstamp);
2432         ci->tstamp = tswap32(ci->tstamp);
/* unknown attribute types are logged, not treated as an error */
2435         gemu_log("Unknown host IFA type: %d\n", rtattr->rta_type);
/*
 * Byte-swap one RTA_* (route) rtattr payload from host to target order.
 * NOTE(review): case labels are elided in this listing.
 */
2441 static abi_long host_to_target_data_route_rtattr(struct rtattr *rtattr)
2444     switch (rtattr->rta_type) {
2445     /* binary: depends on family type */
/* 32-bit scalar attribute: swap in place */
2454         u32 = RTA_DATA(rtattr);
2455         *u32 = tswap32(*u32);
2458         gemu_log("Unknown host RTA type: %d\n", rtattr->rta_type);
/* Walk all IFLA_* attributes, converting each via the link handler. */
2464 static abi_long host_to_target_link_rtattr(struct rtattr *rtattr,
2465                                            uint32_t rtattr_len)
2467     return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2468                                           host_to_target_data_link_rtattr);
/* Walk all IFA_* attributes, converting each via the addr handler. */
2471 static abi_long host_to_target_addr_rtattr(struct rtattr *rtattr,
2472                                            uint32_t rtattr_len)
2474     return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2475                                           host_to_target_data_addr_rtattr);
/* Walk all RTA_* attributes, converting each via the route handler. */
2478 static abi_long host_to_target_route_rtattr(struct rtattr *rtattr,
2479                                             uint32_t rtattr_len)
2481     return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2482                                           host_to_target_data_route_rtattr);
/*
 * Convert one rtnetlink message (link / addr / route) from host to
 * target byte order: fixed header fields first, then the attached
 * rtattr chain.  NOTE(review): case labels and braces are elided in
 * this listing; nlmsg_len is cached before header fields are swapped,
 * presumably because the length is still needed in host order.
 */
2485 static abi_long host_to_target_data_route(struct nlmsghdr *nlh)
2488     struct ifinfomsg *ifi;
2489     struct ifaddrmsg *ifa;
2492     nlmsg_len = nlh->nlmsg_len;
2493     switch (nlh->nlmsg_type) {
/* link message: swap ifinfomsg fields, then IFLA attributes */
2497         if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
2498             ifi = NLMSG_DATA(nlh);
2499             ifi->ifi_type = tswap16(ifi->ifi_type);
2500             ifi->ifi_index = tswap32(ifi->ifi_index);
2501             ifi->ifi_flags = tswap32(ifi->ifi_flags);
2502             ifi->ifi_change = tswap32(ifi->ifi_change);
2503             host_to_target_link_rtattr(IFLA_RTA(ifi),
2504                                        nlmsg_len - NLMSG_LENGTH(sizeof(*ifi)));
/* address message: swap ifaddrmsg index, then IFA attributes */
2510         if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
2511             ifa = NLMSG_DATA(nlh);
2512             ifa->ifa_index = tswap32(ifa->ifa_index);
2513             host_to_target_addr_rtattr(IFA_RTA(ifa),
2514                                        nlmsg_len - NLMSG_LENGTH(sizeof(*ifa)));
/* route message: swap rtmsg flags, then RTA attributes */
2520         if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
2521             rtm = NLMSG_DATA(nlh);
2522             rtm->rtm_flags = tswap32(rtm->rtm_flags);
2523             host_to_target_route_rtattr(RTM_RTA(rtm),
2524                                         nlmsg_len - NLMSG_LENGTH(sizeof(*rtm)));
2528         return -TARGET_EINVAL;
/* Convert a whole buffer of rtnetlink messages host -> target. */
2533 static inline abi_long host_to_target_nlmsg_route(struct nlmsghdr *nlh,
2536     return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_route);
/*
 * Iterate over a target-order rtattr chain, swapping each header to
 * host order and invoking the per-attribute callback.  Length/size
 * sanity is checked on the still-target-order rta_len via tswap16.
 * NOTE(review): the loop's error-exit lines are elided in this listing.
 */
2539 static abi_long target_to_host_for_each_rtattr(struct rtattr *rtattr,
2541                                                abi_long (*target_to_host_rtattr)
2546     while (len >= sizeof(struct rtattr)) {
/* reject attributes shorter than a header or longer than the buffer */
2547         if (tswap16(rtattr->rta_len) < sizeof(struct rtattr) ||
2548             tswap16(rtattr->rta_len) > len) {
/* header to host order before handing the attribute to the callback */
2551         rtattr->rta_len = tswap16(rtattr->rta_len);
2552         rtattr->rta_type = tswap16(rtattr->rta_type);
2553         ret = target_to_host_rtattr(rtattr);
/* advance by the aligned (now host-order) attribute length */
2557         len -= RTA_ALIGN(rtattr->rta_len);
2558         rtattr = (struct rtattr *)(((char *)rtattr) +
2559                                    RTA_ALIGN(rtattr->rta_len));
/* Convert one IFLA_* attribute target -> host; none handled yet, only logged. */
2564 static abi_long target_to_host_data_link_rtattr(struct rtattr *rtattr)
2566     switch (rtattr->rta_type) {
2568         gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr->rta_type);
/* Convert one IFA_* attribute target -> host; unknown types only logged. */
2574 static abi_long target_to_host_data_addr_rtattr(struct rtattr *rtattr)
2576     switch (rtattr->rta_type) {
2577     /* binary: depends on family type */
2582         gemu_log("Unknown target IFA type: %d\n", rtattr->rta_type);
/*
 * Convert one RTA_* attribute target -> host byte order.
 * NOTE(review): case labels are elided in this listing.
 */
2588 static abi_long target_to_host_data_route_rtattr(struct rtattr *rtattr)
2591     switch (rtattr->rta_type) {
2592     /* binary: depends on family type */
/* 32-bit scalar attribute: swap in place */
2599         u32 = RTA_DATA(rtattr);
2600         *u32 = tswap32(*u32);
2603         gemu_log("Unknown target RTA type: %d\n", rtattr->rta_type);
/* Walk all IFLA_* attributes, converting each target -> host. */
2609 static void target_to_host_link_rtattr(struct rtattr *rtattr,
2610                                        uint32_t rtattr_len)
2612     target_to_host_for_each_rtattr(rtattr, rtattr_len,
2613                                    target_to_host_data_link_rtattr);
/* Walk all IFA_* attributes, converting each target -> host. */
2616 static void target_to_host_addr_rtattr(struct rtattr *rtattr,
2617                                        uint32_t rtattr_len)
2619     target_to_host_for_each_rtattr(rtattr, rtattr_len,
2620                                    target_to_host_data_addr_rtattr);
/* Walk all RTA_* attributes, converting each target -> host. */
2623 static void target_to_host_route_rtattr(struct rtattr *rtattr,
2624                                         uint32_t rtattr_len)
2626     target_to_host_for_each_rtattr(rtattr, rtattr_len,
2627                                    target_to_host_data_route_rtattr);
/*
 * Convert one rtnetlink message (link / addr / route) from target to
 * host byte order: fixed header fields, then the rtattr chain.
 * NOTE(review): case labels and braces are elided in this listing.
 */
2630 static abi_long target_to_host_data_route(struct nlmsghdr *nlh)
2632     struct ifinfomsg *ifi;
2633     struct ifaddrmsg *ifa;
2636     switch (nlh->nlmsg_type) {
/* link message: swap ifinfomsg fields, then IFLA attributes */
2641         if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
2642             ifi = NLMSG_DATA(nlh);
2643             ifi->ifi_type = tswap16(ifi->ifi_type);
2644             ifi->ifi_index = tswap32(ifi->ifi_index);
2645             ifi->ifi_flags = tswap32(ifi->ifi_flags);
2646             ifi->ifi_change = tswap32(ifi->ifi_change);
2647             target_to_host_link_rtattr(IFLA_RTA(ifi), nlh->nlmsg_len -
2648                                        NLMSG_LENGTH(sizeof(*ifi)));
/* address message: swap ifaddrmsg index, then IFA attributes */
2654         if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
2655             ifa = NLMSG_DATA(nlh);
2656             ifa->ifa_index = tswap32(ifa->ifa_index);
2657             target_to_host_addr_rtattr(IFA_RTA(ifa), nlh->nlmsg_len -
2658                                        NLMSG_LENGTH(sizeof(*ifa)));
/* route message: swap rtmsg flags, then RTA attributes */
2665         if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
2666             rtm = NLMSG_DATA(nlh);
2667             rtm->rtm_flags = tswap32(rtm->rtm_flags);
2668             target_to_host_route_rtattr(RTM_RTA(rtm), nlh->nlmsg_len -
2669                                         NLMSG_LENGTH(sizeof(*rtm)));
/* unlike the host->target path, unsupported types yield EOPNOTSUPP */
2673         return -TARGET_EOPNOTSUPP;
/* Convert a whole buffer of rtnetlink messages target -> host. */
2678 static abi_long target_to_host_nlmsg_route(struct nlmsghdr *nlh, size_t len)
2680     return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_route);
2682 #endif /* CONFIG_RTNETLINK */
/*
 * Convert one audit netlink message host -> target.
 * NOTE(review): the handled case labels are elided in this listing;
 * only the unknown-type fallback (log + EINVAL) is visible.
 */
2684 static abi_long host_to_target_data_audit(struct nlmsghdr *nlh)
2686     switch (nlh->nlmsg_type) {
2688         gemu_log("Unknown host audit message type %d\n",
2690         return -TARGET_EINVAL;
/* Convert a whole buffer of audit netlink messages host -> target. */
2695 static inline abi_long host_to_target_nlmsg_audit(struct nlmsghdr *nlh,
2698     return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_audit);
/*
 * Convert one audit netlink message target -> host.  User-message
 * ranges are accepted; unknown types are logged and rejected.
 */
2701 static abi_long target_to_host_data_audit(struct nlmsghdr *nlh)
2703     switch (nlh->nlmsg_type) {
2705     case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
2706     case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
2709         gemu_log("Unknown target audit message type %d\n",
2711         return -TARGET_EINVAL;
/* Convert a whole buffer of audit netlink messages target -> host. */
2717 static abi_long target_to_host_nlmsg_audit(struct nlmsghdr *nlh, size_t len)
2719     return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_audit);
2722 /* do_setsockopt() Must return target values and target errnos. */
/*
 * Emulate setsockopt(2): translate target-level option constants and
 * option values into host form, call the host setsockopt, and return
 * a target errno.  NOTE(review): many case labels, braces and `break`s
 * are elided in this listing, so per-level grouping below is partly
 * inferred from the visible option names — verify against the full file.
 */
2723 static abi_long do_setsockopt(int sockfd, int level, int optname,
2724                               abi_ulong optval_addr, socklen_t optlen)
2728     struct ip_mreqn *ip_mreq;
2729     struct ip_mreq_source *ip_mreq_source;
2733         /* TCP options all take an 'int' value.  */
2734         if (optlen < sizeof(uint32_t))
2735             return -TARGET_EINVAL;
2737         if (get_user_u32(val, optval_addr))
2738             return -TARGET_EFAULT;
2739         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2746         case IP_ROUTER_ALERT:
2750         case IP_MTU_DISCOVER:
2756         case IP_MULTICAST_TTL:
2757         case IP_MULTICAST_LOOP:
/* these IP options accept either a full int or a single byte */
2759             if (optlen >= sizeof(uint32_t)) {
2760                 if (get_user_u32(val, optval_addr))
2761                     return -TARGET_EFAULT;
2762             } else if (optlen >= 1) {
2763                 if (get_user_u8(val, optval_addr))
2764                     return -TARGET_EFAULT;
2766             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2768         case IP_ADD_MEMBERSHIP:
2769         case IP_DROP_MEMBERSHIP:
/* accept either ip_mreq or the larger ip_mreqn layout */
2770             if (optlen < sizeof (struct target_ip_mreq) ||
2771                 optlen > sizeof (struct target_ip_mreqn))
2772                 return -TARGET_EINVAL;
2774             ip_mreq = (struct ip_mreqn *) alloca(optlen);
2775             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2776             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2779         case IP_BLOCK_SOURCE:
2780         case IP_UNBLOCK_SOURCE:
2781         case IP_ADD_SOURCE_MEMBERSHIP:
2782         case IP_DROP_SOURCE_MEMBERSHIP:
2783             if (optlen != sizeof (struct target_ip_mreq_source))
2784                 return -TARGET_EINVAL;
/* layout matches the host struct, so pass the guest memory through */
2786             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2787             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2788             unlock_user (ip_mreq_source, optval_addr, 0);
2797         case IPV6_MTU_DISCOVER:
2800         case IPV6_RECVPKTINFO:
/* IPv6 int-valued options */
2802             if (optlen < sizeof(uint32_t)) {
2803                 return -TARGET_EINVAL;
2805             if (get_user_u32(val, optval_addr)) {
2806                 return -TARGET_EFAULT;
2808             ret = get_errno(setsockopt(sockfd, level, optname,
2809                                        &val, sizeof(val)));
2818             /* struct icmp_filter takes an u32 value */
2819             if (optlen < sizeof(uint32_t)) {
2820                 return -TARGET_EINVAL;
2823             if (get_user_u32(val, optval_addr)) {
2824                 return -TARGET_EFAULT;
2826             ret = get_errno(setsockopt(sockfd, level, optname,
2827                                        &val, sizeof(val)));
2834     case TARGET_SOL_SOCKET:
2836         case TARGET_SO_RCVTIMEO:
/* timeout options carry a struct timeval, converted from target layout */
2840                 optname = SO_RCVTIMEO;
2843                 if (optlen != sizeof(struct target_timeval)) {
2844                     return -TARGET_EINVAL;
2847                 if (copy_from_user_timeval(&tv, optval_addr)) {
2848                     return -TARGET_EFAULT;
2851                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2855         case TARGET_SO_SNDTIMEO:
2856             optname = SO_SNDTIMEO;
2858         case TARGET_SO_ATTACH_FILTER:
/* BPF socket filter: copy and byte-swap each sock_filter insn */
2860                 struct target_sock_fprog *tfprog;
2861                 struct target_sock_filter *tfilter;
2862                 struct sock_fprog fprog;
2863                 struct sock_filter *filter;
2866                 if (optlen != sizeof(*tfprog)) {
2867                     return -TARGET_EINVAL;
2869                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2870                     return -TARGET_EFAULT;
2872                 if (!lock_user_struct(VERIFY_READ, tfilter,
2873                                       tswapal(tfprog->filter), 0)) {
2874                     unlock_user_struct(tfprog, optval_addr, 1);
2875                     return -TARGET_EFAULT;
2878                 fprog.len = tswap16(tfprog->len);
2879                 filter = g_try_new(struct sock_filter, fprog.len);
2880                 if (filter == NULL) {
2881                     unlock_user_struct(tfilter, tfprog->filter, 1);
2882                     unlock_user_struct(tfprog, optval_addr, 1);
2883                     return -TARGET_ENOMEM;
2885                 for (i = 0; i < fprog.len; i++) {
/* jt/jf are single bytes, so no swap is needed for them */
2886                     filter[i].code = tswap16(tfilter[i].code);
2887                     filter[i].jt = tfilter[i].jt;
2888                     filter[i].jf = tfilter[i].jf;
2889                     filter[i].k = tswap32(tfilter[i].k);
2891                 fprog.filter = filter;
2893                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2894                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2897                 unlock_user_struct(tfilter, tfprog->filter, 1);
2898                 unlock_user_struct(tfprog, optval_addr, 1);
2901         case TARGET_SO_BINDTODEVICE:
2903             char *dev_ifname, *addr_ifname;
/* clamp the interface name to IFNAMSIZ-1 and NUL-terminate a copy */
2905             if (optlen > IFNAMSIZ - 1) {
2906                 optlen = IFNAMSIZ - 1;
2908             dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2910                 return -TARGET_EFAULT;
2912             optname = SO_BINDTODEVICE;
2913             addr_ifname = alloca(IFNAMSIZ);
2914             memcpy(addr_ifname, dev_ifname, optlen);
2915             addr_ifname[optlen] = 0;
2916             ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2917                                        addr_ifname, optlen));
2918             unlock_user (dev_ifname, optval_addr, 0);
2921             /* Options with 'int' argument.  */
/* the long run of cases below just maps TARGET_SO_* to host SO_* */
2922         case TARGET_SO_DEBUG:
2925         case TARGET_SO_REUSEADDR:
2926             optname = SO_REUSEADDR;
2928         case TARGET_SO_TYPE:
2931         case TARGET_SO_ERROR:
2934         case TARGET_SO_DONTROUTE:
2935             optname = SO_DONTROUTE;
2937         case TARGET_SO_BROADCAST:
2938             optname = SO_BROADCAST;
2940         case TARGET_SO_SNDBUF:
2941             optname = SO_SNDBUF;
2943         case TARGET_SO_SNDBUFFORCE:
2944             optname = SO_SNDBUFFORCE;
2946         case TARGET_SO_RCVBUF:
2947             optname = SO_RCVBUF;
2949         case TARGET_SO_RCVBUFFORCE:
2950             optname = SO_RCVBUFFORCE;
2952         case TARGET_SO_KEEPALIVE:
2953             optname = SO_KEEPALIVE;
2955         case TARGET_SO_OOBINLINE:
2956             optname = SO_OOBINLINE;
2958         case TARGET_SO_NO_CHECK:
2959             optname = SO_NO_CHECK;
2961         case TARGET_SO_PRIORITY:
2962             optname = SO_PRIORITY;
2965         case TARGET_SO_BSDCOMPAT:
2966             optname = SO_BSDCOMPAT;
2969         case TARGET_SO_PASSCRED:
2970             optname = SO_PASSCRED;
2972         case TARGET_SO_PASSSEC:
2973             optname = SO_PASSSEC;
2975         case TARGET_SO_TIMESTAMP:
2976             optname = SO_TIMESTAMP;
2978         case TARGET_SO_RCVLOWAT:
2979             optname = SO_RCVLOWAT;
/* shared tail for all int-valued SOL_SOCKET options */
2985         if (optlen < sizeof(uint32_t))
2986             return -TARGET_EINVAL;
2988         if (get_user_u32(val, optval_addr))
2989             return -TARGET_EFAULT;
2990         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2994         gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
2995         ret = -TARGET_ENOPROTOOPT;
3000 /* do_getsockopt() Must return target values and target errnos. */
/*
 * Emulate getsockopt(2): translate target option constants, call the
 * host getsockopt, and copy the result back in target layout.
 * NOTE(review): case labels, declarations and `break`s are elided in
 * this listing; structure annotations below are partly inferred.
 */
3001 static abi_long do_getsockopt(int sockfd, int level, int optname,
3002                               abi_ulong optval_addr, abi_ulong optlen)
3009     case TARGET_SOL_SOCKET:
3012         /* These don't just return a single integer */
3013         case TARGET_SO_LINGER:
3014         case TARGET_SO_RCVTIMEO:
3015         case TARGET_SO_SNDTIMEO:
3016         case TARGET_SO_PEERNAME:
3018         case TARGET_SO_PEERCRED: {
/* SO_PEERCRED: fetch host ucred and rewrite it as target_ucred */
3021             struct target_ucred *tcr;
3023             if (get_user_u32(len, optlen)) {
3024                 return -TARGET_EFAULT;
3027                 return -TARGET_EINVAL;
3031             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
3039             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
3040                 return -TARGET_EFAULT;
3042             __put_user(cr.pid, &tcr->pid);
3043             __put_user(cr.uid, &tcr->uid);
3044             __put_user(cr.gid, &tcr->gid);
3045             unlock_user_struct(tcr, optval_addr, 1);
3046             if (put_user_u32(len, optlen)) {
3047                 return -TARGET_EFAULT;
3051         /* Options with 'int' argument.  */
/* TARGET_SO_* -> host SO_* constant mapping, then shared int tail */
3052         case TARGET_SO_DEBUG:
3055         case TARGET_SO_REUSEADDR:
3056             optname = SO_REUSEADDR;
3058         case TARGET_SO_TYPE:
3061         case TARGET_SO_ERROR:
3064         case TARGET_SO_DONTROUTE:
3065             optname = SO_DONTROUTE;
3067         case TARGET_SO_BROADCAST:
3068             optname = SO_BROADCAST;
3070         case TARGET_SO_SNDBUF:
3071             optname = SO_SNDBUF;
3073         case TARGET_SO_RCVBUF:
3074             optname = SO_RCVBUF;
3076         case TARGET_SO_KEEPALIVE:
3077             optname = SO_KEEPALIVE;
3079         case TARGET_SO_OOBINLINE:
3080             optname = SO_OOBINLINE;
3082         case TARGET_SO_NO_CHECK:
3083             optname = SO_NO_CHECK;
3085         case TARGET_SO_PRIORITY:
3086             optname = SO_PRIORITY;
3089         case TARGET_SO_BSDCOMPAT:
3090             optname = SO_BSDCOMPAT;
3093         case TARGET_SO_PASSCRED:
3094             optname = SO_PASSCRED;
3096         case TARGET_SO_TIMESTAMP:
3097             optname = SO_TIMESTAMP;
3099         case TARGET_SO_RCVLOWAT:
3100             optname = SO_RCVLOWAT;
3102         case TARGET_SO_ACCEPTCONN:
3103             optname = SO_ACCEPTCONN;
3110         /* TCP options all take an 'int' value.  */
3112         if (get_user_u32(len, optlen))
3113             return -TARGET_EFAULT;
3115             return -TARGET_EINVAL;
3117         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
/* SO_TYPE values differ between host and target ABIs */
3120         if (optname == SO_TYPE) {
3121             val = host_to_target_sock_type(val);
3126             if (put_user_u32(val, optval_addr))
3127                 return -TARGET_EFAULT;
3129             if (put_user_u8(val, optval_addr))
3130                 return -TARGET_EFAULT;
3132             if (put_user_u32(len, optlen))
3133                 return -TARGET_EFAULT;
3140         case IP_ROUTER_ALERT:
3144         case IP_MTU_DISCOVER:
3150         case IP_MULTICAST_TTL:
3151         case IP_MULTICAST_LOOP:
3152             if (get_user_u32(len, optlen))
3153                 return -TARGET_EFAULT;
3155                 return -TARGET_EINVAL;
3157             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
/* caller asked for < int: write back a single byte when it fits */
3160             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
3162                 if (put_user_u32(len, optlen)
3163                     || put_user_u8(val, optval_addr))
3164                     return -TARGET_EFAULT;
3166                 if (len > sizeof(int))
3168                 if (put_user_u32(len, optlen)
3169                     || put_user_u32(val, optval_addr))
3170                     return -TARGET_EFAULT;
3174             ret = -TARGET_ENOPROTOOPT;
3180         gemu_log("getsockopt level=%d optname=%d not yet supported\n",
3182         ret = -TARGET_EOPNOTSUPP;
/*
 * Build a host struct iovec array from a guest target_iovec array,
 * locking each guest buffer into host memory.  On failure the error
 * path unwinds the buffers locked so far.  NOTE(review): several
 * error-handling lines are elided in this listing.
 */
3188 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
3189                                 abi_ulong count, int copy)
3191     struct target_iovec *target_vec;
3193     abi_ulong total_len, max_len;
3196     bool bad_address = false;
3202     if (count > IOV_MAX) {
3207     vec = g_try_new0(struct iovec, count);
3213     target_vec = lock_user(VERIFY_READ, target_addr,
3214                            count * sizeof(struct target_iovec), 1);
3215     if (target_vec == NULL) {
3220     /* ??? If host page size > target page size, this will result in a
3221        value larger than what we can actually support.  */
3222     max_len = 0x7fffffff & TARGET_PAGE_MASK;
3225     for (i = 0; i < count; i++) {
3226         abi_ulong base = tswapal(target_vec[i].iov_base);
3227         abi_long len = tswapal(target_vec[i].iov_len);
3232         } else if (len == 0) {
3233             /* Zero length pointer is ignored.  */
3234             vec[i].iov_base = 0;
3236             vec[i].iov_base = lock_user(type, base, len, copy);
3237             /* If the first buffer pointer is bad, this is a fault.  But
3238              * subsequent bad buffers will result in a partial write; this
3239              * is realized by filling the vector with null pointers and
3241             if (!vec[i].iov_base) {
/* clamp total iovec length so it never exceeds max_len overall */
3252             if (len > max_len - total_len) {
3253                 len = max_len - total_len;
3256         vec[i].iov_len = len;
3260     unlock_user(target_vec, target_addr, 0);
/* failure path: release every guest buffer locked above */
3265         if (tswapal(target_vec[i].iov_len) > 0) {
3266             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3269     unlock_user(target_vec, target_addr, 0);
/*
 * Release the guest buffers locked by lock_iovec().  When `copy` is
 * set, the data written by the host is copied back to the guest.
 */
3276 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3277                          abi_ulong count, int copy)
3279     struct target_iovec *target_vec;
3282     target_vec = lock_user(VERIFY_READ, target_addr,
3283                            count * sizeof(struct target_iovec), 1);
3285         for (i = 0; i < count; i++) {
3286             abi_ulong base = tswapal(target_vec[i].iov_base);
3287             abi_long len = tswapal(target_vec[i].iov_len);
3291             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3293         unlock_user(target_vec, target_addr, 0);
/*
 * Translate a target socket type (base type plus SOCK_CLOEXEC /
 * SOCK_NONBLOCK flags) into the host's encoding, in place.
 * Returns -TARGET_EINVAL if the host cannot express a requested flag.
 */
3299 static inline int target_to_host_sock_type(int *type)
3302     int target_type = *type;
3304     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3305     case TARGET_SOCK_DGRAM:
3306         host_type = SOCK_DGRAM;
3308     case TARGET_SOCK_STREAM:
3309         host_type = SOCK_STREAM;
/* other base types share the same numeric values on host and target */
3312         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3315     if (target_type & TARGET_SOCK_CLOEXEC) {
3316 #if defined(SOCK_CLOEXEC)
3317         host_type |= SOCK_CLOEXEC;
3319         return -TARGET_EINVAL;
3322     if (target_type & TARGET_SOCK_NONBLOCK) {
3323 #if defined(SOCK_NONBLOCK)
3324         host_type |= SOCK_NONBLOCK;
3325 #elif !defined(O_NONBLOCK)
3326         return -TARGET_EINVAL;
3333 /* Try to emulate socket type flags after socket creation.  */
/* Fallback for hosts without SOCK_NONBLOCK: set O_NONBLOCK via fcntl. */
3334 static int sock_flags_fixup(int fd, int target_type)
3336 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3337     if (target_type & TARGET_SOCK_NONBLOCK) {
3338         int flags = fcntl(fd, F_GETFL);
3339         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3341             return -TARGET_EINVAL;
/*
 * Convert a target sockaddr for AF_PACKET/SOCK_PACKET sockets: copy
 * the raw bytes and swap only sa_family (spkt_protocol stays
 * big-endian on the wire).
 */
3348 static abi_long packet_target_to_host_sockaddr(void *host_addr,
3349                                                abi_ulong target_addr,
3352     struct sockaddr *addr = host_addr;
3353     struct target_sockaddr *target_saddr;
3355     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
3356     if (!target_saddr) {
3357         return -TARGET_EFAULT;
3360     memcpy(addr, target_saddr, len);
3361     addr->sa_family = tswap16(target_saddr->sa_family);
3362     /* spkt_protocol is big-endian */
3364     unlock_user(target_saddr, target_addr, 0);
/* fd translator installed on packet sockets: address conversion only */
3368 static TargetFdTrans target_packet_trans = {
3369     .target_to_host_addr = packet_target_to_host_sockaddr,
3372 #ifdef CONFIG_RTNETLINK
/* fd-translation hook: convert outgoing rtnetlink data target -> host */
3373 static abi_long netlink_route_target_to_host(void *buf, size_t len)
3377     ret = target_to_host_nlmsg_route(buf, len);
/* fd-translation hook: convert incoming rtnetlink data host -> target */
3385 static abi_long netlink_route_host_to_target(void *buf, size_t len)
3389     ret = host_to_target_nlmsg_route(buf, len);
/* fd translator installed on NETLINK_ROUTE sockets */
3397 static TargetFdTrans target_netlink_route_trans = {
3398     .target_to_host_data = netlink_route_target_to_host,
3399     .host_to_target_data = netlink_route_host_to_target,
3401 #endif /* CONFIG_RTNETLINK */
/* fd-translation hook: convert outgoing audit netlink data target -> host */
3403 static abi_long netlink_audit_target_to_host(void *buf, size_t len)
3407     ret = target_to_host_nlmsg_audit(buf, len);
/* fd-translation hook: convert incoming audit netlink data host -> target */
3415 static abi_long netlink_audit_host_to_target(void *buf, size_t len)
3419     ret = host_to_target_nlmsg_audit(buf, len);
/* fd translator installed on NETLINK_AUDIT sockets */
3427 static TargetFdTrans target_netlink_audit_trans = {
3428     .target_to_host_data = netlink_audit_target_to_host,
3429     .host_to_target_data = netlink_audit_host_to_target,
3432 /* do_socket() Must return target values and target errnos. */
/*
 * Emulate socket(2): translate the target socket type, restrict
 * PF_NETLINK to the protocols we can translate, create the host
 * socket, and register the appropriate fd translator.
 * NOTE(review): several braces / switch lines are elided here.
 */
3433 static abi_long do_socket(int domain, int type, int protocol)
3435     int target_type = type;
3438     ret = target_to_host_sock_type(&type);
/* only NETLINK protocols with data translation support are allowed */
3443     if (domain == PF_NETLINK && !(
3444 #ifdef CONFIG_RTNETLINK
3445         protocol == NETLINK_ROUTE ||
3447         protocol == NETLINK_KOBJECT_UEVENT ||
3448         protocol == NETLINK_AUDIT)) {
3449         return -EPFNOSUPPORT;
/* packet sockets carry the protocol in network byte order */
3452     if (domain == AF_PACKET ||
3453         (domain == AF_INET && type == SOCK_PACKET)) {
3454         protocol = tswap16(protocol);
3457     ret = get_errno(socket(domain, type, protocol));
3459         ret = sock_flags_fixup(ret, target_type);
3460         if (type == SOCK_PACKET) {
3461             /* Manage an obsolete case :
3462              * if socket type is SOCK_PACKET, bind by name
3464             fd_trans_register(ret, &target_packet_trans);
3465         } else if (domain == PF_NETLINK) {
3467 #ifdef CONFIG_RTNETLINK
3469                 fd_trans_register(ret, &target_netlink_route_trans);
3472             case NETLINK_KOBJECT_UEVENT:
3473                 /* nothing to do: messages are strings */
3476                 fd_trans_register(ret, &target_netlink_audit_trans);
/* protocol was validated above, so any other value is a logic error */
3479                 g_assert_not_reached();
3486 /* do_bind() Must return target values and target errnos. */
/* Emulate bind(2): convert the guest sockaddr, then bind on the host. */
3487 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3493     if ((int)addrlen < 0) {
3494         return -TARGET_EINVAL;
/* +1 leaves room for NUL-terminating abstract/unix paths if needed */
3497     addr = alloca(addrlen+1);
3499     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3503     return get_errno(bind(sockfd, addr, addrlen));
3506 /* do_connect() Must return target values and target errnos. */
/* Emulate connect(2): convert the guest sockaddr, then connect (restartable). */
3507 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3513     if ((int)addrlen < 0) {
3514         return -TARGET_EINVAL;
3517     addr = alloca(addrlen+1);
3519     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3523     return get_errno(safe_connect(sockfd, addr, addrlen));
3526 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
/*
 * Shared core for sendmsg/recvmsg emulation on an already-locked
 * target_msghdr: converts name, control data and iovec, applies any
 * per-fd data translator, and performs the host sendmsg/recvmsg.
 * NOTE(review): several braces and error-path lines are elided in
 * this listing.
 */
3527 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3528                                       int flags, int send)
3534     abi_ulong target_vec;
3536     if (msgp->msg_name) {
3537         msg.msg_namelen = tswap32(msgp->msg_namelen);
3538         msg.msg_name = alloca(msg.msg_namelen+1);
3539         ret = target_to_host_sockaddr(fd, msg.msg_name,
3540                                       tswapal(msgp->msg_name),
3542         if (ret == -TARGET_EFAULT) {
3543             /* For connected sockets msg_name and msg_namelen must
3544              * be ignored, so returning EFAULT immediately is wrong.
3545              * Instead, pass a bad msg_name to the host kernel, and
3546              * let it decide whether to return EFAULT or not.
3548             msg.msg_name = (void *)-1;
3553         msg.msg_name = NULL;
3554         msg.msg_namelen = 0;
/* 2x: host cmsg layout can need more space than the target's */
3556     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3557     msg.msg_control = alloca(msg.msg_controllen);
3558     msg.msg_flags = tswap32(msgp->msg_flags);
3560     count = tswapal(msgp->msg_iovlen);
3561     target_vec = tswapal(msgp->msg_iov);
3563     if (count > IOV_MAX) {
3564         /* sendrcvmsg returns a different errno for this condition than
3565          * readv/writev, so we must catch it here before lock_iovec() does.
3567         ret = -TARGET_EMSGSIZE;
3571     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3572                      target_vec, count, send);
3574         ret = -host_to_target_errno(errno);
3577     msg.msg_iovlen = count;
/* send path: run per-fd translator on a copy of the payload first */
3581         if (fd_trans_target_to_host_data(fd)) {
3584             host_msg = g_malloc(msg.msg_iov->iov_len);
3585             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3586             ret = fd_trans_target_to_host_data(fd)(host_msg,
3587                                                    msg.msg_iov->iov_len);
3589                 msg.msg_iov->iov_base = host_msg;
3590                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3594             ret = target_to_host_cmsg(&msg, msgp);
3596                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
/* receive path: translate payload, cmsg and peer address back */
3600         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3601         if (!is_error(ret)) {
3603             if (fd_trans_host_to_target_data(fd)) {
3604                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3607                 ret = host_to_target_cmsg(msgp, &msg);
3609             if (!is_error(ret)) {
3610                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3611                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3612                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3613                                                   msg.msg_name, msg.msg_namelen);
3625     unlock_iovec(vec, target_vec, count, !send);
/*
 * Emulate sendmsg/recvmsg: lock the guest msghdr (read-only for send,
 * writable for recv) and delegate to do_sendrecvmsg_locked().
 */
3630 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3631                                int flags, int send)
3634     struct target_msghdr *msgp;
3636     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3640         return -TARGET_EFAULT;
3642     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
/* copy the msghdr back to the guest only on the receive path */
3643     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3647 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3648  * so it might not have this *mmsg-specific flag either.
3650 #ifndef MSG_WAITFORONE
3651 #define MSG_WAITFORONE 0x10000
/*
 * Emulate sendmmsg/recvmmsg by looping do_sendrecvmsg_locked() over
 * the guest mmsghdr vector.  NOTE(review): the final return-value
 * selection lines are elided in this listing.
 */
3654 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3655                                 unsigned int vlen, unsigned int flags,
3658     struct target_mmsghdr *mmsgp;
3662     if (vlen > UIO_MAXIOV) {
3666     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3668         return -TARGET_EFAULT;
3671     for (i = 0; i < vlen; i++) {
3672         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3673         if (is_error(ret)) {
/* per-datagram byte count is stored back into msg_len */
3676         mmsgp[i].msg_len = tswap32(ret);
3677         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3678         if (flags & MSG_WAITFORONE) {
3679             flags |= MSG_DONTWAIT;
3683     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3685     /* Return number of datagrams sent if we sent any at all;
3686      * otherwise return the error.
3695 static abi_long do_accept4(int fd, abi_ulong target_addr,
3696 abi_ulong target_addrlen_addr, int flags)
3703 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3705 if (target_addr == 0) {
3706 return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3709 /* linux returns EINVAL if addrlen pointer is invalid */
3710 if (get_user_u32(addrlen, target_addrlen_addr))
3711 return -TARGET_EINVAL;
3713 if ((int)addrlen < 0) {
3714 return -TARGET_EINVAL;
3717 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3718 return -TARGET_EINVAL;
3720 addr = alloca(addrlen);
3722 ret = get_errno(safe_accept4(fd, addr, &addrlen, host_flags));
3723 if (!is_error(ret)) {
3724 host_to_target_sockaddr(target_addr, addr, addrlen);
3725 if (put_user_u32(addrlen, target_addrlen_addr))
3726 ret = -TARGET_EFAULT;
3731 /* do_getpeername() Must return target values and target errnos. */
/* Emulate getpeername(2): fetch the host peer address and convert it. */
3732 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3733                                abi_ulong target_addrlen_addr)
3739     if (get_user_u32(addrlen, target_addrlen_addr))
3740         return -TARGET_EFAULT;
3742     if ((int)addrlen < 0) {
3743         return -TARGET_EINVAL;
3746     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3747         return -TARGET_EFAULT;
3749     addr = alloca(addrlen);
3751     ret = get_errno(getpeername(fd, addr, &addrlen));
3752     if (!is_error(ret)) {
3753         host_to_target_sockaddr(target_addr, addr, addrlen);
3754         if (put_user_u32(addrlen, target_addrlen_addr))
3755             ret = -TARGET_EFAULT;
3760 /* do_getsockname() Must return target values and target errnos. */
/* Emulate getsockname(2): fetch the host local address and convert it. */
3761 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3762                                abi_ulong target_addrlen_addr)
3768     if (get_user_u32(addrlen, target_addrlen_addr))
3769         return -TARGET_EFAULT;
3771     if ((int)addrlen < 0) {
3772         return -TARGET_EINVAL;
3775     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3776         return -TARGET_EFAULT;
3778     addr = alloca(addrlen);
3780     ret = get_errno(getsockname(fd, addr, &addrlen));
3781     if (!is_error(ret)) {
3782         host_to_target_sockaddr(target_addr, addr, addrlen);
3783         if (put_user_u32(addrlen, target_addrlen_addr))
3784             ret = -TARGET_EFAULT;
3789 /* do_socketpair() Must return target values and target errnos. */
/* Emulate socketpair(2): translate the type and store both fds to guest memory. */
3790 static abi_long do_socketpair(int domain, int type, int protocol,
3791                               abi_ulong target_tab_addr)
3796     target_to_host_sock_type(&type);
3798     ret = get_errno(socketpair(domain, type, protocol, tab));
3799     if (!is_error(ret)) {
3800         if (put_user_s32(tab[0], target_tab_addr)
3801             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3802             ret = -TARGET_EFAULT;
3807 /* do_sendto() Must return target values and target errnos. */
/*
 * Emulate sendto(2): lock the guest payload, run any per-fd data
 * translator on a private copy, convert the destination sockaddr when
 * given, and send.  NOTE(review): some braces and error-path lines
 * are elided in this listing.
 */
3808 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3809                           abi_ulong target_addr, socklen_t addrlen)
3813     void *copy_msg = NULL;
3816     if ((int)addrlen < 0) {
3817         return -TARGET_EINVAL;
3820     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3822         return -TARGET_EFAULT;
/* translator mutates the buffer, so work on a heap copy, not guest memory */
3823     if (fd_trans_target_to_host_data(fd)) {
3824         copy_msg = host_msg;
3825         host_msg = g_malloc(len);
3826         memcpy(host_msg, copy_msg, len);
3827         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3833         addr = alloca(addrlen+1);
3834         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3838         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3840         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
/* restore the locked guest pointer before unlocking */
3845         host_msg = copy_msg;
3847     unlock_user(host_msg, msg, 0);
3851 /* do_recvfrom() Must return target values and target errnos. */
/*
 * Emulate recvfrom(2): receive into a locked guest buffer, apply any
 * per-fd data translator, and copy the source address back when the
 * caller asked for it.  NOTE(review): some braces and `goto fail`
 * lines are elided in this listing.
 */
3852 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3853                             abi_ulong target_addr,
3854                             abi_ulong target_addrlen)
3861     host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3863         return -TARGET_EFAULT;
/* address wanted: validate the caller-supplied addrlen first */
3865         if (get_user_u32(addrlen, target_addrlen)) {
3866             ret = -TARGET_EFAULT;
3869         if ((int)addrlen < 0) {
3870             ret = -TARGET_EINVAL;
3873         addr = alloca(addrlen);
3874         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3877         addr = NULL; /* To keep compiler quiet.  */
3878         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3880     if (!is_error(ret)) {
3881         if (fd_trans_host_to_target_data(fd)) {
3882             ret = fd_trans_host_to_target_data(fd)(host_msg, ret);
3885             host_to_target_sockaddr(target_addr, addr, addrlen);
3886             if (put_user_u32(addrlen, target_addrlen)) {
3887                 ret = -TARGET_EFAULT;
/* success path copies `len` bytes back; failure path copies nothing */
3891         unlock_user(host_msg, msg, len);
3894         unlock_user(host_msg, msg, 0);
3899 #ifdef TARGET_NR_socketcall
3900 /* do_socketcall() must return target values and target errnos. */
/*
 * Demultiplexer for the socketcall(2) syscall used by some guest ABIs
 * (e.g. i386): 'num' selects the socket operation and 'vptr' is a guest
 * pointer to an array of abi_long arguments.  The nargs[] table, indexed
 * by TARGET_SYS_*, records how many argument words each operation takes
 * so exactly that many are fetched from guest memory before dispatching
 * to the corresponding do_*() helper (which performs its own
 * target<->host conversion).
 */
3901 static abi_long do_socketcall(int num, abi_ulong vptr)
3903 static const unsigned nargs[] = { /* number of arguments per operation */
3904 [TARGET_SYS_SOCKET] = 3, /* domain, type, protocol */
3905 [TARGET_SYS_BIND] = 3, /* fd, addr, addrlen */
3906 [TARGET_SYS_CONNECT] = 3, /* fd, addr, addrlen */
3907 [TARGET_SYS_LISTEN] = 2, /* fd, backlog */
3908 [TARGET_SYS_ACCEPT] = 3, /* fd, addr, addrlen */
3909 [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3910 [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3911 [TARGET_SYS_SOCKETPAIR] = 4, /* domain, type, protocol, tab */
3912 [TARGET_SYS_SEND] = 4, /* fd, msg, len, flags */
3913 [TARGET_SYS_RECV] = 4, /* fd, msg, len, flags */
3914 [TARGET_SYS_SENDTO] = 6, /* fd, msg, len, flags, addr, addrlen */
3915 [TARGET_SYS_RECVFROM] = 6, /* fd, msg, len, flags, addr, addrlen */
3916 [TARGET_SYS_SHUTDOWN] = 2, /* fd, how */
3917 [TARGET_SYS_SETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */
3918 [TARGET_SYS_GETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */
3919 [TARGET_SYS_SENDMSG] = 3, /* fd, msg, flags */
3920 [TARGET_SYS_RECVMSG] = 3, /* fd, msg, flags */
3921 [TARGET_SYS_ACCEPT4] = 4, /* fd, addr, addrlen, flags */
3922 [TARGET_SYS_RECVMMSG] = 4, /* fd, msgvec, vlen, flags */
3923 [TARGET_SYS_SENDMMSG] = 4, /* fd, msgvec, vlen, flags */
3925 abi_long a[6]; /* max 6 args */
3928 /* check the range of the first argument num */
3929 /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3930 if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3931 return -TARGET_EINVAL;
3933 /* ensure we have space for args */
3934 if (nargs[num] > ARRAY_SIZE(a)) {
3935 return -TARGET_EINVAL;
3937 /* collect the arguments in a[] according to nargs[] */
3938 for (i = 0; i < nargs[num]; ++i) {
3939 if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3940 return -TARGET_EFAULT;
3943 /* now when we have the args, invoke the appropriate underlying function */
3945 case TARGET_SYS_SOCKET: /* domain, type, protocol */
3946 return do_socket(a[0], a[1], a[2]);
3947 case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3948 return do_bind(a[0], a[1], a[2]);
3949 case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3950 return do_connect(a[0], a[1], a[2]);
3951 case TARGET_SYS_LISTEN: /* sockfd, backlog */
3952 return get_errno(listen(a[0], a[1]));
3953 case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
/* plain accept is implemented as accept4 with flags == 0 */
3954 return do_accept4(a[0], a[1], a[2], 0);
3955 case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3956 return do_getsockname(a[0], a[1], a[2]);
3957 case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3958 return do_getpeername(a[0], a[1], a[2]);
3959 case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3960 return do_socketpair(a[0], a[1], a[2], a[3]);
3961 case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
/* send()/recv() are sendto()/recvfrom() with a NULL address */
3962 return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3963 case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3964 return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3965 case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3966 return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3967 case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3968 return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3969 case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3970 return get_errno(shutdown(a[0], a[1]));
3971 case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3972 return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3973 case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3974 return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3975 case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
/* last argument of do_sendrecvmsg() selects send (1) vs recv (0) */
3976 return do_sendrecvmsg(a[0], a[1], a[2], 1);
3977 case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3978 return do_sendrecvmsg(a[0], a[1], a[2], 0);
3979 case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3980 return do_accept4(a[0], a[1], a[2], a[3]);
3981 case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3982 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3983 case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3984 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
/* default: operation number is in range but not handled above */
3986 gemu_log("Unsupported socketcall: %d\n", num);
3987 return -TARGET_EINVAL;
3992 #define N_SHM_REGIONS 32
/*
 * Bookkeeping table for guest shmat() attachments.  do_shmat() records
 * each mapping here (fields elided in this view; in_use/start/size are
 * referenced below) so that do_shmdt() can look up the segment size and
 * clear the corresponding guest page flags on detach.
 */
3994 static struct shm_region {
3998 } shm_regions[N_SHM_REGIONS];
4000 #ifndef TARGET_SEMID64_DS
4001 /* asm-generic version of this struct */
/*
 * Target-ABI layout of semid64_ds, used as the wire format when the
 * target headers do not provide their own definition.  On 32-bit ABIs
 * the kernel pads each time field with an extra word (__unused1/2),
 * hence the TARGET_ABI_BITS == 32 conditionals.
 */
4002 struct target_semid64_ds
4004 struct target_ipc_perm sem_perm;
4005 abi_ulong sem_otime;
4006 #if TARGET_ABI_BITS == 32
4007 abi_ulong __unused1;
4009 abi_ulong sem_ctime;
4010 #if TARGET_ABI_BITS == 32
4011 abi_ulong __unused2;
4013 abi_ulong sem_nsems;
4014 abi_ulong __unused3;
4015 abi_ulong __unused4;
/*
 * Copy the ipc_perm member out of a guest semid64_ds at target_addr into
 * the host struct, byte-swapping as needed.  Note the per-target width
 * differences: mode and __seq are 32-bit on Alpha/MIPS/PPC (PPC only for
 * __seq) but 16-bit elsewhere.
 * Returns 0 on success, -TARGET_EFAULT if the guest struct is unmapped.
 */
4019 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
4020 abi_ulong target_addr)
4022 struct target_ipc_perm *target_ip;
4023 struct target_semid64_ds *target_sd;
4025 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4026 return -TARGET_EFAULT;
4027 target_ip = &(target_sd->sem_perm);
4028 host_ip->__key = tswap32(target_ip->__key);
4029 host_ip->uid = tswap32(target_ip->uid);
4030 host_ip->gid = tswap32(target_ip->gid);
4031 host_ip->cuid = tswap32(target_ip->cuid);
4032 host_ip->cgid = tswap32(target_ip->cgid);
4033 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4034 host_ip->mode = tswap32(target_ip->mode);
4036 host_ip->mode = tswap16(target_ip->mode);
4038 #if defined(TARGET_PPC)
4039 host_ip->__seq = tswap32(target_ip->__seq);
4041 host_ip->__seq = tswap16(target_ip->__seq);
4043 unlock_user_struct(target_sd, target_addr, 0);
/*
 * Inverse of target_to_host_ipc_perm(): write the host ipc_perm into the
 * ipc_perm member of the guest semid64_ds at target_addr, byte-swapping
 * and honouring the same per-target mode/__seq width differences.
 * Returns 0 on success, -TARGET_EFAULT if the guest struct is unmapped.
 */
4047 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
4048 struct ipc_perm *host_ip)
4050 struct target_ipc_perm *target_ip;
4051 struct target_semid64_ds *target_sd;
4053 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4054 return -TARGET_EFAULT;
4055 target_ip = &(target_sd->sem_perm);
4056 target_ip->__key = tswap32(host_ip->__key);
4057 target_ip->uid = tswap32(host_ip->uid);
4058 target_ip->gid = tswap32(host_ip->gid);
4059 target_ip->cuid = tswap32(host_ip->cuid);
4060 target_ip->cgid = tswap32(host_ip->cgid);
4061 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4062 target_ip->mode = tswap32(host_ip->mode);
4064 target_ip->mode = tswap16(host_ip->mode);
4066 #if defined(TARGET_PPC)
4067 target_ip->__seq = tswap32(host_ip->__seq);
4069 target_ip->__seq = tswap16(host_ip->__seq);
4071 unlock_user_struct(target_sd, target_addr, 1);
/*
 * Convert a guest semid64_ds at target_addr into the host semid_ds.
 * Delegates the ipc_perm member to target_to_host_ipc_perm(), then swaps
 * the remaining scalar fields.
 * NOTE(review): target_to_host_ipc_perm() locks the same guest struct a
 * second time, and on its failure we return here with target_sd still
 * locked — looks like a lock leak on the error path; confirm against
 * lock_user_struct() semantics.
 */
4075 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
4076 abi_ulong target_addr)
4078 struct target_semid64_ds *target_sd;
4080 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4081 return -TARGET_EFAULT;
4082 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
4083 return -TARGET_EFAULT;
4084 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
4085 host_sd->sem_otime = tswapal(target_sd->sem_otime);
4086 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
4087 unlock_user_struct(target_sd, target_addr, 0);
/*
 * Convert a host semid_ds back into the guest semid64_ds at target_addr.
 * Mirror of target_to_host_semid_ds(); the ipc_perm member is written by
 * host_to_target_ipc_perm() (which locks the same guest struct again —
 * see the review note on the converse function).
 */
4091 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
4092 struct semid_ds *host_sd)
4094 struct target_semid64_ds *target_sd;
4096 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4097 return -TARGET_EFAULT;
4098 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
4099 return -TARGET_EFAULT;
4100 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
4101 target_sd->sem_otime = tswapal(host_sd->sem_otime);
4102 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
4103 unlock_user_struct(target_sd, target_addr, 1);
/* Target-ABI view of struct seminfo (field list elided in this view). */
4107 struct target_seminfo {
/*
 * Copy a host struct seminfo (IPC_INFO/SEM_INFO result) field-by-field
 * into the guest struct at target_addr; __put_user handles the swap.
 * Returns 0 on success, -TARGET_EFAULT if the guest struct is unmapped.
 */
4120 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
4121 struct seminfo *host_seminfo)
4123 struct target_seminfo *target_seminfo;
4124 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
4125 return -TARGET_EFAULT;
4126 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
4127 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
4128 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
4129 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
4130 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
4131 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
4132 __put_user(host_seminfo->semume, &target_seminfo->semume);
4133 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
4134 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
4135 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
4136 unlock_user_struct(target_seminfo, target_addr, 1);
/*
 * Members of the host 'union semun' (semctl's fourth argument), followed
 * by the start of the target-ABI equivalent union (its abi_ulong members
 * are elided in this view).
 */
4142 struct semid_ds *buf;
4143 unsigned short *array;
4144 struct seminfo *__buf;
4147 union target_semun {
/*
 * For semctl SETALL: allocate a host array of semaphore values and fill
 * it from the guest array at target_addr.  The element count is obtained
 * by querying the semaphore set itself with IPC_STAT (sem_nsems), so the
 * guest buffer length is never trusted.  On success *host_array is owned
 * by the caller (host_to_target_semarray() frees it).
 * Returns 0, or a -TARGET_* errno on failure.
 */
4154 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
4155 abi_ulong target_addr)
4158 unsigned short *array;
4160 struct semid_ds semid_ds;
4163 semun.buf = &semid_ds;
4165 ret = semctl(semid, 0, IPC_STAT, semun);
4167 return get_errno(ret);
4169 nsems = semid_ds.sem_nsems;
/* g_try_new so a huge nsems reports ENOMEM instead of aborting */
4171 *host_array = g_try_new(unsigned short, nsems);
4173 return -TARGET_ENOMEM;
4175 array = lock_user(VERIFY_READ, target_addr,
4176 nsems*sizeof(unsigned short), 1);
4178 g_free(*host_array);
4179 return -TARGET_EFAULT;
4182 for(i=0; i<nsems; i++) {
4183 __get_user((*host_array)[i], &array[i]);
4185 unlock_user(array, target_addr, 0);
/*
 * For semctl GETALL: copy the host array of semaphore values back to the
 * guest buffer at target_addr.  As in target_to_host_semarray(), the
 * count comes from IPC_STAT on the semaphore set.  Consumes (frees)
 * *host_array regardless of the copy direction succeeding past lock.
 * Returns 0, or a -TARGET_* errno on failure.
 */
4190 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
4191 unsigned short **host_array)
4194 unsigned short *array;
4196 struct semid_ds semid_ds;
4199 semun.buf = &semid_ds;
4201 ret = semctl(semid, 0, IPC_STAT, semun);
4203 return get_errno(ret);
4205 nsems = semid_ds.sem_nsems;
4207 array = lock_user(VERIFY_WRITE, target_addr,
4208 nsems*sizeof(unsigned short), 0);
4210 return -TARGET_EFAULT;
4212 for(i=0; i<nsems; i++) {
4213 __put_user((*host_array)[i], &array[i]);
/* ownership of the host array ends here */
4215 g_free(*host_array);
4216 unlock_user(array, target_addr, 1);
/*
 * Emulate semctl(2).  target_arg is the guest's union semun passed by
 * value; depending on cmd it is interpreted as an int (SETVAL), a guest
 * pointer to a value array (GETALL/SETALL), a guest semid_ds pointer
 * (IPC_STAT/IPC_SET) or a guest seminfo pointer (IPC_INFO/SEM_INFO).
 * The case labels and some err/ret plumbing are elided in this view.
 * Returns the host result converted via get_errno(), or a -TARGET_*
 * errno for conversion failures.
 */
4221 static inline abi_long do_semctl(int semid, int semnum, int cmd,
4222 abi_ulong target_arg)
4224 union target_semun target_su = { .buf = target_arg };
4226 struct semid_ds dsarg;
4227 unsigned short *array = NULL;
4228 struct seminfo seminfo;
4229 abi_long ret = -TARGET_EINVAL;
4236 /* In 64 bit cross-endian situations, we will erroneously pick up
4237 * the wrong half of the union for the "val" element. To rectify
4238 * this, the entire 8-byte structure is byteswapped, followed by
4239 * a swap of the 4 byte val field. In other cases, the data is
4240 * already in proper host byte order. */
4241 if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4242 target_su.buf = tswapal(target_su.buf);
4243 arg.val = tswap32(target_su.val);
4245 arg.val = target_su.val;
4247 ret = get_errno(semctl(semid, semnum, cmd, arg));
/* GETALL/SETALL: marshal the value array in both directions */
4251 err = target_to_host_semarray(semid, &array, target_su.array);
4255 ret = get_errno(semctl(semid, semnum, cmd, arg));
4256 err = host_to_target_semarray(semid, target_su.array, &array);
/* IPC_STAT/IPC_SET: convert semid_ds through a host temporary */
4263 err = target_to_host_semid_ds(&dsarg, target_su.buf);
4267 ret = get_errno(semctl(semid, semnum, cmd, arg));
4268 err = host_to_target_semid_ds(target_su.buf, &dsarg);
/* IPC_INFO/SEM_INFO: kernel fills a seminfo we copy back out */
4274 arg.__buf = &seminfo;
4275 ret = get_errno(semctl(semid, semnum, cmd, arg));
4276 err = host_to_target_seminfo(target_su.__buf, &seminfo);
/* commands with no argument (e.g. IPC_RMID, GETVAL, GETPID...) */
4284 ret = get_errno(semctl(semid, semnum, cmd, NULL));
/* Target-ABI view of struct sembuf (sem_op/sem_flg elided in this view). */
4291 struct target_sembuf {
4292 unsigned short sem_num;
/*
 * Copy an array of nsops guest sembuf operations at target_addr into the
 * caller-provided host array, swapping each field with __get_user.
 * Returns 0 on success, -TARGET_EFAULT if the guest array is unmapped.
 */
4297 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4298 abi_ulong target_addr,
4301 struct target_sembuf *target_sembuf;
4304 target_sembuf = lock_user(VERIFY_READ, target_addr,
4305 nsops*sizeof(struct target_sembuf), 1);
4307 return -TARGET_EFAULT;
4309 for(i=0; i<nsops; i++) {
4310 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4311 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4312 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4315 unlock_user(target_sembuf, target_addr, 0);
/*
 * Emulate semop(2): convert the guest sembuf array and issue the host
 * operation (via semtimedop with a NULL timeout).
 * NOTE(review): 'sops' is a VLA sized directly by the guest-supplied
 * nsops with no visible upper bound — a huge value could overflow the
 * stack; consider clamping (the kernel itself caps at SEMOPM).
 */
4320 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
4322 struct sembuf sops[nsops];
4324 if (target_to_host_sembuf(sops, ptr, nsops))
4325 return -TARGET_EFAULT;
4327 return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
/*
 * Target-ABI layout of msqid64_ds.  As with target_semid64_ds, 32-bit
 * ABIs carry padding words (__unused1..3) after each time field.
 */
4330 struct target_msqid_ds
4332 struct target_ipc_perm msg_perm;
4333 abi_ulong msg_stime;
4334 #if TARGET_ABI_BITS == 32
4335 abi_ulong __unused1;
4337 abi_ulong msg_rtime;
4338 #if TARGET_ABI_BITS == 32
4339 abi_ulong __unused2;
4341 abi_ulong msg_ctime;
4342 #if TARGET_ABI_BITS == 32
4343 abi_ulong __unused3;
4345 abi_ulong __msg_cbytes;
4347 abi_ulong msg_qbytes;
4348 abi_ulong msg_lspid;
4349 abi_ulong msg_lrpid;
4350 abi_ulong __unused4;
4351 abi_ulong __unused5;
/*
 * Convert a guest msqid_ds at target_addr into the host struct for
 * msgctl IPC_SET.  The ipc_perm member goes through
 * target_to_host_ipc_perm() (which re-locks the same guest struct; on
 * its failure target_md stays locked — same pattern as
 * target_to_host_semid_ds(), see review note there).
 */
4354 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4355 abi_ulong target_addr)
4357 struct target_msqid_ds *target_md;
4359 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4360 return -TARGET_EFAULT;
4361 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4362 return -TARGET_EFAULT;
4363 host_md->msg_stime = tswapal(target_md->msg_stime);
4364 host_md->msg_rtime = tswapal(target_md->msg_rtime);
4365 host_md->msg_ctime = tswapal(target_md->msg_ctime);
4366 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4367 host_md->msg_qnum = tswapal(target_md->msg_qnum);
4368 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4369 host_md->msg_lspid = tswapal(target_md->msg_lspid);
4370 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4371 unlock_user_struct(target_md, target_addr, 0);
/*
 * Mirror of target_to_host_msqid_ds(): write the host msqid_ds (e.g.
 * from msgctl IPC_STAT) back to the guest struct at target_addr.
 */
4375 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4376 struct msqid_ds *host_md)
4378 struct target_msqid_ds *target_md;
4380 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4381 return -TARGET_EFAULT;
4382 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4383 return -TARGET_EFAULT;
4384 target_md->msg_stime = tswapal(host_md->msg_stime);
4385 target_md->msg_rtime = tswapal(host_md->msg_rtime);
4386 target_md->msg_ctime = tswapal(host_md->msg_ctime);
4387 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4388 target_md->msg_qnum = tswapal(host_md->msg_qnum);
4389 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4390 target_md->msg_lspid = tswapal(host_md->msg_lspid);
4391 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4392 unlock_user_struct(target_md, target_addr, 1);
/* Target-ABI view of struct msginfo (leading fields elided in this view). */
4396 struct target_msginfo {
4404 unsigned short int msgseg;
/*
 * Copy a host struct msginfo (msgctl IPC_INFO/MSG_INFO result) into the
 * guest struct at target_addr; __put_user handles byte-swapping.
 * Returns 0 on success, -TARGET_EFAULT if the guest struct is unmapped.
 */
4407 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4408 struct msginfo *host_msginfo)
4410 struct target_msginfo *target_msginfo;
4411 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4412 return -TARGET_EFAULT;
4413 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4414 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4415 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4416 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4417 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4418 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4419 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4420 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4421 unlock_user_struct(target_msginfo, target_addr, 1);
/*
 * Emulate msgctl(2).  Dispatches on cmd (case labels elided in this
 * view): IPC_STAT/IPC_SET round-trip a msqid_ds through a host
 * temporary, IPC_RMID passes NULL, and IPC_INFO/MSG_INFO return a
 * msginfo (the host call reuses the msqid_ds* parameter slot, hence the
 * cast).  'ptr' is the guest buffer address where applicable.
 */
4425 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4427 struct msqid_ds dsarg;
4428 struct msginfo msginfo;
4429 abi_long ret = -TARGET_EINVAL;
4437 if (target_to_host_msqid_ds(&dsarg,ptr))
4438 return -TARGET_EFAULT;
4439 ret = get_errno(msgctl(msgid, cmd, &dsarg));
4440 if (host_to_target_msqid_ds(ptr,&dsarg))
4441 return -TARGET_EFAULT;
4444 ret = get_errno(msgctl(msgid, cmd, NULL));
4448 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4449 if (host_to_target_msginfo(ptr, &msginfo))
4450 return -TARGET_EFAULT;
/* Target-ABI view of struct msgbuf: mtype followed by mtext[] (elided). */
4457 struct target_msgbuf {
/*
 * Emulate msgsnd(2): copy the guest msgbuf (mtype + msgsz bytes of
 * mtext) into a freshly allocated host msgbuf and send it.  A negative
 * msgsz is rejected up front with -TARGET_EINVAL.
 * NOTE(review): host_mb's g_free is on a line elided from this view —
 * presumably after the send; verify no leak on the syscall path.
 */
4462 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4463 ssize_t msgsz, int msgflg)
4465 struct target_msgbuf *target_mb;
4466 struct msgbuf *host_mb;
4470 return -TARGET_EINVAL;
4473 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4474 return -TARGET_EFAULT;
/* +sizeof(long) for the host mtype that precedes the text */
4475 host_mb = g_try_malloc(msgsz + sizeof(long));
4477 unlock_user_struct(target_mb, msgp, 0);
4478 return -TARGET_ENOMEM;
4480 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4481 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4482 ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4484 unlock_user_struct(target_mb, msgp, 0);
/*
 * Emulate msgrcv(2): receive into a host msgbuf, then copy mtype and the
 * 'ret' bytes actually received back into the guest msgbuf at msgp.  The
 * mtext region is locked separately at msgp + sizeof(abi_ulong) (i.e.
 * just past the guest mtype).  A negative msgsz is rejected with
 * -TARGET_EINVAL; cleanup labels/g_free are elided from this view.
 */
4489 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4490 ssize_t msgsz, abi_long msgtyp,
4493 struct target_msgbuf *target_mb;
4495 struct msgbuf *host_mb;
4499 return -TARGET_EINVAL;
4502 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4503 return -TARGET_EFAULT;
4505 host_mb = g_try_malloc(msgsz + sizeof(long));
4507 ret = -TARGET_ENOMEM;
4510 ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4513 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4514 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4515 if (!target_mtext) {
4516 ret = -TARGET_EFAULT;
4519 memcpy(target_mb->mtext, host_mb->mtext, ret);
4520 unlock_user(target_mtext, target_mtext_addr, ret);
4523 target_mb->mtype = tswapal(host_mb->mtype);
4527 unlock_user_struct(target_mb, msgp, 1);
/*
 * Convert a guest shmid_ds at target_addr into the host struct for
 * shmctl IPC_SET.  shm_perm is converted by target_to_host_ipc_perm()
 * (which re-locks the same guest struct — same caveat as the sem/msg
 * converters above); the scalar fields use __get_user.
 */
4532 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4533 abi_ulong target_addr)
4535 struct target_shmid_ds *target_sd;
4537 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4538 return -TARGET_EFAULT;
4539 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4540 return -TARGET_EFAULT;
4541 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4542 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4543 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4544 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4545 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4546 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4547 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4548 unlock_user_struct(target_sd, target_addr, 0);
/*
 * Mirror of target_to_host_shmid_ds(): write a host shmid_ds (e.g. from
 * shmctl IPC_STAT) back to the guest struct at target_addr.
 */
4552 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4553 struct shmid_ds *host_sd)
4555 struct target_shmid_ds *target_sd;
4557 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4558 return -TARGET_EFAULT;
4559 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4560 return -TARGET_EFAULT;
4561 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4562 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4563 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4564 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4565 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4566 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4567 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4568 unlock_user_struct(target_sd, target_addr, 1);
/* Target-ABI view of struct shminfo (field list elided in this view). */
4572 struct target_shminfo {
/*
 * Copy a host struct shminfo (shmctl IPC_INFO result) into the guest
 * struct at target_addr; __put_user handles byte-swapping.
 * Returns 0 on success, -TARGET_EFAULT if the guest struct is unmapped.
 */
4580 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4581 struct shminfo *host_shminfo)
4583 struct target_shminfo *target_shminfo;
4584 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4585 return -TARGET_EFAULT;
4586 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4587 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4588 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4589 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4590 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4591 unlock_user_struct(target_shminfo, target_addr, 1);
/* Target-ABI view of struct shm_info (leading fields elided in this view). */
4595 struct target_shm_info {
4600 abi_ulong swap_attempts;
4601 abi_ulong swap_successes;
/*
 * Copy a host struct shm_info (shmctl SHM_INFO result) into the guest
 * struct at target_addr; __put_user handles byte-swapping.
 * Returns 0 on success, -TARGET_EFAULT if the guest struct is unmapped.
 */
4604 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4605 struct shm_info *host_shm_info)
4607 struct target_shm_info *target_shm_info;
4608 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4609 return -TARGET_EFAULT;
4610 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4611 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4612 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4613 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4614 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4615 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4616 unlock_user_struct(target_shm_info, target_addr, 1);
/*
 * Emulate shmctl(2).  Dispatches on cmd (case labels elided in this
 * view): IPC_STAT/IPC_SET round-trip a shmid_ds; IPC_INFO returns a
 * shminfo and SHM_INFO a shm_info (both smuggled through the shmid_ds*
 * parameter slot, hence the casts); remaining commands (e.g. IPC_RMID,
 * SHM_LOCK/SHM_UNLOCK) pass NULL.  'buf' is the guest buffer address.
 */
4620 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4622 struct shmid_ds dsarg;
4623 struct shminfo shminfo;
4624 struct shm_info shm_info;
4625 abi_long ret = -TARGET_EINVAL;
4633 if (target_to_host_shmid_ds(&dsarg, buf))
4634 return -TARGET_EFAULT;
4635 ret = get_errno(shmctl(shmid, cmd, &dsarg));
4636 if (host_to_target_shmid_ds(buf, &dsarg))
4637 return -TARGET_EFAULT;
4640 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4641 if (host_to_target_shminfo(buf, &shminfo))
4642 return -TARGET_EFAULT;
4645 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4646 if (host_to_target_shm_info(buf, &shm_info))
4647 return -TARGET_EFAULT;
4652 ret = get_errno(shmctl(shmid, cmd, NULL));
4659 #ifndef TARGET_FORCE_SHMLBA
4660 /* For most architectures, SHMLBA is the same as the page size;
4661 * some architectures have larger values, in which case they should
4662 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4663 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4664 * and defining its own value for SHMLBA.
4666 * The kernel also permits SHMLBA to be set by the architecture to a
4667 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4668 * this means that addresses are rounded to the large size if
4669 * SHM_RND is set but addresses not aligned to that size are not rejected
4670 * as long as they are at least page-aligned. Since the only architecture
4671 * which uses this is ia64 this code doesn't provide for that oddity.
/* Default SHMLBA for targets that don't override it: one guest page. */
4673 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4675 return TARGET_PAGE_SIZE;
/*
 * Emulate shmat(2).  Queries the segment size with IPC_STAT, enforces
 * target SHMLBA alignment (rounding down when SHM_RND is set, rejecting
 * otherwise), attaches on the host — either at the guest-chosen address
 * via g2h() or at a hole found by mmap_find_vma() when shmaddr is 0 —
 * then marks the guest pages valid/readable (plus writable unless
 * SHM_RDONLY) and records the mapping in shm_regions[] for do_shmdt().
 * Returns the guest attach address, or a -TARGET_* errno.
 * (Some lines, e.g. mmap_lock/unlock and the final return, are elided
 * from this view.)
 */
4679 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4680 int shmid, abi_ulong shmaddr, int shmflg)
4684 struct shmid_ds shm_info;
4688 /* find out the length of the shared memory segment */
4689 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4690 if (is_error(ret)) {
4691 /* can't get length, bail out */
4695 shmlba = target_shmlba(cpu_env);
4697 if (shmaddr & (shmlba - 1)) {
4698 if (shmflg & SHM_RND) {
4699 shmaddr &= ~(shmlba - 1);
4701 return -TARGET_EINVAL;
/* guest supplied an address: attach exactly there in host space */
4708 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
4710 abi_ulong mmap_start;
/* shmaddr == 0: pick a free guest VA range ourselves */
4712 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
4714 if (mmap_start == -1) {
4716 host_raddr = (void *)-1;
4718 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
4721 if (host_raddr == (void *)-1) {
4723 return get_errno((long)host_raddr);
4725 raddr=h2g((unsigned long)host_raddr);
4727 page_set_flags(raddr, raddr + shm_info.shm_segsz,
4728 PAGE_VALID | PAGE_READ |
4729 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
/* remember the attachment so do_shmdt() can undo the page flags */
4731 for (i = 0; i < N_SHM_REGIONS; i++) {
4732 if (!shm_regions[i].in_use) {
4733 shm_regions[i].in_use = true;
4734 shm_regions[i].start = raddr;
4735 shm_regions[i].size = shm_info.shm_segsz;
/*
 * Emulate shmdt(2): find the attachment recorded by do_shmat(), clear
 * its guest page flags and free the shm_regions[] slot, then detach the
 * host mapping.
 */
4745 static inline abi_long do_shmdt(abi_ulong shmaddr)
4749 for (i = 0; i < N_SHM_REGIONS; ++i) {
4750 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4751 shm_regions[i].in_use = false;
4752 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4757 return get_errno(shmdt(g2h(shmaddr)));
4760 #ifdef TARGET_NR_ipc
4761 /* ??? This only works with linear mappings. */
4762 /* do_ipc() must return target values and target errnos. */
/*
 * Demultiplexer for the legacy ipc(2) syscall: the low 16 bits of 'call'
 * select the IPCOP_* operation, the high 16 bits carry the ABI version
 * (version 0 msgrcv packs msgp/msgtyp into a target_ipc_kludge struct;
 * version 1 passes them directly).  Each operation forwards to the
 * corresponding do_*() helper above.  Some case labels and braces are
 * elided from this view.
 */
4763 static abi_long do_ipc(CPUArchState *cpu_env,
4764 unsigned int call, abi_long first,
4765 abi_long second, abi_long third,
4766 abi_long ptr, abi_long fifth)
4771 version = call >> 16;
4776 ret = do_semop(first, ptr, second);
4780 ret = get_errno(semget(first, second, third));
4783 case IPCOP_semctl: {
4784 /* The semun argument to semctl is passed by value, so dereference the
4787 get_user_ual(atptr, ptr);
4788 ret = do_semctl(first, second, third, atptr);
4793 ret = get_errno(msgget(first, second));
4797 ret = do_msgsnd(first, ptr, second, third);
4801 ret = do_msgctl(first, second, ptr);
/* old-style (version 0) msgrcv: args packed in a kludge struct */
4808 struct target_ipc_kludge {
4813 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4814 ret = -TARGET_EFAULT;
4818 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4820 unlock_user_struct(tmp, ptr, 0);
4824 ret = do_msgrcv(first, ptr, second, fifth, third);
/* shmat: on success the attach address is stored through 'third' */
4833 raddr = do_shmat(cpu_env, first, ptr, second);
4834 if (is_error(raddr))
4835 return get_errno(raddr);
4836 if (put_user_ual(raddr, third))
4837 return -TARGET_EFAULT;
4841 ret = -TARGET_EINVAL;
4846 ret = do_shmdt(ptr);
4850 /* IPC_* flag values are the same on all linux platforms */
4851 ret = get_errno(shmget(first, second, third));
4854 /* IPC_* and SHM_* command values are the same on all linux platforms */
4856 ret = do_shmctl(first, second, ptr);
/* default: unknown IPCOP_* value */
4859 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
4860 ret = -TARGET_ENOSYS;
4867 /* kernel structure types definitions */
/*
 * syscall_types.h is included twice with different STRUCT()/
 * STRUCT_SPECIAL() expansions: the first pass generates a STRUCT_<name>
 * enum constant per kernel struct, the second generates the matching
 * struct_<name>_def[] argtype descriptor array used by the thunk
 * conversion code.  (The #define STRUCT_MAX/#undef lines are elided
 * from this view.)
 */
4869 #define STRUCT(name, ...) STRUCT_ ## name,
4870 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4872 #include "syscall_types.h"
4876 #undef STRUCT_SPECIAL
4878 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4879 #define STRUCT_SPECIAL(name)
4880 #include "syscall_types.h"
4882 #undef STRUCT_SPECIAL
/*
 * IOCTLEntry describes one supported ioctl: the target and host request
 * numbers, access direction flags (IOC_R/IOC_W below), an optional
 * custom handler (do_ioctl), and the thunk argtype of the argument.
 */
4884 typedef struct IOCTLEntry IOCTLEntry;
4886 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
4887 int fd, int cmd, abi_long arg);
4891 unsigned int host_cmd;
4894 do_ioctl_fn *do_ioctl;
4895 const argtype arg_type[5];
4898 #define IOC_R 0x0001
4899 #define IOC_W 0x0002
4900 #define IOC_RW (IOC_R | IOC_W)
/* size of the fixed scratch buffer used for ioctl argument conversion */
4902 #define MAX_STRUCT_SIZE 4096
4904 #ifdef CONFIG_FIEMAP
4905 /* So fiemap access checks don't overflow on 32 bit systems.
4906 * This is very slightly smaller than the limit imposed by
4907 * the underlying kernel.
4909 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4910 / sizeof(struct fiemap_extent))
/*
 * Custom handler for FS_IOC_FIEMAP.  The argument is a struct fiemap
 * followed by fm_extent_count fiemap_extent slots filled in by the
 * kernel; since the total size is variable it cannot go through the
 * generic fixed-size thunk path.  Reads the header from the guest,
 * bounds-checks fm_extent_count against FIEMAP_MAX_EXTENTS, issues the
 * host ioctl (spilling to a heap buffer when the result exceeds
 * MAX_STRUCT_SIZE), then converts header and extents back to the guest.
 * NOTE(review): the g_free of the spilled buffer and the final return
 * are on lines elided from this view.
 */
4912 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4913 int fd, int cmd, abi_long arg)
4915 /* The parameter for this ioctl is a struct fiemap followed
4916 * by an array of struct fiemap_extent whose size is set
4917 * in fiemap->fm_extent_count. The array is filled in by the
4920 int target_size_in, target_size_out;
4922 const argtype *arg_type = ie->arg_type;
4923 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4926 int i, extent_size = thunk_type_size(extent_arg_type, 0);
4930 assert(arg_type[0] == TYPE_PTR);
4931 assert(ie->access == IOC_RW);
4933 target_size_in = thunk_type_size(arg_type, 0);
4934 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4936 return -TARGET_EFAULT;
4938 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4939 unlock_user(argptr, arg, 0);
4940 fm = (struct fiemap *)buf_temp;
/* reject guest counts that would overflow the size computation below */
4941 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4942 return -TARGET_EINVAL;
4945 outbufsz = sizeof (*fm) +
4946 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4948 if (outbufsz > MAX_STRUCT_SIZE) {
4949 /* We can't fit all the extents into the fixed size buffer.
4950 * Allocate one that is large enough and use it instead.
4952 fm = g_try_malloc(outbufsz);
4954 return -TARGET_ENOMEM;
4956 memcpy(fm, buf_temp, sizeof(struct fiemap));
4959 ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4960 if (!is_error(ret)) {
4961 target_size_out = target_size_in;
4962 /* An extent_count of 0 means we were only counting the extents
4963 * so there are no structs to copy
4965 if (fm->fm_extent_count != 0) {
4966 target_size_out += fm->fm_mapped_extents * extent_size;
4968 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4970 ret = -TARGET_EFAULT;
4972 /* Convert the struct fiemap */
4973 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4974 if (fm->fm_extent_count != 0) {
4975 p = argptr + target_size_in;
4976 /* ...and then all the struct fiemap_extents */
4977 for (i = 0; i < fm->fm_mapped_extents; i++) {
4978 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4983 unlock_user(argptr, arg, target_size_out);
/*
 * Custom handler for SIOCGIFCONF.  The guest passes a struct ifconf
 * whose ifc_buf points at an array of target ifreq structures; the
 * target and host ifreq sizes differ, so ifc_len is rescaled in both
 * directions and each ifreq is thunk-converted individually.  Spills to
 * a heap buffer when the host ifreq array exceeds MAX_STRUCT_SIZE.
 * NOTE(review): the spilled buffer comes from bare malloc() and no
 * matching free() is visible in this view — possible leak; also the
 * lock_user() of target_ifc_buf below is not checked for NULL before
 * use.  Both worth confirming against the full function.
 */
4993 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4994 int fd, int cmd, abi_long arg)
4996 const argtype *arg_type = ie->arg_type;
5000 struct ifconf *host_ifconf;
5002 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
5003 int target_ifreq_size;
5008 abi_long target_ifc_buf;
5012 assert(arg_type[0] == TYPE_PTR);
5013 assert(ie->access == IOC_RW);
5016 target_size = thunk_type_size(arg_type, 0);
5018 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5020 return -TARGET_EFAULT;
5021 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5022 unlock_user(argptr, arg, 0);
5024 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
/* remember the guest's ifc_len/ifc_buf before we overwrite them */
5025 target_ifc_len = host_ifconf->ifc_len;
5026 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
/* rescale the guest byte count to a host byte count */
5028 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
5029 nb_ifreq = target_ifc_len / target_ifreq_size;
5030 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
5032 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
5033 if (outbufsz > MAX_STRUCT_SIZE) {
5034 /* We can't fit all the extents into the fixed size buffer.
5035 * Allocate one that is large enough and use it instead.
5037 host_ifconf = malloc(outbufsz);
5039 return -TARGET_ENOMEM;
5041 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
/* host ifreq array lives immediately after the ifconf header */
5044 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
5046 host_ifconf->ifc_len = host_ifc_len;
5047 host_ifconf->ifc_buf = host_ifc_buf;
5049 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
5050 if (!is_error(ret)) {
5051 /* convert host ifc_len to target ifc_len */
5053 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
5054 target_ifc_len = nb_ifreq * target_ifreq_size;
5055 host_ifconf->ifc_len = target_ifc_len;
5057 /* restore target ifc_buf */
5059 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
5061 /* copy struct ifconf to target user */
5063 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5065 return -TARGET_EFAULT;
5066 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
5067 unlock_user(argptr, arg, target_size);
5069 /* copy ifreq[] to target user */
5071 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
5072 for (i = 0; i < nb_ifreq ; i++) {
5073 thunk_convert(argptr + i * target_ifreq_size,
5074 host_ifc_buf + i * sizeof(struct ifreq),
5075 ifreq_arg_type, THUNK_TARGET);
5077 unlock_user(argptr, target_ifc_buf, target_ifc_len);
5087 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5088 int cmd, abi_long arg)
5091 struct dm_ioctl *host_dm;
5092 abi_long guest_data;
5093 uint32_t guest_data_size;
5095 const argtype *arg_type = ie->arg_type;
5097 void *big_buf = NULL;
5101 target_size = thunk_type_size(arg_type, 0);
5102 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5104 ret = -TARGET_EFAULT;
5107 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5108 unlock_user(argptr, arg, 0);
5110 /* buf_temp is too small, so fetch things into a bigger buffer */
5111 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5112 memcpy(big_buf, buf_temp, target_size);
5116 guest_data = arg + host_dm->data_start;
5117 if ((guest_data - arg) < 0) {
5118 ret = -TARGET_EINVAL;
5121 guest_data_size = host_dm->data_size - host_dm->data_start;
5122 host_data = (char*)host_dm + host_dm->data_start;
5124 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5126 ret = -TARGET_EFAULT;
5130 switch (ie->host_cmd) {
5132 case DM_LIST_DEVICES:
5135 case DM_DEV_SUSPEND:
5138 case DM_TABLE_STATUS:
5139 case DM_TABLE_CLEAR:
5141 case DM_LIST_VERSIONS:
5145 case DM_DEV_SET_GEOMETRY:
5146 /* data contains only strings */
5147 memcpy(host_data, argptr, guest_data_size);
5150 memcpy(host_data, argptr, guest_data_size);
5151 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5155 void *gspec = argptr;
5156 void *cur_data = host_data;
5157 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5158 int spec_size = thunk_type_size(arg_type, 0);
5161 for (i = 0; i < host_dm->target_count; i++) {
5162 struct dm_target_spec *spec = cur_data;
5166 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5167 slen = strlen((char*)gspec + spec_size) + 1;
5169 spec->next = sizeof(*spec) + slen;
5170 strcpy((char*)&spec[1], gspec + spec_size);
5172 cur_data += spec->next;
5177 ret = -TARGET_EINVAL;
5178 unlock_user(argptr, guest_data, 0);
5181 unlock_user(argptr, guest_data, 0);
5183 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5184 if (!is_error(ret)) {
5185 guest_data = arg + host_dm->data_start;
5186 guest_data_size = host_dm->data_size - host_dm->data_start;
5187 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5188 switch (ie->host_cmd) {
5193 case DM_DEV_SUSPEND:
5196 case DM_TABLE_CLEAR:
5198 case DM_DEV_SET_GEOMETRY:
5199 /* no return data */
5201 case DM_LIST_DEVICES:
5203 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5204 uint32_t remaining_data = guest_data_size;
5205 void *cur_data = argptr;
5206 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5207 int nl_size = 12; /* can't use thunk_size due to alignment */
5210 uint32_t next = nl->next;
5212 nl->next = nl_size + (strlen(nl->name) + 1);
5214 if (remaining_data < nl->next) {
5215 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5218 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5219 strcpy(cur_data + nl_size, nl->name);
5220 cur_data += nl->next;
5221 remaining_data -= nl->next;
5225 nl = (void*)nl + next;
5230 case DM_TABLE_STATUS:
5232 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5233 void *cur_data = argptr;
5234 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5235 int spec_size = thunk_type_size(arg_type, 0);
5238 for (i = 0; i < host_dm->target_count; i++) {
5239 uint32_t next = spec->next;
5240 int slen = strlen((char*)&spec[1]) + 1;
5241 spec->next = (cur_data - argptr) + spec_size + slen;
5242 if (guest_data_size < spec->next) {
5243 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5246 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5247 strcpy(cur_data + spec_size, (char*)&spec[1]);
5248 cur_data = argptr + spec->next;
5249 spec = (void*)host_dm + host_dm->data_start + next;
5255 void *hdata = (void*)host_dm + host_dm->data_start;
5256 int count = *(uint32_t*)hdata;
5257 uint64_t *hdev = hdata + 8;
5258 uint64_t *gdev = argptr + 8;
5261 *(uint32_t*)argptr = tswap32(count);
5262 for (i = 0; i < count; i++) {
5263 *gdev = tswap64(*hdev);
5269 case DM_LIST_VERSIONS:
5271 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5272 uint32_t remaining_data = guest_data_size;
5273 void *cur_data = argptr;
5274 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5275 int vers_size = thunk_type_size(arg_type, 0);
5278 uint32_t next = vers->next;
5280 vers->next = vers_size + (strlen(vers->name) + 1);
5282 if (remaining_data < vers->next) {
5283 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5286 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5287 strcpy(cur_data + vers_size, vers->name);
5288 cur_data += vers->next;
5289 remaining_data -= vers->next;
5293 vers = (void*)vers + next;
5298 unlock_user(argptr, guest_data, 0);
5299 ret = -TARGET_EINVAL;
5302 unlock_user(argptr, guest_data, guest_data_size);
5304 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5306 ret = -TARGET_EFAULT;
5309 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5310 unlock_user(argptr, arg, target_size);
5317 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5318 int cmd, abi_long arg)
5322 const argtype *arg_type = ie->arg_type;
5323 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5326 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5327 struct blkpg_partition host_part;
5329 /* Read and convert blkpg */
5331 target_size = thunk_type_size(arg_type, 0);
5332 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5334 ret = -TARGET_EFAULT;
5337 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5338 unlock_user(argptr, arg, 0);
5340 switch (host_blkpg->op) {
5341 case BLKPG_ADD_PARTITION:
5342 case BLKPG_DEL_PARTITION:
5343 /* payload is struct blkpg_partition */
5346 /* Unknown opcode */
5347 ret = -TARGET_EINVAL;
5351 /* Read and convert blkpg->data */
5352 arg = (abi_long)(uintptr_t)host_blkpg->data;
5353 target_size = thunk_type_size(part_arg_type, 0);
5354 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5356 ret = -TARGET_EFAULT;
5359 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5360 unlock_user(argptr, arg, 0);
5362 /* Swizzle the data pointer to our local copy and call! */
5363 host_blkpg->data = &host_part;
5364 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5370 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5371 int fd, int cmd, abi_long arg)
5373 const argtype *arg_type = ie->arg_type;
5374 const StructEntry *se;
5375 const argtype *field_types;
5376 const int *dst_offsets, *src_offsets;
5379 abi_ulong *target_rt_dev_ptr;
5380 unsigned long *host_rt_dev_ptr;
5384 assert(ie->access == IOC_W);
5385 assert(*arg_type == TYPE_PTR);
5387 assert(*arg_type == TYPE_STRUCT);
5388 target_size = thunk_type_size(arg_type, 0);
5389 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5391 return -TARGET_EFAULT;
5394 assert(*arg_type == (int)STRUCT_rtentry);
5395 se = struct_entries + *arg_type++;
5396 assert(se->convert[0] == NULL);
5397 /* convert struct here to be able to catch rt_dev string */
5398 field_types = se->field_types;
5399 dst_offsets = se->field_offsets[THUNK_HOST];
5400 src_offsets = se->field_offsets[THUNK_TARGET];
5401 for (i = 0; i < se->nb_fields; i++) {
5402 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5403 assert(*field_types == TYPE_PTRVOID);
5404 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5405 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5406 if (*target_rt_dev_ptr != 0) {
5407 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5408 tswapal(*target_rt_dev_ptr));
5409 if (!*host_rt_dev_ptr) {
5410 unlock_user(argptr, arg, 0);
5411 return -TARGET_EFAULT;
5414 *host_rt_dev_ptr = 0;
5419 field_types = thunk_convert(buf_temp + dst_offsets[i],
5420 argptr + src_offsets[i],
5421 field_types, THUNK_HOST);
5423 unlock_user(argptr, arg, 0);
5425 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5426 if (*host_rt_dev_ptr != 0) {
5427 unlock_user((void *)*host_rt_dev_ptr,
5428 *target_rt_dev_ptr, 0);
5433 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5434 int fd, int cmd, abi_long arg)
5436 int sig = target_to_host_signal(arg);
5437 return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5440 static IOCTLEntry ioctl_entries[] = {
5441 #define IOCTL(cmd, access, ...) \
5442 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5443 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5444 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5449 /* ??? Implement proper locking for ioctls. */
5450 /* do_ioctl() Must return target values and target errnos. */
5451 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5453 const IOCTLEntry *ie;
5454 const argtype *arg_type;
5456 uint8_t buf_temp[MAX_STRUCT_SIZE];
5462 if (ie->target_cmd == 0) {
5463 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5464 return -TARGET_ENOSYS;
5466 if (ie->target_cmd == cmd)
5470 arg_type = ie->arg_type;
5472 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
5475 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5478 switch(arg_type[0]) {
5481 ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5485 ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5489 target_size = thunk_type_size(arg_type, 0);
5490 switch(ie->access) {
5492 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5493 if (!is_error(ret)) {
5494 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5496 return -TARGET_EFAULT;
5497 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5498 unlock_user(argptr, arg, target_size);
5502 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5504 return -TARGET_EFAULT;
5505 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5506 unlock_user(argptr, arg, 0);
5507 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5511 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5513 return -TARGET_EFAULT;
5514 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5515 unlock_user(argptr, arg, 0);
5516 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5517 if (!is_error(ret)) {
5518 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5520 return -TARGET_EFAULT;
5521 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5522 unlock_user(argptr, arg, target_size);
5528 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5529 (long)cmd, arg_type[0]);
5530 ret = -TARGET_ENOSYS;
5536 static const bitmask_transtbl iflag_tbl[] = {
5537 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5538 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5539 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5540 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5541 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5542 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5543 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5544 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5545 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5546 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5547 { TARGET_IXON, TARGET_IXON, IXON, IXON },
5548 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5549 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5550 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5554 static const bitmask_transtbl oflag_tbl[] = {
5555 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5556 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5557 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5558 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5559 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5560 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5561 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5562 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5563 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5564 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5565 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5566 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5567 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5568 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5569 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5570 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5571 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5572 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5573 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5574 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5575 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5576 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5577 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5578 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5582 static const bitmask_transtbl cflag_tbl[] = {
5583 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5584 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5585 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5586 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5587 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5588 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5589 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5590 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5591 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5592 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5593 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5594 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5595 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5596 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5597 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5598 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5599 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5600 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5601 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5602 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5603 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5604 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5605 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5606 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5607 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5608 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5609 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5610 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5611 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5612 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5613 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5617 static const bitmask_transtbl lflag_tbl[] = {
5618 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5619 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5620 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5621 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5622 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5623 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5624 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5625 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5626 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5627 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5628 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5629 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5630 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5631 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5632 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5636 static void target_to_host_termios (void *dst, const void *src)
5638 struct host_termios *host = dst;
5639 const struct target_termios *target = src;
5642 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5644 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5646 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5648 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5649 host->c_line = target->c_line;
5651 memset(host->c_cc, 0, sizeof(host->c_cc));
5652 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5653 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5654 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5655 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5656 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5657 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5658 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5659 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5660 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5661 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5662 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5663 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5664 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5665 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5666 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5667 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5668 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5671 static void host_to_target_termios (void *dst, const void *src)
5673 struct target_termios *target = dst;
5674 const struct host_termios *host = src;
5677 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5679 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5681 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5683 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5684 target->c_line = host->c_line;
5686 memset(target->c_cc, 0, sizeof(target->c_cc));
5687 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5688 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5689 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5690 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5691 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5692 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5693 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5694 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5695 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5696 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5697 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5698 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5699 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5700 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5701 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5702 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5703 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5706 static const StructEntry struct_termios_def = {
5707 .convert = { host_to_target_termios, target_to_host_termios },
5708 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5709 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5712 static bitmask_transtbl mmap_flags_tbl[] = {
5713 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5714 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5715 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5716 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
5717 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
5718 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
5719 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
5720 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5721 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE, MAP_NORESERVE,
5726 #if defined(TARGET_I386)
5728 /* NOTE: there is really one LDT for all the threads */
5729 static uint8_t *ldt_table;
5731 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5738 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5739 if (size > bytecount)
5741 p = lock_user(VERIFY_WRITE, ptr, size, 0);
5743 return -TARGET_EFAULT;
5744 /* ??? Should this by byteswapped? */
5745 memcpy(p, ldt_table, size);
5746 unlock_user(p, ptr, size);
5750 /* XXX: add locking support */
5751 static abi_long write_ldt(CPUX86State *env,
5752 abi_ulong ptr, unsigned long bytecount, int oldmode)
5754 struct target_modify_ldt_ldt_s ldt_info;
5755 struct target_modify_ldt_ldt_s *target_ldt_info;
5756 int seg_32bit, contents, read_exec_only, limit_in_pages;
5757 int seg_not_present, useable, lm;
5758 uint32_t *lp, entry_1, entry_2;
5760 if (bytecount != sizeof(ldt_info))
5761 return -TARGET_EINVAL;
5762 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5763 return -TARGET_EFAULT;
5764 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5765 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5766 ldt_info.limit = tswap32(target_ldt_info->limit);
5767 ldt_info.flags = tswap32(target_ldt_info->flags);
5768 unlock_user_struct(target_ldt_info, ptr, 0);
5770 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5771 return -TARGET_EINVAL;
5772 seg_32bit = ldt_info.flags & 1;
5773 contents = (ldt_info.flags >> 1) & 3;
5774 read_exec_only = (ldt_info.flags >> 3) & 1;
5775 limit_in_pages = (ldt_info.flags >> 4) & 1;
5776 seg_not_present = (ldt_info.flags >> 5) & 1;
5777 useable = (ldt_info.flags >> 6) & 1;
5781 lm = (ldt_info.flags >> 7) & 1;
5783 if (contents == 3) {
5785 return -TARGET_EINVAL;
5786 if (seg_not_present == 0)
5787 return -TARGET_EINVAL;
5789 /* allocate the LDT */
5791 env->ldt.base = target_mmap(0,
5792 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
5793 PROT_READ|PROT_WRITE,
5794 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
5795 if (env->ldt.base == -1)
5796 return -TARGET_ENOMEM;
5797 memset(g2h(env->ldt.base), 0,
5798 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
5799 env->ldt.limit = 0xffff;
5800 ldt_table = g2h(env->ldt.base);
5803 /* NOTE: same code as Linux kernel */
5804 /* Allow LDTs to be cleared by the user. */
5805 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5808 read_exec_only == 1 &&
5810 limit_in_pages == 0 &&
5811 seg_not_present == 1 &&
5819 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5820 (ldt_info.limit & 0x0ffff);
5821 entry_2 = (ldt_info.base_addr & 0xff000000) |
5822 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5823 (ldt_info.limit & 0xf0000) |
5824 ((read_exec_only ^ 1) << 9) |
5826 ((seg_not_present ^ 1) << 15) |
5828 (limit_in_pages << 23) |
5832 entry_2 |= (useable << 20);
5834 /* Install the new entry ... */
5836 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
5837 lp[0] = tswap32(entry_1);
5838 lp[1] = tswap32(entry_2);
5842 /* specific and weird i386 syscalls */
5843 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5844 unsigned long bytecount)
5850 ret = read_ldt(ptr, bytecount);
5853 ret = write_ldt(env, ptr, bytecount, 1);
5856 ret = write_ldt(env, ptr, bytecount, 0);
5859 ret = -TARGET_ENOSYS;
5865 #if defined(TARGET_I386) && defined(TARGET_ABI32)
5866 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
5868 uint64_t *gdt_table = g2h(env->gdt.base);
5869 struct target_modify_ldt_ldt_s ldt_info;
5870 struct target_modify_ldt_ldt_s *target_ldt_info;
5871 int seg_32bit, contents, read_exec_only, limit_in_pages;
5872 int seg_not_present, useable, lm;
5873 uint32_t *lp, entry_1, entry_2;
5876 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5877 if (!target_ldt_info)
5878 return -TARGET_EFAULT;
5879 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5880 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5881 ldt_info.limit = tswap32(target_ldt_info->limit);
5882 ldt_info.flags = tswap32(target_ldt_info->flags);
5883 if (ldt_info.entry_number == -1) {
5884 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
5885 if (gdt_table[i] == 0) {
5886 ldt_info.entry_number = i;
5887 target_ldt_info->entry_number = tswap32(i);
5892 unlock_user_struct(target_ldt_info, ptr, 1);
5894 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
5895 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
5896 return -TARGET_EINVAL;
5897 seg_32bit = ldt_info.flags & 1;
5898 contents = (ldt_info.flags >> 1) & 3;
5899 read_exec_only = (ldt_info.flags >> 3) & 1;
5900 limit_in_pages = (ldt_info.flags >> 4) & 1;
5901 seg_not_present = (ldt_info.flags >> 5) & 1;
5902 useable = (ldt_info.flags >> 6) & 1;
5906 lm = (ldt_info.flags >> 7) & 1;
5909 if (contents == 3) {
5910 if (seg_not_present == 0)
5911 return -TARGET_EINVAL;
5914 /* NOTE: same code as Linux kernel */
5915 /* Allow LDTs to be cleared by the user. */
5916 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5917 if ((contents == 0 &&
5918 read_exec_only == 1 &&
5920 limit_in_pages == 0 &&
5921 seg_not_present == 1 &&
5929 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5930 (ldt_info.limit & 0x0ffff);
5931 entry_2 = (ldt_info.base_addr & 0xff000000) |
5932 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5933 (ldt_info.limit & 0xf0000) |
5934 ((read_exec_only ^ 1) << 9) |
5936 ((seg_not_present ^ 1) << 15) |
5938 (limit_in_pages << 23) |
5943 /* Install the new entry ... */
5945 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
5946 lp[0] = tswap32(entry_1);
5947 lp[1] = tswap32(entry_2);
5951 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
5953 struct target_modify_ldt_ldt_s *target_ldt_info;
5954 uint64_t *gdt_table = g2h(env->gdt.base);
5955 uint32_t base_addr, limit, flags;
5956 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
5957 int seg_not_present, useable, lm;
5958 uint32_t *lp, entry_1, entry_2;
5960 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5961 if (!target_ldt_info)
5962 return -TARGET_EFAULT;
5963 idx = tswap32(target_ldt_info->entry_number);
5964 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
5965 idx > TARGET_GDT_ENTRY_TLS_MAX) {
5966 unlock_user_struct(target_ldt_info, ptr, 1);
5967 return -TARGET_EINVAL;
5969 lp = (uint32_t *)(gdt_table + idx);
5970 entry_1 = tswap32(lp[0]);
5971 entry_2 = tswap32(lp[1]);
5973 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
5974 contents = (entry_2 >> 10) & 3;
5975 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
5976 seg_32bit = (entry_2 >> 22) & 1;
5977 limit_in_pages = (entry_2 >> 23) & 1;
5978 useable = (entry_2 >> 20) & 1;
5982 lm = (entry_2 >> 21) & 1;
5984 flags = (seg_32bit << 0) | (contents << 1) |
5985 (read_exec_only << 3) | (limit_in_pages << 4) |
5986 (seg_not_present << 5) | (useable << 6) | (lm << 7);
5987 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
5988 base_addr = (entry_1 >> 16) |
5989 (entry_2 & 0xff000000) |
5990 ((entry_2 & 0xff) << 16);
5991 target_ldt_info->base_addr = tswapal(base_addr);
5992 target_ldt_info->limit = tswap32(limit);
5993 target_ldt_info->flags = tswap32(flags);
5994 unlock_user_struct(target_ldt_info, ptr, 1);
5997 #endif /* TARGET_I386 && TARGET_ABI32 */
5999 #ifndef TARGET_ABI32
6000 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6007 case TARGET_ARCH_SET_GS:
6008 case TARGET_ARCH_SET_FS:
6009 if (code == TARGET_ARCH_SET_GS)
6013 cpu_x86_load_seg(env, idx, 0);
6014 env->segs[idx].base = addr;
6016 case TARGET_ARCH_GET_GS:
6017 case TARGET_ARCH_GET_FS:
6018 if (code == TARGET_ARCH_GET_GS)
6022 val = env->segs[idx].base;
6023 if (put_user(val, addr, abi_ulong))
6024 ret = -TARGET_EFAULT;
6027 ret = -TARGET_EINVAL;
6034 #endif /* defined(TARGET_I386) */
6036 #define NEW_STACK_SIZE 0x40000
6039 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6042 pthread_mutex_t mutex;
6043 pthread_cond_t cond;
6046 abi_ulong child_tidptr;
6047 abi_ulong parent_tidptr;
6051 static void *clone_func(void *arg)
6053 new_thread_info *info = arg;
6058 rcu_register_thread();
6060 cpu = ENV_GET_CPU(env);
6062 ts = (TaskState *)cpu->opaque;
6063 info->tid = gettid();
6064 cpu->host_tid = info->tid;
6066 if (info->child_tidptr)
6067 put_user_u32(info->tid, info->child_tidptr);
6068 if (info->parent_tidptr)
6069 put_user_u32(info->tid, info->parent_tidptr);
6070 /* Enable signals. */
6071 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6072 /* Signal to the parent that we're ready. */
6073 pthread_mutex_lock(&info->mutex);
6074 pthread_cond_broadcast(&info->cond);
6075 pthread_mutex_unlock(&info->mutex);
6076 /* Wait until the parent has finshed initializing the tls state. */
6077 pthread_mutex_lock(&clone_lock);
6078 pthread_mutex_unlock(&clone_lock);
6084 /* do_fork() Must return host values and target errnos (unlike most
6085 do_*() functions). */
6086 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6087 abi_ulong parent_tidptr, target_ulong newtls,
6088 abi_ulong child_tidptr)
6090 CPUState *cpu = ENV_GET_CPU(env);
6094 CPUArchState *new_env;
6097 flags &= ~CLONE_IGNORED_FLAGS;
6099 /* Emulate vfork() with fork() */
6100 if (flags & CLONE_VFORK)
6101 flags &= ~(CLONE_VFORK | CLONE_VM);
6103 if (flags & CLONE_VM) {
6104 TaskState *parent_ts = (TaskState *)cpu->opaque;
6105 new_thread_info info;
6106 pthread_attr_t attr;
6108 if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6109 (flags & CLONE_INVALID_THREAD_FLAGS)) {
6110 return -TARGET_EINVAL;
6113 ts = g_new0(TaskState, 1);
6114 init_task_state(ts);
6115 /* we create a new CPU instance. */
6116 new_env = cpu_copy(env);
6117 /* Init regs that differ from the parent. */
6118 cpu_clone_regs(new_env, newsp);
6119 new_cpu = ENV_GET_CPU(new_env);
6120 new_cpu->opaque = ts;
6121 ts->bprm = parent_ts->bprm;
6122 ts->info = parent_ts->info;
6123 ts->signal_mask = parent_ts->signal_mask;
6125 if (flags & CLONE_CHILD_CLEARTID) {
6126 ts->child_tidptr = child_tidptr;
6129 if (flags & CLONE_SETTLS) {
6130 cpu_set_tls (new_env, newtls);
6133 /* Grab a mutex so that thread setup appears atomic. */
6134 pthread_mutex_lock(&clone_lock);
6136 memset(&info, 0, sizeof(info));
6137 pthread_mutex_init(&info.mutex, NULL);
6138 pthread_mutex_lock(&info.mutex);
6139 pthread_cond_init(&info.cond, NULL);
6141 if (flags & CLONE_CHILD_SETTID) {
6142 info.child_tidptr = child_tidptr;
6144 if (flags & CLONE_PARENT_SETTID) {
6145 info.parent_tidptr = parent_tidptr;
6148 ret = pthread_attr_init(&attr);
6149 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6150 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6151 /* It is not safe to deliver signals until the child has finished
6152 initializing, so temporarily block all signals. */
6153 sigfillset(&sigmask);
6154 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6156 ret = pthread_create(&info.thread, &attr, clone_func, &info);
6157 /* TODO: Free new CPU state if thread creation failed. */
6159 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6160 pthread_attr_destroy(&attr);
6162 /* Wait for the child to initialize. */
6163 pthread_cond_wait(&info.cond, &info.mutex);
6168 pthread_mutex_unlock(&info.mutex);
6169 pthread_cond_destroy(&info.cond);
6170 pthread_mutex_destroy(&info.mutex);
6171 pthread_mutex_unlock(&clone_lock);
6173 /* if no CLONE_VM, we consider it is a fork */
6174 if (flags & CLONE_INVALID_FORK_FLAGS) {
6175 return -TARGET_EINVAL;
6178 /* We can't support custom termination signals */
6179 if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6180 return -TARGET_EINVAL;
6183 if (block_signals()) {
6184 return -TARGET_ERESTARTSYS;
6190 /* Child Process. */
6192 cpu_clone_regs(env, newsp);
6194 /* There is a race condition here. The parent process could
6195 theoretically read the TID in the child process before the child
6196 tid is set. This would require using either ptrace
6197 (not implemented) or having *_tidptr to point at a shared memory
6198 mapping. We can't repeat the spinlock hack used above because
6199 the child process gets its own copy of the lock. */
6200 if (flags & CLONE_CHILD_SETTID)
6201 put_user_u32(gettid(), child_tidptr);
6202 if (flags & CLONE_PARENT_SETTID)
6203 put_user_u32(gettid(), parent_tidptr);
6204 ts = (TaskState *)cpu->opaque;
6205 if (flags & CLONE_SETTLS)
6206 cpu_set_tls (env, newtls);
6207 if (flags & CLONE_CHILD_CLEARTID)
6208 ts->child_tidptr = child_tidptr;
6216 /* warning : doesn't handle linux specific flags... */
6217 static int target_to_host_fcntl_cmd(int cmd)
6220 case TARGET_F_DUPFD:
6221 case TARGET_F_GETFD:
6222 case TARGET_F_SETFD:
6223 case TARGET_F_GETFL:
6224 case TARGET_F_SETFL:
6226 case TARGET_F_GETLK:
6228 case TARGET_F_SETLK:
6230 case TARGET_F_SETLKW:
6232 case TARGET_F_GETOWN:
6234 case TARGET_F_SETOWN:
6236 case TARGET_F_GETSIG:
6238 case TARGET_F_SETSIG:
6240 #if TARGET_ABI_BITS == 32
6241 case TARGET_F_GETLK64:
6243 case TARGET_F_SETLK64:
6245 case TARGET_F_SETLKW64:
6248 case TARGET_F_SETLEASE:
6250 case TARGET_F_GETLEASE:
6252 #ifdef F_DUPFD_CLOEXEC
6253 case TARGET_F_DUPFD_CLOEXEC:
6254 return F_DUPFD_CLOEXEC;
6256 case TARGET_F_NOTIFY:
6259 case TARGET_F_GETOWN_EX:
6263 case TARGET_F_SETOWN_EX:
6267 case TARGET_F_SETPIPE_SZ:
6268 return F_SETPIPE_SZ;
6269 case TARGET_F_GETPIPE_SZ:
6270 return F_GETPIPE_SZ;
6273 return -TARGET_EINVAL;
6275 return -TARGET_EINVAL;
6278 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
6279 static const bitmask_transtbl flock_tbl[] = {
6280 TRANSTBL_CONVERT(F_RDLCK),
6281 TRANSTBL_CONVERT(F_WRLCK),
6282 TRANSTBL_CONVERT(F_UNLCK),
6283 TRANSTBL_CONVERT(F_EXLCK),
6284 TRANSTBL_CONVERT(F_SHLCK),
6288 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6289 abi_ulong target_flock_addr)
6291 struct target_flock *target_fl;
6294 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6295 return -TARGET_EFAULT;
6298 __get_user(l_type, &target_fl->l_type);
6299 fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
6300 __get_user(fl->l_whence, &target_fl->l_whence);
6301 __get_user(fl->l_start, &target_fl->l_start);
6302 __get_user(fl->l_len, &target_fl->l_len);
6303 __get_user(fl->l_pid, &target_fl->l_pid);
6304 unlock_user_struct(target_fl, target_flock_addr, 0);
6308 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6309 const struct flock64 *fl)
6311 struct target_flock *target_fl;
6314 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6315 return -TARGET_EFAULT;
6318 l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
6319 __put_user(l_type, &target_fl->l_type);
6320 __put_user(fl->l_whence, &target_fl->l_whence);
6321 __put_user(fl->l_start, &target_fl->l_start);
6322 __put_user(fl->l_len, &target_fl->l_len);
6323 __put_user(fl->l_pid, &target_fl->l_pid);
6324 unlock_user_struct(target_fl, target_flock_addr, 1);
6328 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6329 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
/* ARM EABI has its own flock64 layout (extra alignment padding), so
 * 32-bit ARM guests get dedicated converters. Sampled excerpt: interior
 * lines are elided; visible code kept byte-identical. */
6331 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
/* Guest target_eabi_flock64 -> host struct flock64.
 * Returns -TARGET_EFAULT if the guest struct cannot be read. */
6332 static inline abi_long copy_from_user_eabi_flock64(struct flock64 *fl,
6333 abi_ulong target_flock_addr)
6335 struct target_eabi_flock64 *target_fl;
6338 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6339 return -TARGET_EFAULT;
6342 __get_user(l_type, &target_fl->l_type);
6343 fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
6344 __get_user(fl->l_whence, &target_fl->l_whence);
6345 __get_user(fl->l_start, &target_fl->l_start);
6346 __get_user(fl->l_len, &target_fl->l_len);
6347 __get_user(fl->l_pid, &target_fl->l_pid);
6348 unlock_user_struct(target_fl, target_flock_addr, 0);
/* Host struct flock64 -> guest target_eabi_flock64 (mirror of above). */
6352 static inline abi_long copy_to_user_eabi_flock64(abi_ulong target_flock_addr,
6353 const struct flock64 *fl)
6355 struct target_eabi_flock64 *target_fl;
6358 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6359 return -TARGET_EFAULT;
6362 l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
6363 __put_user(l_type, &target_fl->l_type);
6364 __put_user(fl->l_whence, &target_fl->l_whence);
6365 __put_user(fl->l_start, &target_fl->l_start);
6366 __put_user(fl->l_len, &target_fl->l_len);
6367 __put_user(fl->l_pid, &target_fl->l_pid);
6368 unlock_user_struct(target_fl, target_flock_addr, 1);
/* Generic (non-EABI) flock64 converters for F_GETLK64/F_SETLK64.
 * Sampled excerpt: interior lines elided; visible code byte-identical. */
/* Guest target_flock64 -> host struct flock64. */
6373 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6374 abi_ulong target_flock_addr)
6376 struct target_flock64 *target_fl;
6379 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6380 return -TARGET_EFAULT;
6383 __get_user(l_type, &target_fl->l_type);
6384 fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
6385 __get_user(fl->l_whence, &target_fl->l_whence);
6386 __get_user(fl->l_start, &target_fl->l_start);
6387 __get_user(fl->l_len, &target_fl->l_len);
6388 __get_user(fl->l_pid, &target_fl->l_pid);
6389 unlock_user_struct(target_fl, target_flock_addr, 0);
/* Host struct flock64 -> guest target_flock64. */
6393 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6394 const struct flock64 *fl)
6396 struct target_flock64 *target_fl;
6399 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6400 return -TARGET_EFAULT;
6403 l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
6404 __put_user(l_type, &target_fl->l_type);
6405 __put_user(fl->l_whence, &target_fl->l_whence);
6406 __put_user(fl->l_start, &target_fl->l_start);
6407 __put_user(fl->l_len, &target_fl->l_len);
6408 __put_user(fl->l_pid, &target_fl->l_pid);
6409 unlock_user_struct(target_fl, target_flock_addr, 1);
/* Emulate fcntl(2) for the guest: translate the command number, convert
 * lock/owner structures between guest and host layouts, invoke the host
 * fcntl via safe_fcntl, and convert results back. Sampled excerpt:
 * interior lines (error checks, breaks, switch braces) are elided. */
6413 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6415 struct flock64 fl64;
6417 struct f_owner_ex fox;
6418 struct target_f_owner_ex *target_fox;
/* Map the guest F_* command onto the host's; -TARGET_EINVAL if unknown. */
6421 int host_cmd = target_to_host_fcntl_cmd(cmd);
6423 if (host_cmd == -TARGET_EINVAL)
/* Record locks: copy guest flock in, call host, copy result back out. */
6427 case TARGET_F_GETLK:
6428 ret = copy_from_user_flock(&fl64, arg);
6432 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6434 ret = copy_to_user_flock(arg, &fl64);
6438 case TARGET_F_SETLK:
6439 case TARGET_F_SETLKW:
6440 ret = copy_from_user_flock(&fl64, arg);
6444 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
/* 64-bit record-lock variants use the flock64 converters. */
6447 case TARGET_F_GETLK64:
6448 ret = copy_from_user_flock64(&fl64, arg);
6452 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6454 ret = copy_to_user_flock64(arg, &fl64);
6457 case TARGET_F_SETLK64:
6458 case TARGET_F_SETLKW64:
6459 ret = copy_from_user_flock64(&fl64, arg);
6463 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
/* File status flags need O_* bit translation via fcntl_flags_tbl. */
6466 case TARGET_F_GETFL:
6467 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6469 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6473 case TARGET_F_SETFL:
6474 ret = get_errno(safe_fcntl(fd, host_cmd,
6475 target_to_host_bitmask(arg,
/* F_GETOWN_EX/F_SETOWN_EX marshal struct f_owner_ex with 32-bit swaps. */
6480 case TARGET_F_GETOWN_EX:
6481 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6483 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6484 return -TARGET_EFAULT;
6485 target_fox->type = tswap32(fox.type);
6486 target_fox->pid = tswap32(fox.pid);
6487 unlock_user_struct(target_fox, arg, 1);
6493 case TARGET_F_SETOWN_EX:
6494 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6495 return -TARGET_EFAULT;
6496 fox.type = tswap32(target_fox->type);
6497 fox.pid = tswap32(target_fox->pid);
6498 unlock_user_struct(target_fox, arg, 0);
6499 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
/* Integer-argument commands pass straight through. */
6503 case TARGET_F_SETOWN:
6504 case TARGET_F_GETOWN:
6505 case TARGET_F_SETSIG:
6506 case TARGET_F_GETSIG:
6507 case TARGET_F_SETLEASE:
6508 case TARGET_F_GETLEASE:
6509 case TARGET_F_SETPIPE_SZ:
6510 case TARGET_F_GETPIPE_SZ:
6511 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
/* Default: pass the untranslated cmd to the host (best effort). */
6515 ret = get_errno(safe_fcntl(fd, cmd, arg));
/* UID/GID width conversion helpers. Under USE_UID16 (targets with
 * 16-bit legacy uid syscalls) 32-bit IDs are clamped/sign-handled to
 * 16 bits; otherwise these are pass-throughs. Sampled excerpt: function
 * bodies are largely elided — only signatures and -1 checks visible. */
6523 static inline int high2lowuid(int uid)
6531 static inline int high2lowgid(int gid)
6539 static inline int low2highuid(int uid)
/* 16-bit -1 (0xffff) must widen to 32-bit -1, not 65535. */
6541 if ((int16_t)uid == -1)
6547 static inline int low2highgid(int gid)
6549 if ((int16_t)gid == -1)
6554 static inline int tswapid(int id)
6559 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6561 #else /* !USE_UID16 */
6562 static inline int high2lowuid(int uid)
6566 static inline int high2lowgid(int gid)
6570 static inline int low2highuid(int uid)
6574 static inline int low2highgid(int gid)
6578 static inline int tswapid(int id)
6583 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6585 #endif /* USE_UID16 */
6587 /* We must do direct syscalls for setting UID/GID, because we want to
6588 * implement the Linux system call semantics of "change only for this thread",
6589 * not the libc/POSIX semantics of "change for all threads in process".
6590 * (See http://ewontfix.com/17/ for more details.)
6591 * We use the 32-bit version of the syscalls if present; if it is not
6592 * then either the host architecture supports 32-bit UIDs natively with
6593 * the standard syscall, or the 16-bit UID is the best we can do.
6595 #ifdef __NR_setuid32
6596 #define __NR_sys_setuid __NR_setuid32
6598 #define __NR_sys_setuid __NR_setuid
6600 #ifdef __NR_setgid32
6601 #define __NR_sys_setgid __NR_setgid32
6603 #define __NR_sys_setgid __NR_setgid
6605 #ifdef __NR_setresuid32
6606 #define __NR_sys_setresuid __NR_setresuid32
6608 #define __NR_sys_setresuid __NR_setresuid
6610 #ifdef __NR_setresgid32
6611 #define __NR_sys_setresgid __NR_setresgid32
6613 #define __NR_sys_setresgid __NR_setresgid
/* Raw-syscall wrappers (bypass libc so the change stays per-thread). */
6616 _syscall1(int, sys_setuid, uid_t, uid)
6617 _syscall1(int, sys_setgid, gid_t, gid)
6618 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6619 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
/* One-time init for the syscall layer: register thunk struct layouts,
 * build the reverse errno table, and patch/validate ioctl numbers.
 * Sampled excerpt: loop variable declarations and the ioctl-table
 * iterator setup (`ie = ioctl_entries;`-style lines) are elided. */
6621 void syscall_init(void)
6624 const argtype *arg_type;
6628 thunk_init(STRUCT_MAX);
6630 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6631 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6632 #include "syscall_types.h"
6634 #undef STRUCT_SPECIAL
6636 /* Build target_to_host_errno_table[] table from
6637 * host_to_target_errno_table[]. */
6638 for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6639 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6642 /* we patch the ioctl size if necessary. We rely on the fact that
6643 no ioctl has all the bits at '1' in the size field */
6645 while (ie->target_cmd != 0) {
/* An all-ones size field marks "size to be filled in from the thunk". */
6646 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6647 TARGET_IOC_SIZEMASK) {
6648 arg_type = ie->arg_type;
6649 if (arg_type[0] != TYPE_PTR) {
6650 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6655 size = thunk_type_size(arg_type, 0);
6656 ie->target_cmd = (ie->target_cmd &
6657 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6658 (size << TARGET_IOC_SIZESHIFT);
6661 /* automatic consistency check if same arch */
6662 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6663 (defined(__x86_64__) && defined(TARGET_X86_64))
6664 if (unlikely(ie->target_cmd != ie->host_cmd)) {
6665 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6666 ie->name, ie->target_cmd, ie->host_cmd);
/* Helpers for 64-bit file offsets passed as register pairs by 32-bit
 * guests. Sampled excerpt: interior lines elided. */
6673 #if TARGET_ABI_BITS == 32
/* Combine two 32-bit registers into a 64-bit offset; which register
 * holds the high half depends on guest endianness. */
6674 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
6676 #ifdef TARGET_WORDS_BIGENDIAN
6677 return ((uint64_t)word0 << 32) | word1;
6679 return ((uint64_t)word1 << 32) | word0;
6682 #else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABI: the offset already fits in one register (word1 unused
 * in the elided body — presumably returns word0; confirm in full file). */
6683 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
6687 #endif /* TARGET_ABI_BITS != 32 */
6689 #ifdef TARGET_NR_truncate64
/* truncate64: some ABIs align the 64-bit register pair, shifting the
 * argument positions (regpairs_aligned handling is elided here). */
6690 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6695 if (regpairs_aligned(cpu_env)) {
6699 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6703 #ifdef TARGET_NR_ftruncate64
/* ftruncate64: same register-pair handling as truncate64. */
6704 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6709 if (regpairs_aligned(cpu_env)) {
6713 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
/* Guest <-> host struct timespec converters (tv_sec/tv_nsec byte-swapped
 * via __get_user/__put_user). Both return -TARGET_EFAULT on bad guest
 * address. Sampled excerpt: braces/returns elided. */
6717 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
6718 abi_ulong target_addr)
6720 struct target_timespec *target_ts;
6722 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
6723 return -TARGET_EFAULT;
6724 __get_user(host_ts->tv_sec, &target_ts->tv_sec);
6725 __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6726 unlock_user_struct(target_ts, target_addr, 0);
6730 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
6731 struct timespec *host_ts)
6733 struct target_timespec *target_ts;
6735 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
6736 return -TARGET_EFAULT;
6737 __put_user(host_ts->tv_sec, &target_ts->tv_sec);
6738 __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6739 unlock_user_struct(target_ts, target_addr, 1);
/* Guest <-> host struct itimerspec converters (timer_settime & co.).
 * Fields are swapped with tswapal rather than __get_user/__put_user.
 * Sampled excerpt: braces/returns elided. */
6743 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6744 abi_ulong target_addr)
6746 struct target_itimerspec *target_itspec;
6748 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6749 return -TARGET_EFAULT;
6752 host_itspec->it_interval.tv_sec =
6753 tswapal(target_itspec->it_interval.tv_sec);
6754 host_itspec->it_interval.tv_nsec =
6755 tswapal(target_itspec->it_interval.tv_nsec);
6756 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6757 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
/* NOTE(review): copy flag 1 on a VERIFY_READ lock writes the (unmodified)
 * buffer back to the guest — looks like it should be 0; confirm against
 * upstream before changing. */
6759 unlock_user_struct(target_itspec, target_addr, 1);
6763 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6764 struct itimerspec *host_its)
6766 struct target_itimerspec *target_itspec;
6768 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6769 return -TARGET_EFAULT;
6772 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6773 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6775 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6776 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
/* NOTE(review): copy flag 0 after VERIFY_WRITE would discard the update
 * under DEBUG_REMAP-style bounce buffers — expected 1; verify upstream. */
6778 unlock_user_struct(target_itspec, target_addr, 0);
/* Guest <-> host struct timex converters for adjtimex(2)/clock_adjtime.
 * Field-by-field copy with byte-swapping; both directions return
 * -TARGET_EFAULT on an unmappable guest address. Sampled excerpt:
 * braces and `return 0;` lines elided. */
6782 static inline abi_long target_to_host_timex(struct timex *host_tx,
6783 abi_long target_addr)
6785 struct target_timex *target_tx;
6787 if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
6788 return -TARGET_EFAULT;
6791 __get_user(host_tx->modes, &target_tx->modes);
6792 __get_user(host_tx->offset, &target_tx->offset);
6793 __get_user(host_tx->freq, &target_tx->freq);
6794 __get_user(host_tx->maxerror, &target_tx->maxerror);
6795 __get_user(host_tx->esterror, &target_tx->esterror);
6796 __get_user(host_tx->status, &target_tx->status);
6797 __get_user(host_tx->constant, &target_tx->constant);
6798 __get_user(host_tx->precision, &target_tx->precision);
6799 __get_user(host_tx->tolerance, &target_tx->tolerance);
6800 __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6801 __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6802 __get_user(host_tx->tick, &target_tx->tick);
6803 __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6804 __get_user(host_tx->jitter, &target_tx->jitter);
6805 __get_user(host_tx->shift, &target_tx->shift);
6806 __get_user(host_tx->stabil, &target_tx->stabil);
6807 __get_user(host_tx->jitcnt, &target_tx->jitcnt);
6808 __get_user(host_tx->calcnt, &target_tx->calcnt);
6809 __get_user(host_tx->errcnt, &target_tx->errcnt);
6810 __get_user(host_tx->stbcnt, &target_tx->stbcnt);
6811 __get_user(host_tx->tai, &target_tx->tai);
6813 unlock_user_struct(target_tx, target_addr, 0);
/* Reverse direction: write kernel-updated timex back to the guest. */
6817 static inline abi_long host_to_target_timex(abi_long target_addr,
6818 struct timex *host_tx)
6820 struct target_timex *target_tx;
6822 if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
6823 return -TARGET_EFAULT;
6826 __put_user(host_tx->modes, &target_tx->modes);
6827 __put_user(host_tx->offset, &target_tx->offset);
6828 __put_user(host_tx->freq, &target_tx->freq);
6829 __put_user(host_tx->maxerror, &target_tx->maxerror);
6830 __put_user(host_tx->esterror, &target_tx->esterror);
6831 __put_user(host_tx->status, &target_tx->status);
6832 __put_user(host_tx->constant, &target_tx->constant);
6833 __put_user(host_tx->precision, &target_tx->precision);
6834 __put_user(host_tx->tolerance, &target_tx->tolerance);
6835 __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6836 __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6837 __put_user(host_tx->tick, &target_tx->tick);
6838 __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6839 __put_user(host_tx->jitter, &target_tx->jitter);
6840 __put_user(host_tx->shift, &target_tx->shift);
6841 __put_user(host_tx->stabil, &target_tx->stabil);
6842 __put_user(host_tx->jitcnt, &target_tx->jitcnt);
6843 __put_user(host_tx->calcnt, &target_tx->calcnt);
6844 __put_user(host_tx->errcnt, &target_tx->errcnt);
6845 __put_user(host_tx->stbcnt, &target_tx->stbcnt);
6846 __put_user(host_tx->tai, &target_tx->tai);
6848 unlock_user_struct(target_tx, target_addr, 1);
/* Convert a guest struct sigevent (timer_create notification spec) to
 * the host layout: swaps sival_ptr/signo/notify/_tid and maps the
 * signal number. Sampled excerpt: braces/returns elided. */
6853 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6854 abi_ulong target_addr)
6856 struct target_sigevent *target_sevp;
6858 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6859 return -TARGET_EFAULT;
6862 /* This union is awkward on 64 bit systems because it has a 32 bit
6863 * integer and a pointer in it; we follow the conversion approach
6864 * used for handling sigval types in signal.c so the guest should get
6865 * the correct value back even if we did a 64 bit byteswap and it's
6866 * using the 32 bit integer.
6868 host_sevp->sigev_value.sival_ptr =
6869 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6870 host_sevp->sigev_signo =
6871 target_to_host_signal(tswap32(target_sevp->sigev_signo));
6872 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6873 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6875 unlock_user_struct(target_sevp, target_addr, 1);
6879 #if defined(TARGET_NR_mlockall)
/* Translate guest MCL_* flag bits to the host's for mlockall(2). */
6880 static inline int target_to_host_mlockall_arg(int arg)
6884 if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
6885 result |= MCL_CURRENT;
6887 if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
6888 result |= MCL_FUTURE;
/* Copy a host struct stat out to the guest's 64-bit stat layout.
 * ARM EABI guests use their own target_eabi_stat64; other targets use
 * target_stat64 (or plain target_stat when no 64-bit layout exists).
 * Returns -TARGET_EFAULT on bad guest address. Sampled excerpt:
 * braces/else branches/returns elided. */
6894 static inline abi_long host_to_target_stat64(void *cpu_env,
6895 abi_ulong target_addr,
6896 struct stat *host_st)
6898 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6899 if (((CPUARMState *)cpu_env)->eabi) {
6900 struct target_eabi_stat64 *target_st;
6902 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6903 return -TARGET_EFAULT;
/* Zero first so padding/unused fields aren't leaked to the guest. */
6904 memset(target_st, 0, sizeof(struct target_eabi_stat64));
6905 __put_user(host_st->st_dev, &target_st->st_dev);
6906 __put_user(host_st->st_ino, &target_st->st_ino);
6907 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6908 __put_user(host_st->st_ino, &target_st->__st_ino);
6910 __put_user(host_st->st_mode, &target_st->st_mode);
6911 __put_user(host_st->st_nlink, &target_st->st_nlink);
6912 __put_user(host_st->st_uid, &target_st->st_uid);
6913 __put_user(host_st->st_gid, &target_st->st_gid);
6914 __put_user(host_st->st_rdev, &target_st->st_rdev);
6915 __put_user(host_st->st_size, &target_st->st_size);
6916 __put_user(host_st->st_blksize, &target_st->st_blksize);
6917 __put_user(host_st->st_blocks, &target_st->st_blocks);
6918 __put_user(host_st->st_atime, &target_st->target_st_atime);
6919 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6920 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6921 unlock_user_struct(target_st, target_addr, 1);
/* Non-EABI path (the `} else {` separating these is elided). */
6925 #if defined(TARGET_HAS_STRUCT_STAT64)
6926 struct target_stat64 *target_st;
6928 struct target_stat *target_st;
6931 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6932 return -TARGET_EFAULT;
6933 memset(target_st, 0, sizeof(*target_st));
6934 __put_user(host_st->st_dev, &target_st->st_dev);
6935 __put_user(host_st->st_ino, &target_st->st_ino);
6936 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6937 __put_user(host_st->st_ino, &target_st->__st_ino);
6939 __put_user(host_st->st_mode, &target_st->st_mode);
6940 __put_user(host_st->st_nlink, &target_st->st_nlink);
6941 __put_user(host_st->st_uid, &target_st->st_uid);
6942 __put_user(host_st->st_gid, &target_st->st_gid);
6943 __put_user(host_st->st_rdev, &target_st->st_rdev);
6944 /* XXX: better use of kernel struct */
6945 __put_user(host_st->st_size, &target_st->st_size);
6946 __put_user(host_st->st_blksize, &target_st->st_blksize);
6947 __put_user(host_st->st_blocks, &target_st->st_blocks);
6948 __put_user(host_st->st_atime, &target_st->target_st_atime);
6949 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6950 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6951 unlock_user_struct(target_st, target_addr, 1);
6957 /* ??? Using host futex calls even when target atomic operations
6958 are not really atomic probably breaks things. However implementing
6959 futexes locally would make futexes shared between multiple processes
6960 tricky. However they're probably useless because guest atomic
6961 operations won't work either. */
/* Emulate futex(2) on the guest address space via g2h() translation.
 * Sampled excerpt: the switch header, FUTEX_WAIT/WAKE case labels and
 * several timeout-setup lines are elided. */
6962 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
6963 target_ulong uaddr2, int val3)
6965 struct timespec ts, *pts;
6968 /* ??? We assume FUTEX_* constants are the same on both host
6970 #ifdef FUTEX_CMD_MASK
/* Strip flag bits (e.g. FUTEX_PRIVATE_FLAG) to get the base command. */
6971 base_op = op & FUTEX_CMD_MASK;
6977 case FUTEX_WAIT_BITSET:
6980 target_to_host_timespec(pts, timeout);
/* WAIT compares against guest memory, so val must be guest-order. */
6984 return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
6987 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6989 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6991 case FUTEX_CMP_REQUEUE:
6993 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6994 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6995 But the prototype takes a `struct timespec *'; insert casts
6996 to satisfy the compiler. We do not need to tswap TIMEOUT
6997 since it's not compared to guest memory. */
6998 pts = (struct timespec *)(uintptr_t) timeout;
6999 return get_errno(safe_futex(g2h(uaddr), op, val, pts,
7001 (base_op == FUTEX_CMP_REQUEUE
7005 return -TARGET_ENOSYS;
7008 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate name_to_handle_at(2): read handle_bytes from the guest
 * file_handle, call the host syscall into a scratch handle, then copy
 * the (opaque) handle back with the two header fields byte-swapped.
 * Sampled excerpt: error checks, g_free(fh) and final return elided. */
7009 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7010 abi_long handle, abi_long mount_id,
7013 struct file_handle *target_fh;
7014 struct file_handle *fh;
7018 unsigned int size, total_size;
7020 if (get_user_s32(size, handle)) {
7021 return -TARGET_EFAULT;
7024 name = lock_user_string(pathname);
7026 return -TARGET_EFAULT;
7029 total_size = sizeof(struct file_handle) + size;
7030 target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7032 unlock_user(name, pathname, 0);
7033 return -TARGET_EFAULT;
7036 fh = g_malloc0(total_size);
7037 fh->handle_bytes = size;
7039 ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7040 unlock_user(name, pathname, 0);
7042 /* man name_to_handle_at(2):
7043 * Other than the use of the handle_bytes field, the caller should treat
7044 * the file_handle structure as an opaque data type
7047 memcpy(target_fh, fh, total_size);
7048 target_fh->handle_bytes = tswap32(fh->handle_bytes);
7049 target_fh->handle_type = tswap32(fh->handle_type);
7051 unlock_user(target_fh, handle, total_size);
7053 if (put_user_s32(mid, mount_id)) {
7054 return -TARGET_EFAULT;
7062 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate open_by_handle_at(2): duplicate the guest handle, fix up the
 * two byte-swapped header fields, translate open flags, call the host. */
7063 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7066 struct file_handle *target_fh;
7067 struct file_handle *fh;
7068 unsigned int size, total_size;
7071 if (get_user_s32(size, handle)) {
7072 return -TARGET_EFAULT;
7075 total_size = sizeof(struct file_handle) + size;
7076 target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7078 return -TARGET_EFAULT;
7081 fh = g_memdup(target_fh, total_size);
7082 fh->handle_bytes = size;
7083 fh->handle_type = tswap32(target_fh->handle_type);
7085 ret = get_errno(open_by_handle_at(mount_fd, fh,
7086 target_to_host_bitmask(flags, fcntl_flags_tbl)));
7090 unlock_user(target_fh, handle, total_size);
7096 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7098 /* signalfd siginfo conversion */
/* Convert one host signalfd_siginfo to guest byte order / signal
 * numbering. IMPORTANT: the only caller below passes tinfo == info
 * (in-place conversion), which is why reading tinfo->ssi_signo /
 * ssi_code / ssi_errno before they are stored still sees host-order
 * host values. With distinct buffers those reads would be wrong. */
7101 host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
7102 const struct signalfd_siginfo *info)
7104 int sig = host_to_target_signal(info->ssi_signo);
7106 /* linux/signalfd.h defines a ssi_addr_lsb
7107 * not defined in sys/signalfd.h but used by some kernels
7110 #ifdef BUS_MCEERR_AO
7111 if (tinfo->ssi_signo == SIGBUS &&
7112 (tinfo->ssi_code == BUS_MCEERR_AR ||
7113 tinfo->ssi_code == BUS_MCEERR_AO)) {
/* ssi_addr_lsb lives immediately after ssi_addr in the kernel layout. */
7114 uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
7115 uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
7116 *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
7120 tinfo->ssi_signo = tswap32(sig);
/* Reads tinfo (not info) — only correct because tinfo aliases info. */
7121 tinfo->ssi_errno = tswap32(tinfo->ssi_errno);
7122 tinfo->ssi_code = tswap32(info->ssi_code);
7123 tinfo->ssi_pid = tswap32(info->ssi_pid);
7124 tinfo->ssi_uid = tswap32(info->ssi_uid);
7125 tinfo->ssi_fd = tswap32(info->ssi_fd);
7126 tinfo->ssi_tid = tswap32(info->ssi_tid);
7127 tinfo->ssi_band = tswap32(info->ssi_band);
7128 tinfo->ssi_overrun = tswap32(info->ssi_overrun);
7129 tinfo->ssi_trapno = tswap32(info->ssi_trapno);
7130 tinfo->ssi_status = tswap32(info->ssi_status);
7131 tinfo->ssi_int = tswap32(info->ssi_int);
7132 tinfo->ssi_ptr = tswap64(info->ssi_ptr);
7133 tinfo->ssi_utime = tswap64(info->ssi_utime);
7134 tinfo->ssi_stime = tswap64(info->ssi_stime);
7135 tinfo->ssi_addr = tswap64(info->ssi_addr);
/* Convert a buffer of siginfo records read from a signalfd, in place. */
7138 static abi_long host_to_target_data_signalfd(void *buf, size_t len)
7142 for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) {
7143 host_to_target_signalfd_siginfo(buf + i, buf + i);
/* fd translator: applied to every read(2) on a signalfd descriptor. */
7149 static TargetFdTrans target_signalfd_trans = {
7150 .host_to_target_data = host_to_target_data_signalfd,
/* Emulate signalfd4(2): validate flags, convert the guest sigset and
 * flag bits, create the host signalfd and register its translator. */
7153 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7156 target_sigset_t *target_mask;
7160 if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
7161 return -TARGET_EINVAL;
7163 if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7164 return -TARGET_EFAULT;
7167 target_to_host_sigset(&host_mask, target_mask);
7169 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7171 ret = get_errno(signalfd(fd, &host_mask, host_flags));
7173 fd_trans_register(ret, &target_signalfd_trans);
7176 unlock_user_struct(target_mask, mask, 0);
7182 /* Map host to target signal numbers for the wait family of syscalls.
7183 Assume all other status bits are the same. */
/* Rewrites the signal number embedded in a wait-status word: low 7 bits
 * for WIFSIGNALED, bits 8-15 for WIFSTOPPED; other encodings pass
 * through (the fall-through `return status;` is elided here). */
7184 int host_to_target_waitstatus(int status)
7186 if (WIFSIGNALED(status)) {
7187 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7189 if (WIFSTOPPED(status)) {
7190 return (host_to_target_signal(WSTOPSIG(status)) << 8)
/* Synthesize /proc/self/cmdline for the guest: stream the host file but
 * drop the first NUL-terminated string (the qemu binary path) so the
 * guest sees its own argv[0] first. Sampled excerpt: fd/buf
 * declarations, loop header and error paths elided. */
7196 static int open_self_cmdline(void *cpu_env, int fd)
7199 bool word_skipped = false;
7201 fd_orig = open("/proc/self/cmdline", O_RDONLY);
7211 nb_read = read(fd_orig, buf, sizeof(buf));
/* NOTE(review): assigning close()'s result to fd_orig on the error
 * path looks intentional (propagated below) — confirm in full source. */
7214 fd_orig = close(fd_orig);
7217 } else if (nb_read == 0) {
7221 if (!word_skipped) {
7222 /* Skip the first string, which is the path to qemu-*-static
7223 instead of the actual command. */
7224 cp_buf = memchr(buf, 0, nb_read);
7226 /* Null byte found, skip one string */
7228 nb_read -= cp_buf - buf;
7229 word_skipped = true;
7234 if (write(fd, cp_buf, nb_read) != nb_read) {
7243 return close(fd_orig);
/* Synthesize /proc/self/maps for the guest: parse the host maps file,
 * keep only ranges that map into guest address space (h2g_valid), and
 * emit them with guest-relative addresses; labels the guest stack.
 * Sampled excerpt: fp/line/len declarations, fclose and return elided. */
7246 static int open_self_maps(void *cpu_env, int fd)
7248 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7249 TaskState *ts = cpu->opaque;
7255 fp = fopen("/proc/self/maps", "r");
7260 while ((read = getline(&line, &len, fp)) != -1) {
7261 int fields, dev_maj, dev_min, inode;
7262 uint64_t min, max, offset;
7263 char flag_r, flag_w, flag_x, flag_p;
7264 char path[512] = "";
7265 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
7266 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
7267 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
/* 10 fields = anonymous mapping, 11 = with pathname; else malformed. */
7269 if ((fields < 10) || (fields > 11)) {
7270 if (h2g_valid(min)) {
7273 int flags = page_get_flags(h2g(min));
/* Clamp ranges that extend past the end of guest address space. */
7274 max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX);
7275 if (page_check_range(h2g(min), max - min, flags) == -1) {
7278 if (h2g(min) == ts->info->stack_limit) {
7279 pstrcpy(path, sizeof(path), " [stack]");
7281 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
7282 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
7283 h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
7284 flag_x, flag_p, offset, dev_maj, dev_min, inode,
7285 path[0] ? " " : "", path);
/* Synthesize /proc/self/stat for the guest. Only three fields carry
 * real data: field 0 = pid, field 1 = comm (argv[0]), field 27 = guest
 * start_stack; the remaining 44-field slots are written as "0".
 * Sampled excerpt: buf/val/len declarations and loop braces elided. */
7295 static int open_self_stat(void *cpu_env, int fd)
7297 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7298 TaskState *ts = cpu->opaque;
7299 abi_ulong start_stack = ts->info->start_stack;
7302 for (i = 0; i < 44; i++) {
7310 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7311 } else if (i == 1) {
7313 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
7314 } else if (i == 27) {
7317 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7319 /* for the rest, there is MasterCard */
7320 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
7324 if (write(fd, buf, len) != len) {
/* Synthesize /proc/self/auxv: copy the guest's saved auxiliary vector
 * (stored on the guest stack at exec time) into the given fd, then
 * rewind it. Sampled excerpt: ptr/r declarations and error path elided. */
7332 static int open_self_auxv(void *cpu_env, int fd)
7334 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7335 TaskState *ts = cpu->opaque;
7336 abi_ulong auxv = ts->info->saved_auxv;
7337 abi_ulong len = ts->info->auxv_len;
7341 * Auxiliary vector is stored in target process stack.
7342 * read in whole auxv vector and copy it to file
7344 ptr = lock_user(VERIFY_READ, auxv, len, 0);
7348 r = write(fd, ptr, len);
7355 lseek(fd, 0, SEEK_SET);
7356 unlock_user(ptr, auxv, len);
/* Return non-zero if filename names /proc/<self>/entry — accepts both
 * "/proc/self/..." and "/proc/<our pid>/...". Sampled excerpt: the
 * else/return-0 paths and `char myself[80];` style lines are elided. */
7362 static int is_proc_myself(const char *filename, const char *entry)
7364 if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
7365 filename += strlen("/proc/");
7366 if (!strncmp(filename, "self/", strlen("self/"))) {
7367 filename += strlen("self/");
7368 } else if (*filename >= '1' && *filename <= '9') {
7370 snprintf(myself, sizeof(myself), "%d/", getpid());
7371 if (!strncmp(filename, myself, strlen(myself))) {
7372 filename += strlen(myself);
7379 if (!strcmp(filename, entry)) {
/* Cross-endian hosts only: /proc/net/route stores addresses in host
 * byte order, so it must be rewritten with swapped address fields for
 * the guest. */
7386 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-match comparator used by the fake-open table for absolute paths. */
7387 static int is_proc(const char *filename, const char *entry)
7389 return strcmp(filename, entry) == 0;
/* Copy /proc/net/route to fd, byte-swapping dest/gateway/mask columns.
 * Sampled excerpt: fp/line/len/iface declarations and cleanup elided. */
7392 static int open_net_route(void *cpu_env, int fd)
7399 fp = fopen("/proc/net/route", "r");
/* First line is the column-header row: pass it through untouched. */
7406 read = getline(&line, &len, fp);
7407 dprintf(fd, "%s", line);
7411 while ((read = getline(&line, &len, fp)) != -1) {
7413 uint32_t dest, gw, mask;
7414 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7415 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7416 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7417 &mask, &mtu, &window, &irtt);
7418 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7419 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7420 metric, tswap32(mask), mtu, window, irtt);
/* openat(2) emulation with /proc interception: certain /proc/self
 * entries are faked by generating their contents into a temp file so
 * the guest sees guest-view data instead of the host's. Everything
 * else goes to the host via safe_openat. Sampled excerpt: the struct
 * fake_open definition, mkstemp/unlink error handling and the early
 * `return fd;` for faked entries are elided. */
7430 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
7433 const char *filename;
7434 int (*fill)(void *cpu_env, int fd);
7435 int (*cmp)(const char *s1, const char *s2);
7437 const struct fake_open *fake_open;
7438 static const struct fake_open fakes[] = {
7439 { "maps", open_self_maps, is_proc_myself },
7440 { "stat", open_self_stat, is_proc_myself },
7441 { "auxv", open_self_auxv, is_proc_myself },
7442 { "cmdline", open_self_cmdline, is_proc_myself },
7443 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7444 { "/proc/net/route", open_net_route, is_proc },
7446 { NULL, NULL, NULL }
/* /proc/self/exe must resolve to the guest binary, not qemu itself:
 * prefer the AT_EXECFD descriptor, else reopen exec_path. */
7449 if (is_proc_myself(pathname, "exe")) {
7450 int execfd = qemu_getauxval(AT_EXECFD);
7451 return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
7454 for (fake_open = fakes; fake_open->filename; fake_open++) {
7455 if (fake_open->cmp(pathname, fake_open->filename)) {
7460 if (fake_open->filename) {
7462 char filename[PATH_MAX];
7465 /* create temporary file to map stat to */
7466 tmpdir = getenv("TMPDIR");
7469 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
7470 fd = mkstemp(filename);
7476 if ((r = fake_open->fill(cpu_env, fd))) {
/* Rewind so the guest reads the generated content from offset 0. */
7482 lseek(fd, 0, SEEK_SET);
/* path() applies the -L sysroot prefix remapping before the host open. */
7487 return safe_openat(dirfd, path(pathname), flags, mode);
/* Guest-visible POSIX timer IDs are the internal table index tagged
 * with TIMER_MAGIC in the upper 16 bits, so stale/forged IDs can be
 * rejected. */
7490 #define TIMER_MAGIC 0x0caf0000
7491 #define TIMER_MAGIC_MASK 0xffff0000
7493 /* Convert QEMU provided timer ID back to internal 16bit index format */
/* Returns the g_posix_timers[] index, or -TARGET_EINVAL if the magic
 * tag or range check fails. Sampled excerpt: the mask-off of the magic
 * bits and final return are elided. */
7494 static target_timer_t get_timer_id(abi_long arg)
7496 target_timer_t timerid = arg;
7498 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7499 return -TARGET_EINVAL;
7504 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7505 return -TARGET_EINVAL;
7511 /* do_syscall() should always have a single exit point at the end so
7512 that actions, such as logging of syscall results, can be performed.
7513 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
7514 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
7515 abi_long arg2, abi_long arg3, abi_long arg4,
7516 abi_long arg5, abi_long arg6, abi_long arg7,
7519 CPUState *cpu = ENV_GET_CPU(cpu_env);
7525 #if defined(DEBUG_ERESTARTSYS)
7526 /* Debug-only code for exercising the syscall-restart code paths
7527 * in the per-architecture cpu main loops: restart every syscall
7528 * the guest makes once before letting it through.
7535 return -TARGET_ERESTARTSYS;
7541 gemu_log("syscall %d", num);
7543 trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
7545 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
7548 case TARGET_NR_exit:
7549 /* In old applications this may be used to implement _exit(2).
7550 However in threaded applictions it is used for thread termination,
7551 and _exit_group is used for application termination.
7552 Do thread termination if we have more then one thread. */
7554 if (block_signals()) {
7555 ret = -TARGET_ERESTARTSYS;
7561 if (CPU_NEXT(first_cpu)) {
7564 /* Remove the CPU from the list. */
7565 QTAILQ_REMOVE(&cpus, cpu, node);
7570 if (ts->child_tidptr) {
7571 put_user_u32(0, ts->child_tidptr);
7572 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7576 object_unref(OBJECT(cpu));
7578 rcu_unregister_thread();
7586 gdb_exit(cpu_env, arg1);
7588 ret = 0; /* avoid warning */
7590 case TARGET_NR_read:
7594 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7596 ret = get_errno(safe_read(arg1, p, arg3));
7598 fd_trans_host_to_target_data(arg1)) {
7599 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7601 unlock_user(p, arg2, ret);
7604 case TARGET_NR_write:
7605 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7607 ret = get_errno(safe_write(arg1, p, arg3));
7608 unlock_user(p, arg2, 0);
7610 #ifdef TARGET_NR_open
7611 case TARGET_NR_open:
7612 if (!(p = lock_user_string(arg1)))
7614 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7615 target_to_host_bitmask(arg2, fcntl_flags_tbl),
7617 fd_trans_unregister(ret);
7618 unlock_user(p, arg1, 0);
7621 case TARGET_NR_openat:
7622 if (!(p = lock_user_string(arg2)))
7624 ret = get_errno(do_openat(cpu_env, arg1, p,
7625 target_to_host_bitmask(arg3, fcntl_flags_tbl),
7627 fd_trans_unregister(ret);
7628 unlock_user(p, arg2, 0);
7630 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7631 case TARGET_NR_name_to_handle_at:
7632 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7635 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7636 case TARGET_NR_open_by_handle_at:
7637 ret = do_open_by_handle_at(arg1, arg2, arg3);
7638 fd_trans_unregister(ret);
7641 case TARGET_NR_close:
7642 fd_trans_unregister(arg1);
7643 ret = get_errno(close(arg1));
7648 #ifdef TARGET_NR_fork
7649 case TARGET_NR_fork:
7650 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
7653 #ifdef TARGET_NR_waitpid
7654 case TARGET_NR_waitpid:
7657 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7658 if (!is_error(ret) && arg2 && ret
7659 && put_user_s32(host_to_target_waitstatus(status), arg2))
7664 #ifdef TARGET_NR_waitid
7665 case TARGET_NR_waitid:
7669 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7670 if (!is_error(ret) && arg3 && info.si_pid != 0) {
7671 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7673 host_to_target_siginfo(p, &info);
7674 unlock_user(p, arg3, sizeof(target_siginfo_t));
7679 #ifdef TARGET_NR_creat /* not on alpha */
7680 case TARGET_NR_creat:
7681 if (!(p = lock_user_string(arg1)))
7683 ret = get_errno(creat(p, arg2));
7684 fd_trans_unregister(ret);
7685 unlock_user(p, arg1, 0);
7688 #ifdef TARGET_NR_link
7689 case TARGET_NR_link:
7692 p = lock_user_string(arg1);
7693 p2 = lock_user_string(arg2);
7695 ret = -TARGET_EFAULT;
7697 ret = get_errno(link(p, p2));
7698 unlock_user(p2, arg2, 0);
7699 unlock_user(p, arg1, 0);
7703 #if defined(TARGET_NR_linkat)
7704 case TARGET_NR_linkat:
7709 p = lock_user_string(arg2);
7710 p2 = lock_user_string(arg4);
7712 ret = -TARGET_EFAULT;
7714 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7715 unlock_user(p, arg2, 0);
7716 unlock_user(p2, arg4, 0);
7720 #ifdef TARGET_NR_unlink
7721 case TARGET_NR_unlink:
7722 if (!(p = lock_user_string(arg1)))
7724 ret = get_errno(unlink(p));
7725 unlock_user(p, arg1, 0);
7728 #if defined(TARGET_NR_unlinkat)
7729 case TARGET_NR_unlinkat:
7730 if (!(p = lock_user_string(arg2)))
7732 ret = get_errno(unlinkat(arg1, p, arg3));
7733 unlock_user(p, arg2, 0);
7736 case TARGET_NR_execve:
7738 char **argp, **envp;
7741 abi_ulong guest_argp;
7742 abi_ulong guest_envp;
7749 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7750 if (get_user_ual(addr, gp))
7758 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7759 if (get_user_ual(addr, gp))
7766 argp = alloca((argc + 1) * sizeof(void *));
7767 envp = alloca((envc + 1) * sizeof(void *));
7769 for (gp = guest_argp, q = argp; gp;
7770 gp += sizeof(abi_ulong), q++) {
7771 if (get_user_ual(addr, gp))
7775 if (!(*q = lock_user_string(addr)))
7777 total_size += strlen(*q) + 1;
7781 for (gp = guest_envp, q = envp; gp;
7782 gp += sizeof(abi_ulong), q++) {
7783 if (get_user_ual(addr, gp))
7787 if (!(*q = lock_user_string(addr)))
7789 total_size += strlen(*q) + 1;
7793 if (!(p = lock_user_string(arg1)))
7795 /* Although execve() is not an interruptible syscall it is
7796 * a special case where we must use the safe_syscall wrapper:
7797 * if we allow a signal to happen before we make the host
7798 * syscall then we will 'lose' it, because at the point of
7799 * execve the process leaves QEMU's control. So we use the
7800 * safe syscall wrapper to ensure that we either take the
7801 * signal as a guest signal, or else it does not happen
7802 * before the execve completes and makes it the other
7803 * program's problem.
7805 ret = get_errno(safe_execve(p, argp, envp));
7806 unlock_user(p, arg1, 0);
7811 ret = -TARGET_EFAULT;
7814 for (gp = guest_argp, q = argp; *q;
7815 gp += sizeof(abi_ulong), q++) {
7816 if (get_user_ual(addr, gp)
7819 unlock_user(*q, addr, 0);
7821 for (gp = guest_envp, q = envp; *q;
7822 gp += sizeof(abi_ulong), q++) {
7823 if (get_user_ual(addr, gp)
7826 unlock_user(*q, addr, 0);
7830 case TARGET_NR_chdir:
7831 if (!(p = lock_user_string(arg1)))
7833 ret = get_errno(chdir(p));
7834 unlock_user(p, arg1, 0);
7836 #ifdef TARGET_NR_time
7837 case TARGET_NR_time:
7840 ret = get_errno(time(&host_time));
7843 && put_user_sal(host_time, arg1))
7848 #ifdef TARGET_NR_mknod
7849 case TARGET_NR_mknod:
7850 if (!(p = lock_user_string(arg1)))
7852 ret = get_errno(mknod(p, arg2, arg3));
7853 unlock_user(p, arg1, 0);
7856 #if defined(TARGET_NR_mknodat)
7857 case TARGET_NR_mknodat:
7858 if (!(p = lock_user_string(arg2)))
7860 ret = get_errno(mknodat(arg1, p, arg3, arg4));
7861 unlock_user(p, arg2, 0);
7864 #ifdef TARGET_NR_chmod
7865 case TARGET_NR_chmod:
7866 if (!(p = lock_user_string(arg1)))
7868 ret = get_errno(chmod(p, arg2));
7869 unlock_user(p, arg1, 0);
7872 #ifdef TARGET_NR_break
7873 case TARGET_NR_break:
7876 #ifdef TARGET_NR_oldstat
7877 case TARGET_NR_oldstat:
7880 case TARGET_NR_lseek:
7881 ret = get_errno(lseek(arg1, arg2, arg3));
7883 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7884 /* Alpha specific */
7885 case TARGET_NR_getxpid:
7886 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7887 ret = get_errno(getpid());
7890 #ifdef TARGET_NR_getpid
7891 case TARGET_NR_getpid:
7892 ret = get_errno(getpid());
7895 case TARGET_NR_mount:
7897 /* need to look at the data field */
7901 p = lock_user_string(arg1);
7909 p2 = lock_user_string(arg2);
7912 unlock_user(p, arg1, 0);
7918 p3 = lock_user_string(arg3);
7921 unlock_user(p, arg1, 0);
7923 unlock_user(p2, arg2, 0);
7930 /* FIXME - arg5 should be locked, but it isn't clear how to
7931 * do that since it's not guaranteed to be a NULL-terminated
7935 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7937 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7939 ret = get_errno(ret);
7942 unlock_user(p, arg1, 0);
7944 unlock_user(p2, arg2, 0);
7946 unlock_user(p3, arg3, 0);
7950 #ifdef TARGET_NR_umount
7951 case TARGET_NR_umount:
7952 if (!(p = lock_user_string(arg1)))
7954 ret = get_errno(umount(p));
7955 unlock_user(p, arg1, 0);
7958 #ifdef TARGET_NR_stime /* not on alpha */
7959 case TARGET_NR_stime:
7962 if (get_user_sal(host_time, arg1))
7964 ret = get_errno(stime(&host_time));
7968 case TARGET_NR_ptrace:
7970 #ifdef TARGET_NR_alarm /* not on alpha */
7971 case TARGET_NR_alarm:
7975 #ifdef TARGET_NR_oldfstat
7976 case TARGET_NR_oldfstat:
7979 #ifdef TARGET_NR_pause /* not on alpha */
7980 case TARGET_NR_pause:
7981 if (!block_signals()) {
7982 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7984 ret = -TARGET_EINTR;
7987 #ifdef TARGET_NR_utime
7988 case TARGET_NR_utime:
7990 struct utimbuf tbuf, *host_tbuf;
7991 struct target_utimbuf *target_tbuf;
7993 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
7995 tbuf.actime = tswapal(target_tbuf->actime);
7996 tbuf.modtime = tswapal(target_tbuf->modtime);
7997 unlock_user_struct(target_tbuf, arg2, 0);
8002 if (!(p = lock_user_string(arg1)))
8004 ret = get_errno(utime(p, host_tbuf));
8005 unlock_user(p, arg1, 0);
8009 #ifdef TARGET_NR_utimes
8010 case TARGET_NR_utimes:
8012 struct timeval *tvp, tv[2];
8014 if (copy_from_user_timeval(&tv[0], arg2)
8015 || copy_from_user_timeval(&tv[1],
8016 arg2 + sizeof(struct target_timeval)))
8022 if (!(p = lock_user_string(arg1)))
8024 ret = get_errno(utimes(p, tvp));
8025 unlock_user(p, arg1, 0);
8029 #if defined(TARGET_NR_futimesat)
8030 case TARGET_NR_futimesat:
8032 struct timeval *tvp, tv[2];
8034 if (copy_from_user_timeval(&tv[0], arg3)
8035 || copy_from_user_timeval(&tv[1],
8036 arg3 + sizeof(struct target_timeval)))
8042 if (!(p = lock_user_string(arg2)))
8044 ret = get_errno(futimesat(arg1, path(p), tvp));
8045 unlock_user(p, arg2, 0);
8049 #ifdef TARGET_NR_stty
8050 case TARGET_NR_stty:
8053 #ifdef TARGET_NR_gtty
8054 case TARGET_NR_gtty:
8057 #ifdef TARGET_NR_access
8058 case TARGET_NR_access:
8059 if (!(p = lock_user_string(arg1)))
8061 ret = get_errno(access(path(p), arg2));
8062 unlock_user(p, arg1, 0);
8065 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8066 case TARGET_NR_faccessat:
8067 if (!(p = lock_user_string(arg2)))
8069 ret = get_errno(faccessat(arg1, p, arg3, 0));
8070 unlock_user(p, arg2, 0);
8073 #ifdef TARGET_NR_nice /* not on alpha */
8074 case TARGET_NR_nice:
8075 ret = get_errno(nice(arg1));
8078 #ifdef TARGET_NR_ftime
8079 case TARGET_NR_ftime:
8082 case TARGET_NR_sync:
8086 case TARGET_NR_kill:
8087 ret = get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8089 #ifdef TARGET_NR_rename
8090 case TARGET_NR_rename:
8093 p = lock_user_string(arg1);
8094 p2 = lock_user_string(arg2);
8096 ret = -TARGET_EFAULT;
8098 ret = get_errno(rename(p, p2));
8099 unlock_user(p2, arg2, 0);
8100 unlock_user(p, arg1, 0);
8104 #if defined(TARGET_NR_renameat)
8105 case TARGET_NR_renameat:
8108 p = lock_user_string(arg2);
8109 p2 = lock_user_string(arg4);
8111 ret = -TARGET_EFAULT;
8113 ret = get_errno(renameat(arg1, p, arg3, p2));
8114 unlock_user(p2, arg4, 0);
8115 unlock_user(p, arg2, 0);
8119 #ifdef TARGET_NR_mkdir
8120 case TARGET_NR_mkdir:
8121 if (!(p = lock_user_string(arg1)))
8123 ret = get_errno(mkdir(p, arg2));
8124 unlock_user(p, arg1, 0);
8127 #if defined(TARGET_NR_mkdirat)
8128 case TARGET_NR_mkdirat:
8129 if (!(p = lock_user_string(arg2)))
8131 ret = get_errno(mkdirat(arg1, p, arg3));
8132 unlock_user(p, arg2, 0);
8135 #ifdef TARGET_NR_rmdir
8136 case TARGET_NR_rmdir:
8137 if (!(p = lock_user_string(arg1)))
8139 ret = get_errno(rmdir(p));
8140 unlock_user(p, arg1, 0);
8144 ret = get_errno(dup(arg1));
8146 fd_trans_dup(arg1, ret);
8149 #ifdef TARGET_NR_pipe
8150 case TARGET_NR_pipe:
8151 ret = do_pipe(cpu_env, arg1, 0, 0);
8154 #ifdef TARGET_NR_pipe2
8155 case TARGET_NR_pipe2:
8156 ret = do_pipe(cpu_env, arg1,
8157 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8160 case TARGET_NR_times:
8162 struct target_tms *tmsp;
8164 ret = get_errno(times(&tms));
8166 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8169 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8170 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8171 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8172 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8175 ret = host_to_target_clock_t(ret);
8178 #ifdef TARGET_NR_prof
8179 case TARGET_NR_prof:
8182 #ifdef TARGET_NR_signal
8183 case TARGET_NR_signal:
8186 case TARGET_NR_acct:
8188 ret = get_errno(acct(NULL));
8190 if (!(p = lock_user_string(arg1)))
8192 ret = get_errno(acct(path(p)));
8193 unlock_user(p, arg1, 0);
8196 #ifdef TARGET_NR_umount2
8197 case TARGET_NR_umount2:
8198 if (!(p = lock_user_string(arg1)))
8200 ret = get_errno(umount2(p, arg2));
8201 unlock_user(p, arg1, 0);
8204 #ifdef TARGET_NR_lock
8205 case TARGET_NR_lock:
8208 case TARGET_NR_ioctl:
8209 ret = do_ioctl(arg1, arg2, arg3);
8211 case TARGET_NR_fcntl:
8212 ret = do_fcntl(arg1, arg2, arg3);
8214 #ifdef TARGET_NR_mpx
8218 case TARGET_NR_setpgid:
8219 ret = get_errno(setpgid(arg1, arg2));
8221 #ifdef TARGET_NR_ulimit
8222 case TARGET_NR_ulimit:
8225 #ifdef TARGET_NR_oldolduname
8226 case TARGET_NR_oldolduname:
8229 case TARGET_NR_umask:
8230 ret = get_errno(umask(arg1));
8232 case TARGET_NR_chroot:
8233 if (!(p = lock_user_string(arg1)))
8235 ret = get_errno(chroot(p));
8236 unlock_user(p, arg1, 0);
8238 #ifdef TARGET_NR_ustat
8239 case TARGET_NR_ustat:
8242 #ifdef TARGET_NR_dup2
8243 case TARGET_NR_dup2:
8244 ret = get_errno(dup2(arg1, arg2));
8246 fd_trans_dup(arg1, arg2);
8250 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8251 case TARGET_NR_dup3:
8252 ret = get_errno(dup3(arg1, arg2, arg3));
8254 fd_trans_dup(arg1, arg2);
8258 #ifdef TARGET_NR_getppid /* not on alpha */
8259 case TARGET_NR_getppid:
8260 ret = get_errno(getppid());
8263 #ifdef TARGET_NR_getpgrp
8264 case TARGET_NR_getpgrp:
8265 ret = get_errno(getpgrp());
8268 case TARGET_NR_setsid:
8269 ret = get_errno(setsid());
8271 #ifdef TARGET_NR_sigaction
8272 case TARGET_NR_sigaction:
8274 #if defined(TARGET_ALPHA)
8275 struct target_sigaction act, oact, *pact = 0;
8276 struct target_old_sigaction *old_act;
8278 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8280 act._sa_handler = old_act->_sa_handler;
8281 target_siginitset(&act.sa_mask, old_act->sa_mask);
8282 act.sa_flags = old_act->sa_flags;
8283 act.sa_restorer = 0;
8284 unlock_user_struct(old_act, arg2, 0);
8287 ret = get_errno(do_sigaction(arg1, pact, &oact));
8288 if (!is_error(ret) && arg3) {
8289 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8291 old_act->_sa_handler = oact._sa_handler;
8292 old_act->sa_mask = oact.sa_mask.sig[0];
8293 old_act->sa_flags = oact.sa_flags;
8294 unlock_user_struct(old_act, arg3, 1);
8296 #elif defined(TARGET_MIPS)
8297 struct target_sigaction act, oact, *pact, *old_act;
8300 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8302 act._sa_handler = old_act->_sa_handler;
8303 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8304 act.sa_flags = old_act->sa_flags;
8305 unlock_user_struct(old_act, arg2, 0);
8311 ret = get_errno(do_sigaction(arg1, pact, &oact));
8313 if (!is_error(ret) && arg3) {
8314 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8316 old_act->_sa_handler = oact._sa_handler;
8317 old_act->sa_flags = oact.sa_flags;
8318 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8319 old_act->sa_mask.sig[1] = 0;
8320 old_act->sa_mask.sig[2] = 0;
8321 old_act->sa_mask.sig[3] = 0;
8322 unlock_user_struct(old_act, arg3, 1);
8325 struct target_old_sigaction *old_act;
8326 struct target_sigaction act, oact, *pact;
8328 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8330 act._sa_handler = old_act->_sa_handler;
8331 target_siginitset(&act.sa_mask, old_act->sa_mask);
8332 act.sa_flags = old_act->sa_flags;
8333 act.sa_restorer = old_act->sa_restorer;
8334 unlock_user_struct(old_act, arg2, 0);
8339 ret = get_errno(do_sigaction(arg1, pact, &oact));
8340 if (!is_error(ret) && arg3) {
8341 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8343 old_act->_sa_handler = oact._sa_handler;
8344 old_act->sa_mask = oact.sa_mask.sig[0];
8345 old_act->sa_flags = oact.sa_flags;
8346 old_act->sa_restorer = oact.sa_restorer;
8347 unlock_user_struct(old_act, arg3, 1);
8353 case TARGET_NR_rt_sigaction:
8355 #if defined(TARGET_ALPHA)
8356 struct target_sigaction act, oact, *pact = 0;
8357 struct target_rt_sigaction *rt_act;
8359 if (arg4 != sizeof(target_sigset_t)) {
8360 ret = -TARGET_EINVAL;
8364 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8366 act._sa_handler = rt_act->_sa_handler;
8367 act.sa_mask = rt_act->sa_mask;
8368 act.sa_flags = rt_act->sa_flags;
8369 act.sa_restorer = arg5;
8370 unlock_user_struct(rt_act, arg2, 0);
8373 ret = get_errno(do_sigaction(arg1, pact, &oact));
8374 if (!is_error(ret) && arg3) {
8375 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8377 rt_act->_sa_handler = oact._sa_handler;
8378 rt_act->sa_mask = oact.sa_mask;
8379 rt_act->sa_flags = oact.sa_flags;
8380 unlock_user_struct(rt_act, arg3, 1);
8383 struct target_sigaction *act;
8384 struct target_sigaction *oact;
8386 if (arg4 != sizeof(target_sigset_t)) {
8387 ret = -TARGET_EINVAL;
8391 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
8396 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8397 ret = -TARGET_EFAULT;
8398 goto rt_sigaction_fail;
8402 ret = get_errno(do_sigaction(arg1, act, oact));
8405 unlock_user_struct(act, arg2, 0);
8407 unlock_user_struct(oact, arg3, 1);
8411 #ifdef TARGET_NR_sgetmask /* not on alpha */
8412 case TARGET_NR_sgetmask:
8415 abi_ulong target_set;
8416 ret = do_sigprocmask(0, NULL, &cur_set);
8418 host_to_target_old_sigset(&target_set, &cur_set);
8424 #ifdef TARGET_NR_ssetmask /* not on alpha */
8425 case TARGET_NR_ssetmask:
8427 sigset_t set, oset, cur_set;
8428 abi_ulong target_set = arg1;
8429 /* We only have one word of the new mask so we must read
8430 * the rest of it with do_sigprocmask() and OR in this word.
8431 * We are guaranteed that a do_sigprocmask() that only queries
8432 * the signal mask will not fail.
8434 ret = do_sigprocmask(0, NULL, &cur_set);
8436 target_to_host_old_sigset(&set, &target_set);
8437 sigorset(&set, &set, &cur_set);
8438 ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8440 host_to_target_old_sigset(&target_set, &oset);
8446 #ifdef TARGET_NR_sigprocmask
8447 case TARGET_NR_sigprocmask:
8449 #if defined(TARGET_ALPHA)
8450 sigset_t set, oldset;
8455 case TARGET_SIG_BLOCK:
8458 case TARGET_SIG_UNBLOCK:
8461 case TARGET_SIG_SETMASK:
8465 ret = -TARGET_EINVAL;
8469 target_to_host_old_sigset(&set, &mask);
8471 ret = do_sigprocmask(how, &set, &oldset);
8472 if (!is_error(ret)) {
8473 host_to_target_old_sigset(&mask, &oldset);
8475 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8478 sigset_t set, oldset, *set_ptr;
8483 case TARGET_SIG_BLOCK:
8486 case TARGET_SIG_UNBLOCK:
8489 case TARGET_SIG_SETMASK:
8493 ret = -TARGET_EINVAL;
8496 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8498 target_to_host_old_sigset(&set, p);
8499 unlock_user(p, arg2, 0);
8505 ret = do_sigprocmask(how, set_ptr, &oldset);
8506 if (!is_error(ret) && arg3) {
8507 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8509 host_to_target_old_sigset(p, &oldset);
8510 unlock_user(p, arg3, sizeof(target_sigset_t));
8516 case TARGET_NR_rt_sigprocmask:
8519 sigset_t set, oldset, *set_ptr;
8521 if (arg4 != sizeof(target_sigset_t)) {
8522 ret = -TARGET_EINVAL;
8528 case TARGET_SIG_BLOCK:
8531 case TARGET_SIG_UNBLOCK:
8534 case TARGET_SIG_SETMASK:
8538 ret = -TARGET_EINVAL;
8541 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8543 target_to_host_sigset(&set, p);
8544 unlock_user(p, arg2, 0);
8550 ret = do_sigprocmask(how, set_ptr, &oldset);
8551 if (!is_error(ret) && arg3) {
8552 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8554 host_to_target_sigset(p, &oldset);
8555 unlock_user(p, arg3, sizeof(target_sigset_t));
8559 #ifdef TARGET_NR_sigpending
8560 case TARGET_NR_sigpending:
8563 ret = get_errno(sigpending(&set));
8564 if (!is_error(ret)) {
8565 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8567 host_to_target_old_sigset(p, &set);
8568 unlock_user(p, arg1, sizeof(target_sigset_t));
8573 case TARGET_NR_rt_sigpending:
8577 /* Yes, this check is >, not != like most. We follow the kernel's
8578 * logic and it does it like this because it implements
8579 * NR_sigpending through the same code path, and in that case
8580 * the old_sigset_t is smaller in size.
8582 if (arg2 > sizeof(target_sigset_t)) {
8583 ret = -TARGET_EINVAL;
8587 ret = get_errno(sigpending(&set));
8588 if (!is_error(ret)) {
8589 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8591 host_to_target_sigset(p, &set);
8592 unlock_user(p, arg1, sizeof(target_sigset_t));
8596 #ifdef TARGET_NR_sigsuspend
8597 case TARGET_NR_sigsuspend:
8599 TaskState *ts = cpu->opaque;
8600 #if defined(TARGET_ALPHA)
8601 abi_ulong mask = arg1;
8602 target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8604 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8606 target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8607 unlock_user(p, arg1, 0);
8609 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8611 if (ret != -TARGET_ERESTARTSYS) {
8612 ts->in_sigsuspend = 1;
8617 case TARGET_NR_rt_sigsuspend:
8619 TaskState *ts = cpu->opaque;
8621 if (arg2 != sizeof(target_sigset_t)) {
8622 ret = -TARGET_EINVAL;
8625 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8627 target_to_host_sigset(&ts->sigsuspend_mask, p);
8628 unlock_user(p, arg1, 0);
8629 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8631 if (ret != -TARGET_ERESTARTSYS) {
8632 ts->in_sigsuspend = 1;
8636 case TARGET_NR_rt_sigtimedwait:
8639 struct timespec uts, *puts;
8642 if (arg4 != sizeof(target_sigset_t)) {
8643 ret = -TARGET_EINVAL;
8647 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8649 target_to_host_sigset(&set, p);
8650 unlock_user(p, arg1, 0);
8653 target_to_host_timespec(puts, arg3);
8657 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8659 if (!is_error(ret)) {
8661 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8666 host_to_target_siginfo(p, &uinfo);
8667 unlock_user(p, arg2, sizeof(target_siginfo_t));
8669 ret = host_to_target_signal(ret);
8673 case TARGET_NR_rt_sigqueueinfo:
8677 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8681 target_to_host_siginfo(&uinfo, p);
8682 unlock_user(p, arg1, 0);
8683 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8686 #ifdef TARGET_NR_sigreturn
8687 case TARGET_NR_sigreturn:
8688 if (block_signals()) {
8689 ret = -TARGET_ERESTARTSYS;
8691 ret = do_sigreturn(cpu_env);
8695 case TARGET_NR_rt_sigreturn:
8696 if (block_signals()) {
8697 ret = -TARGET_ERESTARTSYS;
8699 ret = do_rt_sigreturn(cpu_env);
8702 case TARGET_NR_sethostname:
8703 if (!(p = lock_user_string(arg1)))
8705 ret = get_errno(sethostname(p, arg2));
8706 unlock_user(p, arg1, 0);
8708 case TARGET_NR_setrlimit:
8710 int resource = target_to_host_resource(arg1);
8711 struct target_rlimit *target_rlim;
8713 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8715 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8716 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8717 unlock_user_struct(target_rlim, arg2, 0);
8718 ret = get_errno(setrlimit(resource, &rlim));
8721 case TARGET_NR_getrlimit:
8723 int resource = target_to_host_resource(arg1);
8724 struct target_rlimit *target_rlim;
8727 ret = get_errno(getrlimit(resource, &rlim));
8728 if (!is_error(ret)) {
8729 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8731 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8732 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8733 unlock_user_struct(target_rlim, arg2, 1);
8737 case TARGET_NR_getrusage:
8739 struct rusage rusage;
8740 ret = get_errno(getrusage(arg1, &rusage));
8741 if (!is_error(ret)) {
8742 ret = host_to_target_rusage(arg2, &rusage);
8746 case TARGET_NR_gettimeofday:
8749 ret = get_errno(gettimeofday(&tv, NULL));
8750 if (!is_error(ret)) {
8751 if (copy_to_user_timeval(arg1, &tv))
8756 case TARGET_NR_settimeofday:
8758 struct timeval tv, *ptv = NULL;
8759 struct timezone tz, *ptz = NULL;
8762 if (copy_from_user_timeval(&tv, arg1)) {
8769 if (copy_from_user_timezone(&tz, arg2)) {
8775 ret = get_errno(settimeofday(ptv, ptz));
8778 #if defined(TARGET_NR_select)
8779 case TARGET_NR_select:
8780 #if defined(TARGET_WANT_NI_OLD_SELECT)
8781 /* some architectures used to have old_select here
8782 * but now ENOSYS it.
8784 ret = -TARGET_ENOSYS;
8785 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8786 ret = do_old_select(arg1);
8788 ret = do_select(arg1, arg2, arg3, arg4, arg5);
8792 #ifdef TARGET_NR_pselect6
8793 case TARGET_NR_pselect6:
8795 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
8796 fd_set rfds, wfds, efds;
8797 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
8798 struct timespec ts, *ts_ptr;
8801 * The 6th arg is actually two args smashed together,
8802 * so we cannot use the C library.
8810 abi_ulong arg_sigset, arg_sigsize, *arg7;
8811 target_sigset_t *target_sigset;
8819 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
8823 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
8827 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
8833 * This takes a timespec, and not a timeval, so we cannot
8834 * use the do_select() helper ...
8837 if (target_to_host_timespec(&ts, ts_addr)) {
8845 /* Extract the two packed args for the sigset */
8848 sig.size = SIGSET_T_SIZE;
8850 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
8854 arg_sigset = tswapal(arg7[0]);
8855 arg_sigsize = tswapal(arg7[1]);
8856 unlock_user(arg7, arg6, 0);
8860 if (arg_sigsize != sizeof(*target_sigset)) {
8861 /* Like the kernel, we enforce correct size sigsets */
8862 ret = -TARGET_EINVAL;
8865 target_sigset = lock_user(VERIFY_READ, arg_sigset,
8866 sizeof(*target_sigset), 1);
8867 if (!target_sigset) {
8870 target_to_host_sigset(&set, target_sigset);
8871 unlock_user(target_sigset, arg_sigset, 0);
8879 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
8882 if (!is_error(ret)) {
8883 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
8885 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
8887 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
8890 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
8896 #ifdef TARGET_NR_symlink
8897 case TARGET_NR_symlink:
8900 p = lock_user_string(arg1);
8901 p2 = lock_user_string(arg2);
8903 ret = -TARGET_EFAULT;
8905 ret = get_errno(symlink(p, p2));
8906 unlock_user(p2, arg2, 0);
8907 unlock_user(p, arg1, 0);
8911 #if defined(TARGET_NR_symlinkat)
8912 case TARGET_NR_symlinkat:
8915 p = lock_user_string(arg1);
8916 p2 = lock_user_string(arg3);
8918 ret = -TARGET_EFAULT;
8920 ret = get_errno(symlinkat(p, arg2, p2));
8921 unlock_user(p2, arg3, 0);
8922 unlock_user(p, arg1, 0);
8926 #ifdef TARGET_NR_oldlstat
8927 case TARGET_NR_oldlstat:
8930 #ifdef TARGET_NR_readlink
8931 case TARGET_NR_readlink:
8934 p = lock_user_string(arg1);
8935 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8937 ret = -TARGET_EFAULT;
8939 /* Short circuit this for the magic exe check. */
8940 ret = -TARGET_EINVAL;
8941 } else if (is_proc_myself((const char *)p, "exe")) {
8942 char real[PATH_MAX], *temp;
8943 temp = realpath(exec_path, real);
8944 /* Return value is # of bytes that we wrote to the buffer. */
8946 ret = get_errno(-1);
8948 /* Don't worry about sign mismatch as earlier mapping
8949 * logic would have thrown a bad address error. */
8950 ret = MIN(strlen(real), arg3);
8951 /* We cannot NUL terminate the string. */
8952 memcpy(p2, real, ret);
8955 ret = get_errno(readlink(path(p), p2, arg3));
8957 unlock_user(p2, arg2, ret);
8958 unlock_user(p, arg1, 0);
8962 #if defined(TARGET_NR_readlinkat)
8963 case TARGET_NR_readlinkat:
8966 p = lock_user_string(arg2);
8967 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8969 ret = -TARGET_EFAULT;
8970 } else if (is_proc_myself((const char *)p, "exe")) {
8971 char real[PATH_MAX], *temp;
8972 temp = realpath(exec_path, real);
8973 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
8974 snprintf((char *)p2, arg4, "%s", real);
8976 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
8978 unlock_user(p2, arg3, ret);
8979 unlock_user(p, arg2, 0);
8983 #ifdef TARGET_NR_uselib
8984 case TARGET_NR_uselib:
8987 #ifdef TARGET_NR_swapon
8988 case TARGET_NR_swapon:
8989 if (!(p = lock_user_string(arg1)))
8991 ret = get_errno(swapon(p, arg2));
8992 unlock_user(p, arg1, 0);
8995 case TARGET_NR_reboot:
8996 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
8997 /* arg4 must be ignored in all other cases */
8998 p = lock_user_string(arg4);
9002 ret = get_errno(reboot(arg1, arg2, arg3, p));
9003 unlock_user(p, arg4, 0);
9005 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9008 #ifdef TARGET_NR_readdir
9009 case TARGET_NR_readdir:
9012 #ifdef TARGET_NR_mmap
9013 case TARGET_NR_mmap:
9014 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9015 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9016 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9017 || defined(TARGET_S390X)
9020 abi_ulong v1, v2, v3, v4, v5, v6;
9021 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9029 unlock_user(v, arg1, 0);
9030 ret = get_errno(target_mmap(v1, v2, v3,
9031 target_to_host_bitmask(v4, mmap_flags_tbl),
9035 ret = get_errno(target_mmap(arg1, arg2, arg3,
9036 target_to_host_bitmask(arg4, mmap_flags_tbl),
9042 #ifdef TARGET_NR_mmap2
9043 case TARGET_NR_mmap2:
9045 #define MMAP_SHIFT 12
9047 ret = get_errno(target_mmap(arg1, arg2, arg3,
9048 target_to_host_bitmask(arg4, mmap_flags_tbl),
9050 arg6 << MMAP_SHIFT));
9053 case TARGET_NR_munmap:
9054 ret = get_errno(target_munmap(arg1, arg2));
9056 case TARGET_NR_mprotect:
9058 TaskState *ts = cpu->opaque;
9059 /* Special hack to detect libc making the stack executable. */
9060 if ((arg3 & PROT_GROWSDOWN)
9061 && arg1 >= ts->info->stack_limit
9062 && arg1 <= ts->info->start_stack) {
9063 arg3 &= ~PROT_GROWSDOWN;
9064 arg2 = arg2 + arg1 - ts->info->stack_limit;
9065 arg1 = ts->info->stack_limit;
9068 ret = get_errno(target_mprotect(arg1, arg2, arg3));
9070 #ifdef TARGET_NR_mremap
9071 case TARGET_NR_mremap:
9072 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9075 /* ??? msync/mlock/munlock are broken for softmmu. */
9076 #ifdef TARGET_NR_msync
9077 case TARGET_NR_msync:
9078 ret = get_errno(msync(g2h(arg1), arg2, arg3));
9081 #ifdef TARGET_NR_mlock
9082 case TARGET_NR_mlock:
9083 ret = get_errno(mlock(g2h(arg1), arg2));
9086 #ifdef TARGET_NR_munlock
9087 case TARGET_NR_munlock:
9088 ret = get_errno(munlock(g2h(arg1), arg2));
9091 #ifdef TARGET_NR_mlockall
9092 case TARGET_NR_mlockall:
9093 ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9096 #ifdef TARGET_NR_munlockall
9097 case TARGET_NR_munlockall:
9098 ret = get_errno(munlockall());
9101 case TARGET_NR_truncate:
9102 if (!(p = lock_user_string(arg1)))
9104 ret = get_errno(truncate(p, arg2));
9105 unlock_user(p, arg1, 0);
9107 case TARGET_NR_ftruncate:
9108 ret = get_errno(ftruncate(arg1, arg2));
9110 case TARGET_NR_fchmod:
9111 ret = get_errno(fchmod(arg1, arg2));
9113 #if defined(TARGET_NR_fchmodat)
9114 case TARGET_NR_fchmodat:
9115 if (!(p = lock_user_string(arg2)))
9117 ret = get_errno(fchmodat(arg1, p, arg3, 0));
9118 unlock_user(p, arg2, 0);
9121 case TARGET_NR_getpriority:
9122 /* Note that negative values are valid for getpriority, so we must
9123 differentiate based on errno settings. */
9125 ret = getpriority(arg1, arg2);
9126 if (ret == -1 && errno != 0) {
9127 ret = -host_to_target_errno(errno);
9131 /* Return value is the unbiased priority. Signal no error. */
9132 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9134 /* Return value is a biased priority to avoid negative numbers. */
9138 case TARGET_NR_setpriority:
9139 ret = get_errno(setpriority(arg1, arg2, arg3));
9141 #ifdef TARGET_NR_profil
9142 case TARGET_NR_profil:
9145 case TARGET_NR_statfs:
9146 if (!(p = lock_user_string(arg1)))
9148 ret = get_errno(statfs(path(p), &stfs));
9149 unlock_user(p, arg1, 0);
9151 if (!is_error(ret)) {
9152 struct target_statfs *target_stfs;
9154 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9156 __put_user(stfs.f_type, &target_stfs->f_type);
9157 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9158 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9159 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9160 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9161 __put_user(stfs.f_files, &target_stfs->f_files);
9162 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9163 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9164 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9165 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9166 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9167 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9168 unlock_user_struct(target_stfs, arg2, 1);
9171 case TARGET_NR_fstatfs:
9172 ret = get_errno(fstatfs(arg1, &stfs));
9173 goto convert_statfs;
9174 #ifdef TARGET_NR_statfs64
9175 case TARGET_NR_statfs64:
9176 if (!(p = lock_user_string(arg1)))
9178 ret = get_errno(statfs(path(p), &stfs));
9179 unlock_user(p, arg1, 0);
9181 if (!is_error(ret)) {
9182 struct target_statfs64 *target_stfs;
9184 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9186 __put_user(stfs.f_type, &target_stfs->f_type);
9187 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9188 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9189 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9190 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9191 __put_user(stfs.f_files, &target_stfs->f_files);
9192 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9193 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9194 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9195 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9196 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9197 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9198 unlock_user_struct(target_stfs, arg3, 1);
9201 case TARGET_NR_fstatfs64:
9202 ret = get_errno(fstatfs(arg1, &stfs));
9203 goto convert_statfs64;
9205 #ifdef TARGET_NR_ioperm
9206 case TARGET_NR_ioperm:
9209 #ifdef TARGET_NR_socketcall
9210 case TARGET_NR_socketcall:
9211 ret = do_socketcall(arg1, arg2);
9214 #ifdef TARGET_NR_accept
9215 case TARGET_NR_accept:
9216 ret = do_accept4(arg1, arg2, arg3, 0);
9219 #ifdef TARGET_NR_accept4
9220 case TARGET_NR_accept4:
9221 ret = do_accept4(arg1, arg2, arg3, arg4);
9224 #ifdef TARGET_NR_bind
9225 case TARGET_NR_bind:
9226 ret = do_bind(arg1, arg2, arg3);
9229 #ifdef TARGET_NR_connect
9230 case TARGET_NR_connect:
9231 ret = do_connect(arg1, arg2, arg3);
9234 #ifdef TARGET_NR_getpeername
9235 case TARGET_NR_getpeername:
9236 ret = do_getpeername(arg1, arg2, arg3);
9239 #ifdef TARGET_NR_getsockname
9240 case TARGET_NR_getsockname:
9241 ret = do_getsockname(arg1, arg2, arg3);
9244 #ifdef TARGET_NR_getsockopt
9245 case TARGET_NR_getsockopt:
9246 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9249 #ifdef TARGET_NR_listen
9250 case TARGET_NR_listen:
9251 ret = get_errno(listen(arg1, arg2));
9254 #ifdef TARGET_NR_recv
9255 case TARGET_NR_recv:
9256 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9259 #ifdef TARGET_NR_recvfrom
9260 case TARGET_NR_recvfrom:
9261 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9264 #ifdef TARGET_NR_recvmsg
9265 case TARGET_NR_recvmsg:
9266 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
9269 #ifdef TARGET_NR_send
9270 case TARGET_NR_send:
9271 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9274 #ifdef TARGET_NR_sendmsg
9275 case TARGET_NR_sendmsg:
9276 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
9279 #ifdef TARGET_NR_sendmmsg
9280 case TARGET_NR_sendmmsg:
9281 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9283 case TARGET_NR_recvmmsg:
9284 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9287 #ifdef TARGET_NR_sendto
9288 case TARGET_NR_sendto:
9289 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9292 #ifdef TARGET_NR_shutdown
9293 case TARGET_NR_shutdown:
9294 ret = get_errno(shutdown(arg1, arg2));
9297 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9298 case TARGET_NR_getrandom:
9299 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9303 ret = get_errno(getrandom(p, arg2, arg3));
9304 unlock_user(p, arg1, ret);
9307 #ifdef TARGET_NR_socket
9308 case TARGET_NR_socket:
9309 ret = do_socket(arg1, arg2, arg3);
9310 fd_trans_unregister(ret);
9313 #ifdef TARGET_NR_socketpair
9314 case TARGET_NR_socketpair:
9315 ret = do_socketpair(arg1, arg2, arg3, arg4);
9318 #ifdef TARGET_NR_setsockopt
9319 case TARGET_NR_setsockopt:
9320 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9324 case TARGET_NR_syslog:
9325 if (!(p = lock_user_string(arg2)))
9327 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9328 unlock_user(p, arg2, 0);
9331 case TARGET_NR_setitimer:
9333 struct itimerval value, ovalue, *pvalue;
9337 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9338 || copy_from_user_timeval(&pvalue->it_value,
9339 arg2 + sizeof(struct target_timeval)))
9344 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9345 if (!is_error(ret) && arg3) {
9346 if (copy_to_user_timeval(arg3,
9347 &ovalue.it_interval)
9348 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9354 case TARGET_NR_getitimer:
9356 struct itimerval value;
9358 ret = get_errno(getitimer(arg1, &value));
9359 if (!is_error(ret) && arg2) {
9360 if (copy_to_user_timeval(arg2,
9362 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9368 #ifdef TARGET_NR_stat
9369 case TARGET_NR_stat:
9370 if (!(p = lock_user_string(arg1)))
9372 ret = get_errno(stat(path(p), &st));
9373 unlock_user(p, arg1, 0);
9376 #ifdef TARGET_NR_lstat
9377 case TARGET_NR_lstat:
9378 if (!(p = lock_user_string(arg1)))
9380 ret = get_errno(lstat(path(p), &st));
9381 unlock_user(p, arg1, 0);
9384 case TARGET_NR_fstat:
9386 ret = get_errno(fstat(arg1, &st));
9387 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9390 if (!is_error(ret)) {
9391 struct target_stat *target_st;
9393 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9395 memset(target_st, 0, sizeof(*target_st));
9396 __put_user(st.st_dev, &target_st->st_dev);
9397 __put_user(st.st_ino, &target_st->st_ino);
9398 __put_user(st.st_mode, &target_st->st_mode);
9399 __put_user(st.st_uid, &target_st->st_uid);
9400 __put_user(st.st_gid, &target_st->st_gid);
9401 __put_user(st.st_nlink, &target_st->st_nlink);
9402 __put_user(st.st_rdev, &target_st->st_rdev);
9403 __put_user(st.st_size, &target_st->st_size);
9404 __put_user(st.st_blksize, &target_st->st_blksize);
9405 __put_user(st.st_blocks, &target_st->st_blocks);
9406 __put_user(st.st_atime, &target_st->target_st_atime);
9407 __put_user(st.st_mtime, &target_st->target_st_mtime);
9408 __put_user(st.st_ctime, &target_st->target_st_ctime);
9409 unlock_user_struct(target_st, arg2, 1);
9413 #ifdef TARGET_NR_olduname
9414 case TARGET_NR_olduname:
9417 #ifdef TARGET_NR_iopl
9418 case TARGET_NR_iopl:
9421 case TARGET_NR_vhangup:
9422 ret = get_errno(vhangup());
9424 #ifdef TARGET_NR_idle
9425 case TARGET_NR_idle:
9428 #ifdef TARGET_NR_syscall
9429 case TARGET_NR_syscall:
9430 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9431 arg6, arg7, arg8, 0);
9434 case TARGET_NR_wait4:
9437 abi_long status_ptr = arg2;
9438 struct rusage rusage, *rusage_ptr;
9439 abi_ulong target_rusage = arg4;
9440 abi_long rusage_err;
9442 rusage_ptr = &rusage;
9445 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9446 if (!is_error(ret)) {
9447 if (status_ptr && ret) {
9448 status = host_to_target_waitstatus(status);
9449 if (put_user_s32(status, status_ptr))
9452 if (target_rusage) {
9453 rusage_err = host_to_target_rusage(target_rusage, &rusage);
9461 #ifdef TARGET_NR_swapoff
9462 case TARGET_NR_swapoff:
9463 if (!(p = lock_user_string(arg1)))
9465 ret = get_errno(swapoff(p));
9466 unlock_user(p, arg1, 0);
9469 case TARGET_NR_sysinfo:
9471 struct target_sysinfo *target_value;
9472 struct sysinfo value;
9473 ret = get_errno(sysinfo(&value));
9474 if (!is_error(ret) && arg1)
9476 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
9478 __put_user(value.uptime, &target_value->uptime);
9479 __put_user(value.loads[0], &target_value->loads[0]);
9480 __put_user(value.loads[1], &target_value->loads[1]);
9481 __put_user(value.loads[2], &target_value->loads[2]);
9482 __put_user(value.totalram, &target_value->totalram);
9483 __put_user(value.freeram, &target_value->freeram);
9484 __put_user(value.sharedram, &target_value->sharedram);
9485 __put_user(value.bufferram, &target_value->bufferram);
9486 __put_user(value.totalswap, &target_value->totalswap);
9487 __put_user(value.freeswap, &target_value->freeswap);
9488 __put_user(value.procs, &target_value->procs);
9489 __put_user(value.totalhigh, &target_value->totalhigh);
9490 __put_user(value.freehigh, &target_value->freehigh);
9491 __put_user(value.mem_unit, &target_value->mem_unit);
9492 unlock_user_struct(target_value, arg1, 1);
9496 #ifdef TARGET_NR_ipc
9498 ret = do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
9501 #ifdef TARGET_NR_semget
9502 case TARGET_NR_semget:
9503 ret = get_errno(semget(arg1, arg2, arg3));
9506 #ifdef TARGET_NR_semop
9507 case TARGET_NR_semop:
9508 ret = do_semop(arg1, arg2, arg3);
9511 #ifdef TARGET_NR_semctl
9512 case TARGET_NR_semctl:
9513 ret = do_semctl(arg1, arg2, arg3, arg4);
9516 #ifdef TARGET_NR_msgctl
9517 case TARGET_NR_msgctl:
9518 ret = do_msgctl(arg1, arg2, arg3);
9521 #ifdef TARGET_NR_msgget
9522 case TARGET_NR_msgget:
9523 ret = get_errno(msgget(arg1, arg2));
9526 #ifdef TARGET_NR_msgrcv
9527 case TARGET_NR_msgrcv:
9528 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
9531 #ifdef TARGET_NR_msgsnd
9532 case TARGET_NR_msgsnd:
9533 ret = do_msgsnd(arg1, arg2, arg3, arg4);
9536 #ifdef TARGET_NR_shmget
9537 case TARGET_NR_shmget:
9538 ret = get_errno(shmget(arg1, arg2, arg3));
9541 #ifdef TARGET_NR_shmctl
9542 case TARGET_NR_shmctl:
9543 ret = do_shmctl(arg1, arg2, arg3);
9546 #ifdef TARGET_NR_shmat
9547 case TARGET_NR_shmat:
9548 ret = do_shmat(cpu_env, arg1, arg2, arg3);
9551 #ifdef TARGET_NR_shmdt
9552 case TARGET_NR_shmdt:
9553 ret = do_shmdt(arg1);
9556 case TARGET_NR_fsync:
9557 ret = get_errno(fsync(arg1));
9559 case TARGET_NR_clone:
9560 /* Linux manages to have three different orderings for its
9561 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9562 * match the kernel's CONFIG_CLONE_* settings.
9563 * Microblaze is further special in that it uses a sixth
9564 * implicit argument to clone for the TLS pointer.
9566 #if defined(TARGET_MICROBLAZE)
9567 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
9568 #elif defined(TARGET_CLONE_BACKWARDS)
9569 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
9570 #elif defined(TARGET_CLONE_BACKWARDS2)
9571 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
9573 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
9576 #ifdef __NR_exit_group
9577 /* new thread calls */
9578 case TARGET_NR_exit_group:
9582 gdb_exit(cpu_env, arg1);
9583 ret = get_errno(exit_group(arg1));
9586 case TARGET_NR_setdomainname:
9587 if (!(p = lock_user_string(arg1)))
9589 ret = get_errno(setdomainname(p, arg2));
9590 unlock_user(p, arg1, 0);
9592 case TARGET_NR_uname:
9593 /* no need to transcode because we use the linux syscall */
9595 struct new_utsname * buf;
9597 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
9599 ret = get_errno(sys_uname(buf));
9600 if (!is_error(ret)) {
9601 /* Overwrite the native machine name with whatever is being
9603 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
9604 /* Allow the user to override the reported release. */
9605 if (qemu_uname_release && *qemu_uname_release) {
9606 g_strlcpy(buf->release, qemu_uname_release,
9607 sizeof(buf->release));
9610 unlock_user_struct(buf, arg1, 1);
9614 case TARGET_NR_modify_ldt:
9615 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
9617 #if !defined(TARGET_X86_64)
9618 case TARGET_NR_vm86old:
9620 case TARGET_NR_vm86:
9621 ret = do_vm86(cpu_env, arg1, arg2);
9625 case TARGET_NR_adjtimex:
9627 struct timex host_buf;
9629 if (target_to_host_timex(&host_buf, arg1) != 0) {
9632 ret = get_errno(adjtimex(&host_buf));
9633 if (!is_error(ret)) {
9634 if (host_to_target_timex(arg1, &host_buf) != 0) {
9640 #ifdef TARGET_NR_create_module
9641 case TARGET_NR_create_module:
9643 case TARGET_NR_init_module:
9644 case TARGET_NR_delete_module:
9645 #ifdef TARGET_NR_get_kernel_syms
9646 case TARGET_NR_get_kernel_syms:
9649 case TARGET_NR_quotactl:
9651 case TARGET_NR_getpgid:
9652 ret = get_errno(getpgid(arg1));
9654 case TARGET_NR_fchdir:
9655 ret = get_errno(fchdir(arg1));
9657 #ifdef TARGET_NR_bdflush /* not on x86_64 */
9658 case TARGET_NR_bdflush:
9661 #ifdef TARGET_NR_sysfs
9662 case TARGET_NR_sysfs:
9665 case TARGET_NR_personality:
9666 ret = get_errno(personality(arg1));
9668 #ifdef TARGET_NR_afs_syscall
9669 case TARGET_NR_afs_syscall:
9672 #ifdef TARGET_NR__llseek /* Not on alpha */
9673 case TARGET_NR__llseek:
9676 #if !defined(__NR_llseek)
9677 res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
9679 ret = get_errno(res);
9684 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
9686 if ((ret == 0) && put_user_s64(res, arg4)) {
9692 #ifdef TARGET_NR_getdents
9693 case TARGET_NR_getdents:
9694 #ifdef __NR_getdents
9695 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9697 struct target_dirent *target_dirp;
9698 struct linux_dirent *dirp;
9699 abi_long count = arg3;
9701 dirp = g_try_malloc(count);
9703 ret = -TARGET_ENOMEM;
9707 ret = get_errno(sys_getdents(arg1, dirp, count));
9708 if (!is_error(ret)) {
9709 struct linux_dirent *de;
9710 struct target_dirent *tde;
9712 int reclen, treclen;
9713 int count1, tnamelen;
9717 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9721 reclen = de->d_reclen;
9722 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
9723 assert(tnamelen >= 0);
9724 treclen = tnamelen + offsetof(struct target_dirent, d_name);
9725 assert(count1 + treclen <= count);
9726 tde->d_reclen = tswap16(treclen);
9727 tde->d_ino = tswapal(de->d_ino);
9728 tde->d_off = tswapal(de->d_off);
9729 memcpy(tde->d_name, de->d_name, tnamelen);
9730 de = (struct linux_dirent *)((char *)de + reclen);
9732 tde = (struct target_dirent *)((char *)tde + treclen);
9736 unlock_user(target_dirp, arg2, ret);
9742 struct linux_dirent *dirp;
9743 abi_long count = arg3;
9745 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9747 ret = get_errno(sys_getdents(arg1, dirp, count));
9748 if (!is_error(ret)) {
9749 struct linux_dirent *de;
9754 reclen = de->d_reclen;
9757 de->d_reclen = tswap16(reclen);
9758 tswapls(&de->d_ino);
9759 tswapls(&de->d_off);
9760 de = (struct linux_dirent *)((char *)de + reclen);
9764 unlock_user(dirp, arg2, ret);
9768 /* Implement getdents in terms of getdents64 */
9770 struct linux_dirent64 *dirp;
9771 abi_long count = arg3;
9773 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
9777 ret = get_errno(sys_getdents64(arg1, dirp, count));
9778 if (!is_error(ret)) {
9779 /* Convert the dirent64 structs to target dirent. We do this
9780 * in-place, since we can guarantee that a target_dirent is no
9781 * larger than a dirent64; however this means we have to be
9782 * careful to read everything before writing in the new format.
9784 struct linux_dirent64 *de;
9785 struct target_dirent *tde;
9790 tde = (struct target_dirent *)dirp;
9792 int namelen, treclen;
9793 int reclen = de->d_reclen;
9794 uint64_t ino = de->d_ino;
9795 int64_t off = de->d_off;
9796 uint8_t type = de->d_type;
9798 namelen = strlen(de->d_name);
9799 treclen = offsetof(struct target_dirent, d_name)
9801 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
9803 memmove(tde->d_name, de->d_name, namelen + 1);
9804 tde->d_ino = tswapal(ino);
9805 tde->d_off = tswapal(off);
9806 tde->d_reclen = tswap16(treclen);
9807 /* The target_dirent type is in what was formerly a padding
9808 * byte at the end of the structure:
9810 *(((char *)tde) + treclen - 1) = type;
9812 de = (struct linux_dirent64 *)((char *)de + reclen);
9813 tde = (struct target_dirent *)((char *)tde + treclen);
9819 unlock_user(dirp, arg2, ret);
9823 #endif /* TARGET_NR_getdents */
9824 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9825 case TARGET_NR_getdents64:
9827 struct linux_dirent64 *dirp;
9828 abi_long count = arg3;
9829 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9831 ret = get_errno(sys_getdents64(arg1, dirp, count));
9832 if (!is_error(ret)) {
9833 struct linux_dirent64 *de;
9838 reclen = de->d_reclen;
9841 de->d_reclen = tswap16(reclen);
9842 tswap64s((uint64_t *)&de->d_ino);
9843 tswap64s((uint64_t *)&de->d_off);
9844 de = (struct linux_dirent64 *)((char *)de + reclen);
9848 unlock_user(dirp, arg2, ret);
9851 #endif /* TARGET_NR_getdents64 */
9852 #if defined(TARGET_NR__newselect)
9853 case TARGET_NR__newselect:
9854 ret = do_select(arg1, arg2, arg3, arg4, arg5);
9857 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9858 # ifdef TARGET_NR_poll
9859 case TARGET_NR_poll:
9861 # ifdef TARGET_NR_ppoll
9862 case TARGET_NR_ppoll:
9865 struct target_pollfd *target_pfd;
9866 unsigned int nfds = arg2;
9873 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
9874 ret = -TARGET_EINVAL;
9878 target_pfd = lock_user(VERIFY_WRITE, arg1,
9879 sizeof(struct target_pollfd) * nfds, 1);
9884 pfd = alloca(sizeof(struct pollfd) * nfds);
9885 for (i = 0; i < nfds; i++) {
9886 pfd[i].fd = tswap32(target_pfd[i].fd);
9887 pfd[i].events = tswap16(target_pfd[i].events);
9892 # ifdef TARGET_NR_ppoll
9893 case TARGET_NR_ppoll:
9895 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
9896 target_sigset_t *target_set;
9897 sigset_t _set, *set = &_set;
9900 if (target_to_host_timespec(timeout_ts, arg3)) {
9901 unlock_user(target_pfd, arg1, 0);
9909 if (arg5 != sizeof(target_sigset_t)) {
9910 unlock_user(target_pfd, arg1, 0);
9911 ret = -TARGET_EINVAL;
9915 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
9917 unlock_user(target_pfd, arg1, 0);
9920 target_to_host_sigset(set, target_set);
9925 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
9926 set, SIGSET_T_SIZE));
9928 if (!is_error(ret) && arg3) {
9929 host_to_target_timespec(arg3, timeout_ts);
9932 unlock_user(target_set, arg4, 0);
9937 # ifdef TARGET_NR_poll
9938 case TARGET_NR_poll:
9940 struct timespec ts, *pts;
9943 /* Convert ms to secs, ns */
9944 ts.tv_sec = arg3 / 1000;
9945 ts.tv_nsec = (arg3 % 1000) * 1000000LL;
9948 /* -ve poll() timeout means "infinite" */
9951 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
9956 g_assert_not_reached();
9959 if (!is_error(ret)) {
9960 for(i = 0; i < nfds; i++) {
9961 target_pfd[i].revents = tswap16(pfd[i].revents);
9964 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
9968 case TARGET_NR_flock:
9969 /* NOTE: the flock constant seems to be the same for every
9971 ret = get_errno(safe_flock(arg1, arg2));
9973 case TARGET_NR_readv:
9975 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9977 ret = get_errno(safe_readv(arg1, vec, arg3));
9978 unlock_iovec(vec, arg2, arg3, 1);
9980 ret = -host_to_target_errno(errno);
9984 case TARGET_NR_writev:
9986 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9988 ret = get_errno(safe_writev(arg1, vec, arg3));
9989 unlock_iovec(vec, arg2, arg3, 0);
9991 ret = -host_to_target_errno(errno);
9995 case TARGET_NR_getsid:
9996 ret = get_errno(getsid(arg1));
9998 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9999 case TARGET_NR_fdatasync:
10000 ret = get_errno(fdatasync(arg1));
10003 #ifdef TARGET_NR__sysctl
10004 case TARGET_NR__sysctl:
10005 /* We don't implement this, but ENOTDIR is always a safe
10007 ret = -TARGET_ENOTDIR;
10010 case TARGET_NR_sched_getaffinity:
10012 unsigned int mask_size;
10013 unsigned long *mask;
10016 * sched_getaffinity needs multiples of ulong, so need to take
10017 * care of mismatches between target ulong and host ulong sizes.
10019 if (arg2 & (sizeof(abi_ulong) - 1)) {
10020 ret = -TARGET_EINVAL;
10023 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10025 mask = alloca(mask_size);
10026 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10028 if (!is_error(ret)) {
10030 /* More data returned than the caller's buffer will fit.
10031 * This only happens if sizeof(abi_long) < sizeof(long)
10032 * and the caller passed us a buffer holding an odd number
10033 * of abi_longs. If the host kernel is actually using the
10034 * extra 4 bytes then fail EINVAL; otherwise we can just
10035 * ignore them and only copy the interesting part.
10037 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10038 if (numcpus > arg2 * 8) {
10039 ret = -TARGET_EINVAL;
10045 if (copy_to_user(arg3, mask, ret)) {
10051 case TARGET_NR_sched_setaffinity:
10053 unsigned int mask_size;
10054 unsigned long *mask;
10057 * sched_setaffinity needs multiples of ulong, so need to take
10058 * care of mismatches between target ulong and host ulong sizes.
10060 if (arg2 & (sizeof(abi_ulong) - 1)) {
10061 ret = -TARGET_EINVAL;
10064 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10066 mask = alloca(mask_size);
10067 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
10070 memcpy(mask, p, arg2);
10071 unlock_user_struct(p, arg2, 0);
10073 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10076 case TARGET_NR_sched_setparam:
10078 struct sched_param *target_schp;
10079 struct sched_param schp;
10082 return -TARGET_EINVAL;
10084 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10086 schp.sched_priority = tswap32(target_schp->sched_priority);
10087 unlock_user_struct(target_schp, arg2, 0);
10088 ret = get_errno(sched_setparam(arg1, &schp));
10091 case TARGET_NR_sched_getparam:
10093 struct sched_param *target_schp;
10094 struct sched_param schp;
10097 return -TARGET_EINVAL;
10099 ret = get_errno(sched_getparam(arg1, &schp));
10100 if (!is_error(ret)) {
10101 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10103 target_schp->sched_priority = tswap32(schp.sched_priority);
10104 unlock_user_struct(target_schp, arg2, 1);
10108 case TARGET_NR_sched_setscheduler:
10110 struct sched_param *target_schp;
10111 struct sched_param schp;
10113 return -TARGET_EINVAL;
10115 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10117 schp.sched_priority = tswap32(target_schp->sched_priority);
10118 unlock_user_struct(target_schp, arg3, 0);
10119 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
10122 case TARGET_NR_sched_getscheduler:
10123 ret = get_errno(sched_getscheduler(arg1));
10125 case TARGET_NR_sched_yield:
10126 ret = get_errno(sched_yield());
10128 case TARGET_NR_sched_get_priority_max:
10129 ret = get_errno(sched_get_priority_max(arg1));
10131 case TARGET_NR_sched_get_priority_min:
10132 ret = get_errno(sched_get_priority_min(arg1));
10134 case TARGET_NR_sched_rr_get_interval:
10136 struct timespec ts;
10137 ret = get_errno(sched_rr_get_interval(arg1, &ts));
10138 if (!is_error(ret)) {
10139 ret = host_to_target_timespec(arg2, &ts);
10143 case TARGET_NR_nanosleep:
10145 struct timespec req, rem;
10146 target_to_host_timespec(&req, arg1);
10147 ret = get_errno(safe_nanosleep(&req, &rem));
10148 if (is_error(ret) && arg2) {
10149 host_to_target_timespec(arg2, &rem);
10153 #ifdef TARGET_NR_query_module
10154 case TARGET_NR_query_module:
10155 goto unimplemented;
10157 #ifdef TARGET_NR_nfsservctl
10158 case TARGET_NR_nfsservctl:
10159 goto unimplemented;
10161 case TARGET_NR_prctl:
10163 case PR_GET_PDEATHSIG:
10166 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10167 if (!is_error(ret) && arg2
10168 && put_user_ual(deathsig, arg2)) {
10176 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10180 ret = get_errno(prctl(arg1, (unsigned long)name,
10181 arg3, arg4, arg5));
10182 unlock_user(name, arg2, 16);
10187 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10191 ret = get_errno(prctl(arg1, (unsigned long)name,
10192 arg3, arg4, arg5));
10193 unlock_user(name, arg2, 0);
10198 /* Most prctl options have no pointer arguments */
10199 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10203 #ifdef TARGET_NR_arch_prctl
10204 case TARGET_NR_arch_prctl:
10205 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10206 ret = do_arch_prctl(cpu_env, arg1, arg2);
10209 goto unimplemented;
10212 #ifdef TARGET_NR_pread64
10213 case TARGET_NR_pread64:
10214 if (regpairs_aligned(cpu_env)) {
10218 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
10220 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10221 unlock_user(p, arg2, ret);
10223 case TARGET_NR_pwrite64:
10224 if (regpairs_aligned(cpu_env)) {
10228 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
10230 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10231 unlock_user(p, arg2, 0);
10234 case TARGET_NR_getcwd:
10235 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10237 ret = get_errno(sys_getcwd1(p, arg2));
10238 unlock_user(p, arg1, ret);
10240 case TARGET_NR_capget:
10241 case TARGET_NR_capset:
10243 struct target_user_cap_header *target_header;
10244 struct target_user_cap_data *target_data = NULL;
10245 struct __user_cap_header_struct header;
10246 struct __user_cap_data_struct data[2];
10247 struct __user_cap_data_struct *dataptr = NULL;
10248 int i, target_datalen;
10249 int data_items = 1;
10251 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10254 header.version = tswap32(target_header->version);
10255 header.pid = tswap32(target_header->pid);
10257 if (header.version != _LINUX_CAPABILITY_VERSION) {
10258 /* Version 2 and up takes pointer to two user_data structs */
10262 target_datalen = sizeof(*target_data) * data_items;
10265 if (num == TARGET_NR_capget) {
10266 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10268 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10270 if (!target_data) {
10271 unlock_user_struct(target_header, arg1, 0);
10275 if (num == TARGET_NR_capset) {
10276 for (i = 0; i < data_items; i++) {
10277 data[i].effective = tswap32(target_data[i].effective);
10278 data[i].permitted = tswap32(target_data[i].permitted);
10279 data[i].inheritable = tswap32(target_data[i].inheritable);
10286 if (num == TARGET_NR_capget) {
10287 ret = get_errno(capget(&header, dataptr));
10289 ret = get_errno(capset(&header, dataptr));
10292 /* The kernel always updates version for both capget and capset */
10293 target_header->version = tswap32(header.version);
10294 unlock_user_struct(target_header, arg1, 1);
10297 if (num == TARGET_NR_capget) {
10298 for (i = 0; i < data_items; i++) {
10299 target_data[i].effective = tswap32(data[i].effective);
10300 target_data[i].permitted = tswap32(data[i].permitted);
10301 target_data[i].inheritable = tswap32(data[i].inheritable);
10303 unlock_user(target_data, arg2, target_datalen);
10305 unlock_user(target_data, arg2, 0);
10310 case TARGET_NR_sigaltstack:
10311 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
10314 #ifdef CONFIG_SENDFILE
10315 case TARGET_NR_sendfile:
10317 off_t *offp = NULL;
10320 ret = get_user_sal(off, arg3);
10321 if (is_error(ret)) {
10326 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10327 if (!is_error(ret) && arg3) {
10328 abi_long ret2 = put_user_sal(off, arg3);
10329 if (is_error(ret2)) {
10335 #ifdef TARGET_NR_sendfile64
10336 case TARGET_NR_sendfile64:
10338 off_t *offp = NULL;
10341 ret = get_user_s64(off, arg3);
10342 if (is_error(ret)) {
10347 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10348 if (!is_error(ret) && arg3) {
10349 abi_long ret2 = put_user_s64(off, arg3);
10350 if (is_error(ret2)) {
10358 case TARGET_NR_sendfile:
10359 #ifdef TARGET_NR_sendfile64
10360 case TARGET_NR_sendfile64:
10362 goto unimplemented;
10365 #ifdef TARGET_NR_getpmsg
10366 case TARGET_NR_getpmsg:
10367 goto unimplemented;
10369 #ifdef TARGET_NR_putpmsg
10370 case TARGET_NR_putpmsg:
10371 goto unimplemented;
10373 #ifdef TARGET_NR_vfork
10374 case TARGET_NR_vfork:
10375 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
10379 #ifdef TARGET_NR_ugetrlimit
10380 case TARGET_NR_ugetrlimit:
10382 struct rlimit rlim;
10383 int resource = target_to_host_resource(arg1);
10384 ret = get_errno(getrlimit(resource, &rlim));
10385 if (!is_error(ret)) {
10386 struct target_rlimit *target_rlim;
10387 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10389 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10390 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10391 unlock_user_struct(target_rlim, arg2, 1);
10396 #ifdef TARGET_NR_truncate64
10397 case TARGET_NR_truncate64:
10398 if (!(p = lock_user_string(arg1)))
10400 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
10401 unlock_user(p, arg1, 0);
10404 #ifdef TARGET_NR_ftruncate64
10405 case TARGET_NR_ftruncate64:
10406 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
10409 #ifdef TARGET_NR_stat64
10410 case TARGET_NR_stat64:
10411 if (!(p = lock_user_string(arg1)))
10413 ret = get_errno(stat(path(p), &st));
10414 unlock_user(p, arg1, 0);
10415 if (!is_error(ret))
10416 ret = host_to_target_stat64(cpu_env, arg2, &st);
10419 #ifdef TARGET_NR_lstat64
10420 case TARGET_NR_lstat64:
10421 if (!(p = lock_user_string(arg1)))
10423 ret = get_errno(lstat(path(p), &st));
10424 unlock_user(p, arg1, 0);
10425 if (!is_error(ret))
10426 ret = host_to_target_stat64(cpu_env, arg2, &st);
10429 #ifdef TARGET_NR_fstat64
10430 case TARGET_NR_fstat64:
10431 ret = get_errno(fstat(arg1, &st));
10432 if (!is_error(ret))
10433 ret = host_to_target_stat64(cpu_env, arg2, &st);
10436 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10437 #ifdef TARGET_NR_fstatat64
10438 case TARGET_NR_fstatat64:
10440 #ifdef TARGET_NR_newfstatat
10441 case TARGET_NR_newfstatat:
10443 if (!(p = lock_user_string(arg2)))
10445 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
10446 if (!is_error(ret))
10447 ret = host_to_target_stat64(cpu_env, arg3, &st);
10450 #ifdef TARGET_NR_lchown
10451 case TARGET_NR_lchown:
10452 if (!(p = lock_user_string(arg1)))
10454 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10455 unlock_user(p, arg1, 0);
10458 #ifdef TARGET_NR_getuid
10459 case TARGET_NR_getuid:
10460 ret = get_errno(high2lowuid(getuid()));
10463 #ifdef TARGET_NR_getgid
10464 case TARGET_NR_getgid:
10465 ret = get_errno(high2lowgid(getgid()));
10468 #ifdef TARGET_NR_geteuid
10469 case TARGET_NR_geteuid:
10470 ret = get_errno(high2lowuid(geteuid()));
10473 #ifdef TARGET_NR_getegid
10474 case TARGET_NR_getegid:
10475 ret = get_errno(high2lowgid(getegid()));
10478 case TARGET_NR_setreuid:
10479 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
10481 case TARGET_NR_setregid:
10482 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
10484 case TARGET_NR_getgroups:
10486 int gidsetsize = arg1;
10487 target_id *target_grouplist;
10491 grouplist = alloca(gidsetsize * sizeof(gid_t));
10492 ret = get_errno(getgroups(gidsetsize, grouplist));
10493 if (gidsetsize == 0)
10495 if (!is_error(ret)) {
10496 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
10497 if (!target_grouplist)
10499 for(i = 0;i < ret; i++)
10500 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
10501 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
10505 case TARGET_NR_setgroups:
10507 int gidsetsize = arg1;
10508 target_id *target_grouplist;
10509 gid_t *grouplist = NULL;
10512 grouplist = alloca(gidsetsize * sizeof(gid_t));
10513 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
10514 if (!target_grouplist) {
10515 ret = -TARGET_EFAULT;
10518 for (i = 0; i < gidsetsize; i++) {
10519 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
10521 unlock_user(target_grouplist, arg2, 0);
10523 ret = get_errno(setgroups(gidsetsize, grouplist));
10526 case TARGET_NR_fchown:
10527 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
10529 #if defined(TARGET_NR_fchownat)
10530 case TARGET_NR_fchownat:
10531 if (!(p = lock_user_string(arg2)))
10533 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
10534 low2highgid(arg4), arg5));
10535 unlock_user(p, arg2, 0);
10538 #ifdef TARGET_NR_setresuid
10539 case TARGET_NR_setresuid:
10540 ret = get_errno(sys_setresuid(low2highuid(arg1),
10542 low2highuid(arg3)));
10545 #ifdef TARGET_NR_getresuid
10546 case TARGET_NR_getresuid:
10548 uid_t ruid, euid, suid;
10549 ret = get_errno(getresuid(&ruid, &euid, &suid));
10550 if (!is_error(ret)) {
10551 if (put_user_id(high2lowuid(ruid), arg1)
10552 || put_user_id(high2lowuid(euid), arg2)
10553 || put_user_id(high2lowuid(suid), arg3))
10559 #ifdef TARGET_NR_getresgid
10560 case TARGET_NR_setresgid:
10561 ret = get_errno(sys_setresgid(low2highgid(arg1),
10563 low2highgid(arg3)));
10566 #ifdef TARGET_NR_getresgid
10567 case TARGET_NR_getresgid:
10569 gid_t rgid, egid, sgid;
10570 ret = get_errno(getresgid(&rgid, &egid, &sgid));
10571 if (!is_error(ret)) {
10572 if (put_user_id(high2lowgid(rgid), arg1)
10573 || put_user_id(high2lowgid(egid), arg2)
10574 || put_user_id(high2lowgid(sgid), arg3))
10580 #ifdef TARGET_NR_chown
10581 case TARGET_NR_chown:
10582 if (!(p = lock_user_string(arg1)))
10584 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
10585 unlock_user(p, arg1, 0);
10588 case TARGET_NR_setuid:
10589 ret = get_errno(sys_setuid(low2highuid(arg1)));
10591 case TARGET_NR_setgid:
10592 ret = get_errno(sys_setgid(low2highgid(arg1)));
10594 case TARGET_NR_setfsuid:
10595 ret = get_errno(setfsuid(arg1));
10597 case TARGET_NR_setfsgid:
10598 ret = get_errno(setfsgid(arg1));
10601 #ifdef TARGET_NR_lchown32
10602 case TARGET_NR_lchown32:
10603 if (!(p = lock_user_string(arg1)))
10605 ret = get_errno(lchown(p, arg2, arg3));
10606 unlock_user(p, arg1, 0);
10609 #ifdef TARGET_NR_getuid32
10610 case TARGET_NR_getuid32:
10611 ret = get_errno(getuid());
10615 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10616 /* Alpha specific */
10617 case TARGET_NR_getxuid:
10621 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
10623 ret = get_errno(getuid());
10626 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10627 /* Alpha specific */
10628 case TARGET_NR_getxgid:
10632 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
10634 ret = get_errno(getgid());
10637 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10638 /* Alpha specific */
10639 case TARGET_NR_osf_getsysinfo:
10640 ret = -TARGET_EOPNOTSUPP;
10642 case TARGET_GSI_IEEE_FP_CONTROL:
10644 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
10646 /* Copied from linux ieee_fpcr_to_swcr. */
10647 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
10648 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
10649 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
10650 | SWCR_TRAP_ENABLE_DZE
10651 | SWCR_TRAP_ENABLE_OVF);
10652 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
10653 | SWCR_TRAP_ENABLE_INE);
10654 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
10655 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
10657 if (put_user_u64 (swcr, arg2))
10663 /* case GSI_IEEE_STATE_AT_SIGNAL:
10664 -- Not implemented in linux kernel.
10666 -- Retrieves current unaligned access state; not much used.
10667 case GSI_PROC_TYPE:
10668 -- Retrieves implver information; surely not used.
10669 case GSI_GET_HWRPB:
10670 -- Grabs a copy of the HWRPB; surely not used.
10675 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10676 /* Alpha specific */
10677 case TARGET_NR_osf_setsysinfo:
10678 ret = -TARGET_EOPNOTSUPP;
10680 case TARGET_SSI_IEEE_FP_CONTROL:
10682 uint64_t swcr, fpcr, orig_fpcr;
10684 if (get_user_u64 (swcr, arg2)) {
10687 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
10688 fpcr = orig_fpcr & FPCR_DYN_MASK;
10690 /* Copied from linux ieee_swcr_to_fpcr. */
10691 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
10692 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
10693 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
10694 | SWCR_TRAP_ENABLE_DZE
10695 | SWCR_TRAP_ENABLE_OVF)) << 48;
10696 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
10697 | SWCR_TRAP_ENABLE_INE)) << 57;
10698 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
10699 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
10701 cpu_alpha_store_fpcr(cpu_env, fpcr);
10706 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
10708 uint64_t exc, fpcr, orig_fpcr;
10711 if (get_user_u64(exc, arg2)) {
10715 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
10717 /* We only add to the exception status here. */
10718 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
10720 cpu_alpha_store_fpcr(cpu_env, fpcr);
10723 /* Old exceptions are not signaled. */
10724 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
10726 /* If any exceptions set by this call,
10727 and are unmasked, send a signal. */
10729 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
10730 si_code = TARGET_FPE_FLTRES;
10732 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
10733 si_code = TARGET_FPE_FLTUND;
10735 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
10736 si_code = TARGET_FPE_FLTOVF;
10738 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
10739 si_code = TARGET_FPE_FLTDIV;
10741 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
10742 si_code = TARGET_FPE_FLTINV;
10744 if (si_code != 0) {
10745 target_siginfo_t info;
10746 info.si_signo = SIGFPE;
10748 info.si_code = si_code;
10749 info._sifields._sigfault._addr
10750 = ((CPUArchState *)cpu_env)->pc;
10751 queue_signal((CPUArchState *)cpu_env, info.si_signo,
10752 QEMU_SI_FAULT, &info);
10757 /* case SSI_NVPAIRS:
10758 -- Used with SSIN_UACPROC to enable unaligned accesses.
10759 case SSI_IEEE_STATE_AT_SIGNAL:
10760 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10761 -- Not implemented in linux kernel
10766 #ifdef TARGET_NR_osf_sigprocmask
10767 /* Alpha specific. */
10768 case TARGET_NR_osf_sigprocmask:
10772 sigset_t set, oldset;
10775 case TARGET_SIG_BLOCK:
10778 case TARGET_SIG_UNBLOCK:
10781 case TARGET_SIG_SETMASK:
10785 ret = -TARGET_EINVAL;
10789 target_to_host_old_sigset(&set, &mask);
10790 ret = do_sigprocmask(how, &set, &oldset);
10792 host_to_target_old_sigset(&mask, &oldset);
10799 #ifdef TARGET_NR_getgid32
10800 case TARGET_NR_getgid32:
10801 ret = get_errno(getgid());
10804 #ifdef TARGET_NR_geteuid32
10805 case TARGET_NR_geteuid32:
10806 ret = get_errno(geteuid());
10809 #ifdef TARGET_NR_getegid32
10810 case TARGET_NR_getegid32:
10811 ret = get_errno(getegid());
10814 #ifdef TARGET_NR_setreuid32
10815 case TARGET_NR_setreuid32:
10816 ret = get_errno(setreuid(arg1, arg2));
10819 #ifdef TARGET_NR_setregid32
10820 case TARGET_NR_setregid32:
10821 ret = get_errno(setregid(arg1, arg2));
10824 #ifdef TARGET_NR_getgroups32
10825 case TARGET_NR_getgroups32:
10827 int gidsetsize = arg1;
10828 uint32_t *target_grouplist;
10832 grouplist = alloca(gidsetsize * sizeof(gid_t));
10833 ret = get_errno(getgroups(gidsetsize, grouplist));
10834 if (gidsetsize == 0)
10836 if (!is_error(ret)) {
10837 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
10838 if (!target_grouplist) {
10839 ret = -TARGET_EFAULT;
10842 for(i = 0;i < ret; i++)
10843 target_grouplist[i] = tswap32(grouplist[i]);
10844 unlock_user(target_grouplist, arg2, gidsetsize * 4);
10849 #ifdef TARGET_NR_setgroups32
10850 case TARGET_NR_setgroups32:
10852 int gidsetsize = arg1;
10853 uint32_t *target_grouplist;
10857 grouplist = alloca(gidsetsize * sizeof(gid_t));
10858 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
10859 if (!target_grouplist) {
10860 ret = -TARGET_EFAULT;
10863 for(i = 0;i < gidsetsize; i++)
10864 grouplist[i] = tswap32(target_grouplist[i]);
10865 unlock_user(target_grouplist, arg2, 0);
10866 ret = get_errno(setgroups(gidsetsize, grouplist));
10870 #ifdef TARGET_NR_fchown32
10871 case TARGET_NR_fchown32:
10872 ret = get_errno(fchown(arg1, arg2, arg3));
10875 #ifdef TARGET_NR_setresuid32
10876 case TARGET_NR_setresuid32:
10877 ret = get_errno(sys_setresuid(arg1, arg2, arg3));
10880 #ifdef TARGET_NR_getresuid32
10881 case TARGET_NR_getresuid32:
10883 uid_t ruid, euid, suid;
10884 ret = get_errno(getresuid(&ruid, &euid, &suid));
10885 if (!is_error(ret)) {
10886 if (put_user_u32(ruid, arg1)
10887 || put_user_u32(euid, arg2)
10888 || put_user_u32(suid, arg3))
10894 #ifdef TARGET_NR_setresgid32
10895 case TARGET_NR_setresgid32:
10896 ret = get_errno(sys_setresgid(arg1, arg2, arg3));
10899 #ifdef TARGET_NR_getresgid32
10900 case TARGET_NR_getresgid32:
10902 gid_t rgid, egid, sgid;
10903 ret = get_errno(getresgid(&rgid, &egid, &sgid));
10904 if (!is_error(ret)) {
10905 if (put_user_u32(rgid, arg1)
10906 || put_user_u32(egid, arg2)
10907 || put_user_u32(sgid, arg3))
10913 #ifdef TARGET_NR_chown32
10914 case TARGET_NR_chown32:
10915 if (!(p = lock_user_string(arg1)))
10917 ret = get_errno(chown(p, arg2, arg3));
10918 unlock_user(p, arg1, 0);
10921 #ifdef TARGET_NR_setuid32
10922 case TARGET_NR_setuid32:
10923 ret = get_errno(sys_setuid(arg1));
10926 #ifdef TARGET_NR_setgid32
10927 case TARGET_NR_setgid32:
10928 ret = get_errno(sys_setgid(arg1));
10931 #ifdef TARGET_NR_setfsuid32
10932 case TARGET_NR_setfsuid32:
10933 ret = get_errno(setfsuid(arg1));
10936 #ifdef TARGET_NR_setfsgid32
10937 case TARGET_NR_setfsgid32:
10938 ret = get_errno(setfsgid(arg1));
10942 case TARGET_NR_pivot_root:
10943 goto unimplemented;
10944 #ifdef TARGET_NR_mincore
10945 case TARGET_NR_mincore:
10948 ret = -TARGET_EFAULT;
10949 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
10951 if (!(p = lock_user_string(arg3)))
10953 ret = get_errno(mincore(a, arg2, p));
10954 unlock_user(p, arg3, ret);
10956 unlock_user(a, arg1, 0);
10960 #ifdef TARGET_NR_arm_fadvise64_64
10961 case TARGET_NR_arm_fadvise64_64:
10962 /* arm_fadvise64_64 looks like fadvise64_64 but
10963 * with different argument order: fd, advice, offset, len
10964 * rather than the usual fd, offset, len, advice.
10965 * Note that offset and len are both 64-bit so appear as
10966 * pairs of 32-bit registers.
10968 ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
10969 target_offset64(arg5, arg6), arg2);
10970 ret = -host_to_target_errno(ret);
10974 #if TARGET_ABI_BITS == 32
10976 #ifdef TARGET_NR_fadvise64_64
10977 case TARGET_NR_fadvise64_64:
10978 /* 6 args: fd, offset (high, low), len (high, low), advice */
10979 if (regpairs_aligned(cpu_env)) {
10980 /* offset is in (3,4), len in (5,6) and advice in 7 */
10987 ret = -host_to_target_errno(posix_fadvise(arg1,
10988 target_offset64(arg2, arg3),
10989 target_offset64(arg4, arg5),
10994 #ifdef TARGET_NR_fadvise64
10995 case TARGET_NR_fadvise64:
10996 /* 5 args: fd, offset (high, low), len, advice */
10997 if (regpairs_aligned(cpu_env)) {
10998 /* offset is in (3,4), len in 5 and advice in 6 */
11004 ret = -host_to_target_errno(posix_fadvise(arg1,
11005 target_offset64(arg2, arg3),
11010 #else /* not a 32-bit ABI */
11011 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11012 #ifdef TARGET_NR_fadvise64_64
11013 case TARGET_NR_fadvise64_64:
11015 #ifdef TARGET_NR_fadvise64
11016 case TARGET_NR_fadvise64:
11018 #ifdef TARGET_S390X
11020 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11021 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11022 case 6: arg4 = POSIX_FADV_DONTNEED; break;
11023 case 7: arg4 = POSIX_FADV_NOREUSE; break;
11027 ret = -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11030 #endif /* end of 64-bit ABI fadvise handling */
11032 #ifdef TARGET_NR_madvise
11033 case TARGET_NR_madvise:
11034 /* A straight passthrough may not be safe because qemu sometimes
11035 turns private file-backed mappings into anonymous mappings.
11036 This will break MADV_DONTNEED.
11037 This is a hint, so ignoring and returning success is ok. */
11038 ret = get_errno(0);
11041 #if TARGET_ABI_BITS == 32
11042 case TARGET_NR_fcntl64:
11046 from_flock64_fn *copyfrom = copy_from_user_flock64;
11047 to_flock64_fn *copyto = copy_to_user_flock64;
11050 if (((CPUARMState *)cpu_env)->eabi) {
11051 copyfrom = copy_from_user_eabi_flock64;
11052 copyto = copy_to_user_eabi_flock64;
11056 cmd = target_to_host_fcntl_cmd(arg2);
11057 if (cmd == -TARGET_EINVAL) {
11063 case TARGET_F_GETLK64:
11064 ret = copyfrom(&fl, arg3);
11068 ret = get_errno(fcntl(arg1, cmd, &fl));
11070 ret = copyto(arg3, &fl);
11074 case TARGET_F_SETLK64:
11075 case TARGET_F_SETLKW64:
11076 ret = copyfrom(&fl, arg3);
11080 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11083 ret = do_fcntl(arg1, arg2, arg3);
11089 #ifdef TARGET_NR_cacheflush
11090 case TARGET_NR_cacheflush:
11091 /* self-modifying code is handled automatically, so nothing needed */
11095 #ifdef TARGET_NR_security
11096 case TARGET_NR_security:
11097 goto unimplemented;
11099 #ifdef TARGET_NR_getpagesize
11100 case TARGET_NR_getpagesize:
11101 ret = TARGET_PAGE_SIZE;
11104 case TARGET_NR_gettid:
11105 ret = get_errno(gettid());
11107 #ifdef TARGET_NR_readahead
11108 case TARGET_NR_readahead:
11109 #if TARGET_ABI_BITS == 32
11110 if (regpairs_aligned(cpu_env)) {
11115 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
11117 ret = get_errno(readahead(arg1, arg2, arg3));
11122 #ifdef TARGET_NR_setxattr
11123 case TARGET_NR_listxattr:
11124 case TARGET_NR_llistxattr:
11128 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11130 ret = -TARGET_EFAULT;
11134 p = lock_user_string(arg1);
11136 if (num == TARGET_NR_listxattr) {
11137 ret = get_errno(listxattr(p, b, arg3));
11139 ret = get_errno(llistxattr(p, b, arg3));
11142 ret = -TARGET_EFAULT;
11144 unlock_user(p, arg1, 0);
11145 unlock_user(b, arg2, arg3);
11148 case TARGET_NR_flistxattr:
11152 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11154 ret = -TARGET_EFAULT;
11158 ret = get_errno(flistxattr(arg1, b, arg3));
11159 unlock_user(b, arg2, arg3);
11162 case TARGET_NR_setxattr:
11163 case TARGET_NR_lsetxattr:
11165 void *p, *n, *v = 0;
11167 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11169 ret = -TARGET_EFAULT;
11173 p = lock_user_string(arg1);
11174 n = lock_user_string(arg2);
11176 if (num == TARGET_NR_setxattr) {
11177 ret = get_errno(setxattr(p, n, v, arg4, arg5));
11179 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11182 ret = -TARGET_EFAULT;
11184 unlock_user(p, arg1, 0);
11185 unlock_user(n, arg2, 0);
11186 unlock_user(v, arg3, 0);
11189 case TARGET_NR_fsetxattr:
11193 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11195 ret = -TARGET_EFAULT;
11199 n = lock_user_string(arg2);
11201 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11203 ret = -TARGET_EFAULT;
11205 unlock_user(n, arg2, 0);
11206 unlock_user(v, arg3, 0);
11209 case TARGET_NR_getxattr:
11210 case TARGET_NR_lgetxattr:
11212 void *p, *n, *v = 0;
11214 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11216 ret = -TARGET_EFAULT;
11220 p = lock_user_string(arg1);
11221 n = lock_user_string(arg2);
11223 if (num == TARGET_NR_getxattr) {
11224 ret = get_errno(getxattr(p, n, v, arg4));
11226 ret = get_errno(lgetxattr(p, n, v, arg4));
11229 ret = -TARGET_EFAULT;
11231 unlock_user(p, arg1, 0);
11232 unlock_user(n, arg2, 0);
11233 unlock_user(v, arg3, arg4);
11236 case TARGET_NR_fgetxattr:
11240 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11242 ret = -TARGET_EFAULT;
11246 n = lock_user_string(arg2);
11248 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11250 ret = -TARGET_EFAULT;
11252 unlock_user(n, arg2, 0);
11253 unlock_user(v, arg3, arg4);
11256 case TARGET_NR_removexattr:
11257 case TARGET_NR_lremovexattr:
11260 p = lock_user_string(arg1);
11261 n = lock_user_string(arg2);
11263 if (num == TARGET_NR_removexattr) {
11264 ret = get_errno(removexattr(p, n));
11266 ret = get_errno(lremovexattr(p, n));
11269 ret = -TARGET_EFAULT;
11271 unlock_user(p, arg1, 0);
11272 unlock_user(n, arg2, 0);
11275 case TARGET_NR_fremovexattr:
11278 n = lock_user_string(arg2);
11280 ret = get_errno(fremovexattr(arg1, n));
11282 ret = -TARGET_EFAULT;
11284 unlock_user(n, arg2, 0);
11288 #endif /* CONFIG_ATTR */
11289 #ifdef TARGET_NR_set_thread_area
11290 case TARGET_NR_set_thread_area:
11291 #if defined(TARGET_MIPS)
11292 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11295 #elif defined(TARGET_CRIS)
11297 ret = -TARGET_EINVAL;
11299 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11303 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11304 ret = do_set_thread_area(cpu_env, arg1);
11306 #elif defined(TARGET_M68K)
11308 TaskState *ts = cpu->opaque;
11309 ts->tp_value = arg1;
11314 goto unimplemented_nowarn;
11317 #ifdef TARGET_NR_get_thread_area
11318 case TARGET_NR_get_thread_area:
11319 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11320 ret = do_get_thread_area(cpu_env, arg1);
11322 #elif defined(TARGET_M68K)
11324 TaskState *ts = cpu->opaque;
11325 ret = ts->tp_value;
11329 goto unimplemented_nowarn;
11332 #ifdef TARGET_NR_getdomainname
11333 case TARGET_NR_getdomainname:
11334 goto unimplemented_nowarn;
11337 #ifdef TARGET_NR_clock_gettime
11338 case TARGET_NR_clock_gettime:
11340 struct timespec ts;
11341 ret = get_errno(clock_gettime(arg1, &ts));
11342 if (!is_error(ret)) {
11343 host_to_target_timespec(arg2, &ts);
11348 #ifdef TARGET_NR_clock_getres
11349 case TARGET_NR_clock_getres:
11351 struct timespec ts;
11352 ret = get_errno(clock_getres(arg1, &ts));
11353 if (!is_error(ret)) {
11354 host_to_target_timespec(arg2, &ts);
11359 #ifdef TARGET_NR_clock_nanosleep
11360 case TARGET_NR_clock_nanosleep:
11362 struct timespec ts;
11363 target_to_host_timespec(&ts, arg3);
11364 ret = get_errno(safe_clock_nanosleep(arg1, arg2,
11365 &ts, arg4 ? &ts : NULL));
11367 host_to_target_timespec(arg4, &ts);
11369 #if defined(TARGET_PPC)
11370 /* clock_nanosleep is odd in that it returns positive errno values.
11371 * On PPC, CR0 bit 3 should be set in such a situation. */
11372 if (ret && ret != -TARGET_ERESTARTSYS) {
11373 ((CPUPPCState *)cpu_env)->crf[0] |= 1;
11380 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11381 case TARGET_NR_set_tid_address:
11382 ret = get_errno(set_tid_address((int *)g2h(arg1)));
11386 case TARGET_NR_tkill:
11387 ret = get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
11390 case TARGET_NR_tgkill:
11391 ret = get_errno(safe_tgkill((int)arg1, (int)arg2,
11392 target_to_host_signal(arg3)));
11395 #ifdef TARGET_NR_set_robust_list
11396 case TARGET_NR_set_robust_list:
11397 case TARGET_NR_get_robust_list:
11398 /* The ABI for supporting robust futexes has userspace pass
11399 * the kernel a pointer to a linked list which is updated by
11400 * userspace after the syscall; the list is walked by the kernel
11401 * when the thread exits. Since the linked list in QEMU guest
11402 * memory isn't a valid linked list for the host and we have
11403 * no way to reliably intercept the thread-death event, we can't
11404 * support these. Silently return ENOSYS so that guest userspace
11405 * falls back to a non-robust futex implementation (which should
11406 * be OK except in the corner case of the guest crashing while
11407 * holding a mutex that is shared with another process via
11410 goto unimplemented_nowarn;
11413 #if defined(TARGET_NR_utimensat)
11414 case TARGET_NR_utimensat:
11416 struct timespec *tsp, ts[2];
11420 target_to_host_timespec(ts, arg3);
11421 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
11425 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
11427 if (!(p = lock_user_string(arg2))) {
11428 ret = -TARGET_EFAULT;
11431 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
11432 unlock_user(p, arg2, 0);
11437 case TARGET_NR_futex:
11438 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
11440 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11441 case TARGET_NR_inotify_init:
11442 ret = get_errno(sys_inotify_init());
11445 #ifdef CONFIG_INOTIFY1
11446 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11447 case TARGET_NR_inotify_init1:
11448 ret = get_errno(sys_inotify_init1(arg1));
11452 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11453 case TARGET_NR_inotify_add_watch:
11454 p = lock_user_string(arg2);
11455 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
11456 unlock_user(p, arg2, 0);
11459 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11460 case TARGET_NR_inotify_rm_watch:
11461 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
11465 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11466 case TARGET_NR_mq_open:
11468 struct mq_attr posix_mq_attr;
11471 host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
11472 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
11475 p = lock_user_string(arg1 - 1);
11479 ret = get_errno(mq_open(p, host_flags, arg3, &posix_mq_attr));
11480 unlock_user (p, arg1, 0);
11484 case TARGET_NR_mq_unlink:
11485 p = lock_user_string(arg1 - 1);
11487 ret = -TARGET_EFAULT;
11490 ret = get_errno(mq_unlink(p));
11491 unlock_user (p, arg1, 0);
11494 case TARGET_NR_mq_timedsend:
11496 struct timespec ts;
11498 p = lock_user (VERIFY_READ, arg2, arg3, 1);
11500 target_to_host_timespec(&ts, arg5);
11501 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
11502 host_to_target_timespec(arg5, &ts);
11504 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
11506 unlock_user (p, arg2, arg3);
11510 case TARGET_NR_mq_timedreceive:
11512 struct timespec ts;
11515 p = lock_user (VERIFY_READ, arg2, arg3, 1);
11517 target_to_host_timespec(&ts, arg5);
11518 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11520 host_to_target_timespec(arg5, &ts);
11522 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11525 unlock_user (p, arg2, arg3);
11527 put_user_u32(prio, arg4);
11531 /* Not implemented for now... */
11532 /* case TARGET_NR_mq_notify: */
11535 case TARGET_NR_mq_getsetattr:
11537 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
11540 ret = mq_getattr(arg1, &posix_mq_attr_out);
11541 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
11544 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
11545 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
11552 #ifdef CONFIG_SPLICE
11553 #ifdef TARGET_NR_tee
11554 case TARGET_NR_tee:
11556 ret = get_errno(tee(arg1,arg2,arg3,arg4));
11560 #ifdef TARGET_NR_splice
11561 case TARGET_NR_splice:
11563 loff_t loff_in, loff_out;
11564 loff_t *ploff_in = NULL, *ploff_out = NULL;
11566 if (get_user_u64(loff_in, arg2)) {
11569 ploff_in = &loff_in;
11572 if (get_user_u64(loff_out, arg4)) {
11575 ploff_out = &loff_out;
11577 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
11579 if (put_user_u64(loff_in, arg2)) {
11584 if (put_user_u64(loff_out, arg4)) {
11591 #ifdef TARGET_NR_vmsplice
11592 case TARGET_NR_vmsplice:
11594 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11596 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
11597 unlock_iovec(vec, arg2, arg3, 0);
11599 ret = -host_to_target_errno(errno);
11604 #endif /* CONFIG_SPLICE */
11605 #ifdef CONFIG_EVENTFD
11606 #if defined(TARGET_NR_eventfd)
11607 case TARGET_NR_eventfd:
11608 ret = get_errno(eventfd(arg1, 0));
11609 fd_trans_unregister(ret);
11612 #if defined(TARGET_NR_eventfd2)
11613 case TARGET_NR_eventfd2:
11615 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
11616 if (arg2 & TARGET_O_NONBLOCK) {
11617 host_flags |= O_NONBLOCK;
11619 if (arg2 & TARGET_O_CLOEXEC) {
11620 host_flags |= O_CLOEXEC;
11622 ret = get_errno(eventfd(arg1, host_flags));
11623 fd_trans_unregister(ret);
11627 #endif /* CONFIG_EVENTFD */
11628 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11629 case TARGET_NR_fallocate:
11630 #if TARGET_ABI_BITS == 32
11631 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
11632 target_offset64(arg5, arg6)));
11634 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
11638 #if defined(CONFIG_SYNC_FILE_RANGE)
11639 #if defined(TARGET_NR_sync_file_range)
11640 case TARGET_NR_sync_file_range:
11641 #if TARGET_ABI_BITS == 32
11642 #if defined(TARGET_MIPS)
11643 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11644 target_offset64(arg5, arg6), arg7));
11646 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
11647 target_offset64(arg4, arg5), arg6));
11648 #endif /* !TARGET_MIPS */
11650 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
11654 #if defined(TARGET_NR_sync_file_range2)
11655 case TARGET_NR_sync_file_range2:
11656 /* This is like sync_file_range but the arguments are reordered */
11657 #if TARGET_ABI_BITS == 32
11658 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11659 target_offset64(arg5, arg6), arg2));
11661 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
11666 #if defined(TARGET_NR_signalfd4)
11667 case TARGET_NR_signalfd4:
11668 ret = do_signalfd4(arg1, arg2, arg4);
11671 #if defined(TARGET_NR_signalfd)
11672 case TARGET_NR_signalfd:
11673 ret = do_signalfd4(arg1, arg2, 0);
11676 #if defined(CONFIG_EPOLL)
11677 #if defined(TARGET_NR_epoll_create)
11678 case TARGET_NR_epoll_create:
11679 ret = get_errno(epoll_create(arg1));
11682 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11683 case TARGET_NR_epoll_create1:
11684 ret = get_errno(epoll_create1(arg1));
11687 #if defined(TARGET_NR_epoll_ctl)
11688 case TARGET_NR_epoll_ctl:
11690 struct epoll_event ep;
11691 struct epoll_event *epp = 0;
11693 struct target_epoll_event *target_ep;
11694 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
11697 ep.events = tswap32(target_ep->events);
11698 /* The epoll_data_t union is just opaque data to the kernel,
11699 * so we transfer all 64 bits across and need not worry what
11700 * actual data type it is.
11702 ep.data.u64 = tswap64(target_ep->data.u64);
11703 unlock_user_struct(target_ep, arg4, 0);
11706 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
11711 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11712 #if defined(TARGET_NR_epoll_wait)
11713 case TARGET_NR_epoll_wait:
11715 #if defined(TARGET_NR_epoll_pwait)
11716 case TARGET_NR_epoll_pwait:
11719 struct target_epoll_event *target_ep;
11720 struct epoll_event *ep;
11722 int maxevents = arg3;
11723 int timeout = arg4;
11725 if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
11726 ret = -TARGET_EINVAL;
11730 target_ep = lock_user(VERIFY_WRITE, arg2,
11731 maxevents * sizeof(struct target_epoll_event), 1);
11736 ep = alloca(maxevents * sizeof(struct epoll_event));
11739 #if defined(TARGET_NR_epoll_pwait)
11740 case TARGET_NR_epoll_pwait:
11742 target_sigset_t *target_set;
11743 sigset_t _set, *set = &_set;
11746 if (arg6 != sizeof(target_sigset_t)) {
11747 ret = -TARGET_EINVAL;
11751 target_set = lock_user(VERIFY_READ, arg5,
11752 sizeof(target_sigset_t), 1);
11754 unlock_user(target_ep, arg2, 0);
11757 target_to_host_sigset(set, target_set);
11758 unlock_user(target_set, arg5, 0);
11763 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11764 set, SIGSET_T_SIZE));
11768 #if defined(TARGET_NR_epoll_wait)
11769 case TARGET_NR_epoll_wait:
11770 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11775 ret = -TARGET_ENOSYS;
11777 if (!is_error(ret)) {
11779 for (i = 0; i < ret; i++) {
11780 target_ep[i].events = tswap32(ep[i].events);
11781 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
11784 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
11789 #ifdef TARGET_NR_prlimit64
11790 case TARGET_NR_prlimit64:
11792 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
11793 struct target_rlimit64 *target_rnew, *target_rold;
11794 struct host_rlimit64 rnew, rold, *rnewp = 0;
11795 int resource = target_to_host_resource(arg2);
11797 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
11800 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
11801 rnew.rlim_max = tswap64(target_rnew->rlim_max);
11802 unlock_user_struct(target_rnew, arg3, 0);
11806 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
11807 if (!is_error(ret) && arg4) {
11808 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
11811 target_rold->rlim_cur = tswap64(rold.rlim_cur);
11812 target_rold->rlim_max = tswap64(rold.rlim_max);
11813 unlock_user_struct(target_rold, arg4, 1);
11818 #ifdef TARGET_NR_gethostname
11819 case TARGET_NR_gethostname:
11821 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
11823 ret = get_errno(gethostname(name, arg2));
11824 unlock_user(name, arg1, arg2);
11826 ret = -TARGET_EFAULT;
11831 #ifdef TARGET_NR_atomic_cmpxchg_32
11832 case TARGET_NR_atomic_cmpxchg_32:
11834 /* should use start_exclusive from main.c */
11835 abi_ulong mem_value;
11836 if (get_user_u32(mem_value, arg6)) {
11837 target_siginfo_t info;
11838 info.si_signo = SIGSEGV;
11840 info.si_code = TARGET_SEGV_MAPERR;
11841 info._sifields._sigfault._addr = arg6;
11842 queue_signal((CPUArchState *)cpu_env, info.si_signo,
11843 QEMU_SI_FAULT, &info);
11847 if (mem_value == arg2)
11848 put_user_u32(arg1, arg6);
11853 #ifdef TARGET_NR_atomic_barrier
11854 case TARGET_NR_atomic_barrier:
11856 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
11862 #ifdef TARGET_NR_timer_create
11863 case TARGET_NR_timer_create:
11865 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
11867 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
11870 int timer_index = next_free_host_timer();
11872 if (timer_index < 0) {
11873 ret = -TARGET_EAGAIN;
11875 timer_t *phtimer = g_posix_timers + timer_index;
11878 phost_sevp = &host_sevp;
11879 ret = target_to_host_sigevent(phost_sevp, arg2);
11885 ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
11889 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
11898 #ifdef TARGET_NR_timer_settime
11899 case TARGET_NR_timer_settime:
11901 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
11902 * struct itimerspec * old_value */
11903 target_timer_t timerid = get_timer_id(arg1);
11907 } else if (arg3 == 0) {
11908 ret = -TARGET_EINVAL;
11910 timer_t htimer = g_posix_timers[timerid];
11911 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
11913 target_to_host_itimerspec(&hspec_new, arg3);
11915 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
11916 host_to_target_itimerspec(arg2, &hspec_old);
11922 #ifdef TARGET_NR_timer_gettime
11923 case TARGET_NR_timer_gettime:
11925 /* args: timer_t timerid, struct itimerspec *curr_value */
11926 target_timer_t timerid = get_timer_id(arg1);
11930 } else if (!arg2) {
11931 ret = -TARGET_EFAULT;
11933 timer_t htimer = g_posix_timers[timerid];
11934 struct itimerspec hspec;
11935 ret = get_errno(timer_gettime(htimer, &hspec));
11937 if (host_to_target_itimerspec(arg2, &hspec)) {
11938 ret = -TARGET_EFAULT;
11945 #ifdef TARGET_NR_timer_getoverrun
11946 case TARGET_NR_timer_getoverrun:
11948 /* args: timer_t timerid */
11949 target_timer_t timerid = get_timer_id(arg1);
11954 timer_t htimer = g_posix_timers[timerid];
11955 ret = get_errno(timer_getoverrun(htimer));
11957 fd_trans_unregister(ret);
11962 #ifdef TARGET_NR_timer_delete
11963 case TARGET_NR_timer_delete:
11965 /* args: timer_t timerid */
11966 target_timer_t timerid = get_timer_id(arg1);
11971 timer_t htimer = g_posix_timers[timerid];
11972 ret = get_errno(timer_delete(htimer));
11973 g_posix_timers[timerid] = 0;
11979 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11980 case TARGET_NR_timerfd_create:
11981 ret = get_errno(timerfd_create(arg1,
11982 target_to_host_bitmask(arg2, fcntl_flags_tbl)));
11986 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11987 case TARGET_NR_timerfd_gettime:
11989 struct itimerspec its_curr;
11991 ret = get_errno(timerfd_gettime(arg1, &its_curr));
11993 if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
12000 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12001 case TARGET_NR_timerfd_settime:
12003 struct itimerspec its_new, its_old, *p_new;
12006 if (target_to_host_itimerspec(&its_new, arg3)) {
12014 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12016 if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
12023 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12024 case TARGET_NR_ioprio_get:
12025 ret = get_errno(ioprio_get(arg1, arg2));
12029 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12030 case TARGET_NR_ioprio_set:
12031 ret = get_errno(ioprio_set(arg1, arg2, arg3));
12035 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12036 case TARGET_NR_setns:
12037 ret = get_errno(setns(arg1, arg2));
12040 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12041 case TARGET_NR_unshare:
12042 ret = get_errno(unshare(arg1));
12048 gemu_log("qemu: Unsupported syscall: %d\n", num);
12049 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
12050 unimplemented_nowarn:
12052 ret = -TARGET_ENOSYS;
12057 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
12060 print_syscall_ret(num, ret);
12061 trace_guest_user_syscall_ret(cpu, num, ret);
12064 ret = -TARGET_EFAULT;