4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
29 #include <sys/mount.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
36 #include <linux/capability.h>
38 #include <sys/timex.h>
39 #include <sys/socket.h>
43 #include <sys/times.h>
46 #include <sys/statfs.h>
48 #include <sys/sysinfo.h>
49 #include <sys/signalfd.h>
50 //#include <sys/user.h>
51 #include <netinet/ip.h>
52 #include <netinet/tcp.h>
53 #include <linux/wireless.h>
54 #include <linux/icmp.h>
55 #include <linux/icmpv6.h>
56 #include <linux/errqueue.h>
57 #include <linux/random.h>
58 #include "qemu-common.h"
60 #include <sys/timerfd.h>
66 #include <sys/eventfd.h>
69 #include <sys/epoll.h>
72 #include "qemu/xattr.h"
74 #ifdef CONFIG_SENDFILE
75 #include <sys/sendfile.h>
78 #define termios host_termios
79 #define winsize host_winsize
80 #define termio host_termio
81 #define sgttyb host_sgttyb /* same as target */
82 #define tchars host_tchars /* same as target */
83 #define ltchars host_ltchars /* same as target */
85 #include <linux/termios.h>
86 #include <linux/unistd.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
91 #include <linux/mtio.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
98 #include <linux/dm-ioctl.h>
99 #include <linux/reboot.h>
100 #include <linux/route.h>
101 #include <linux/filter.h>
102 #include <linux/blkpg.h>
103 #include <netpacket/packet.h>
104 #include <linux/netlink.h>
105 #ifdef CONFIG_RTNETLINK
106 #include <linux/rtnetlink.h>
107 #include <linux/if_bridge.h>
109 #include <linux/audit.h>
110 #include "linux_loop.h"
116 #define CLONE_IO 0x80000000 /* Clone io context */
119 /* We can't directly call the host clone syscall, because this will
120 * badly confuse libc (breaking mutexes, for example). So we must
121 * divide clone flags into:
122 * * flag combinations that look like pthread_create()
123 * * flag combinations that look like fork()
124 * * flags we can implement within QEMU itself
125 * * flags we can't support and will return an error for
127 /* For thread creation, all these flags must be present; for
128 * fork, none must be present.
130 #define CLONE_THREAD_FLAGS \
131 (CLONE_VM | CLONE_FS | CLONE_FILES | \
132 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
134 /* These flags are ignored:
135 * CLONE_DETACHED is now ignored by the kernel;
136 * CLONE_IO is just an optimisation hint to the I/O scheduler
138 #define CLONE_IGNORED_FLAGS \
139 (CLONE_DETACHED | CLONE_IO)
141 /* Flags for fork which we can implement within QEMU itself */
142 #define CLONE_OPTIONAL_FORK_FLAGS \
143 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
144 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
146 /* Flags for thread creation which we can implement within QEMU itself */
147 #define CLONE_OPTIONAL_THREAD_FLAGS \
148 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
149 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
151 #define CLONE_INVALID_FORK_FLAGS \
152 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
154 #define CLONE_INVALID_THREAD_FLAGS \
155 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
156 CLONE_IGNORED_FLAGS))
158 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
159 * have almost all been allocated. We cannot support any of
160 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
161 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
162 * The checks against the invalid thread masks above will catch these.
163 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
167 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
168 * once. This exercises the codepaths for restart.
170 //#define DEBUG_ERESTARTSYS
172 //#include <linux/msdos_fs.h>
173 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
174 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
184 #define _syscall0(type,name) \
185 static type name (void) \
187 return syscall(__NR_##name); \
190 #define _syscall1(type,name,type1,arg1) \
191 static type name (type1 arg1) \
193 return syscall(__NR_##name, arg1); \
196 #define _syscall2(type,name,type1,arg1,type2,arg2) \
197 static type name (type1 arg1,type2 arg2) \
199 return syscall(__NR_##name, arg1, arg2); \
202 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
203 static type name (type1 arg1,type2 arg2,type3 arg3) \
205 return syscall(__NR_##name, arg1, arg2, arg3); \
208 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
209 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
211 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
214 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
216 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
218 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
222 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
223 type5,arg5,type6,arg6) \
224 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
227 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
231 #define __NR_sys_uname __NR_uname
232 #define __NR_sys_getcwd1 __NR_getcwd
233 #define __NR_sys_getdents __NR_getdents
234 #define __NR_sys_getdents64 __NR_getdents64
235 #define __NR_sys_getpriority __NR_getpriority
236 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
237 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
238 #define __NR_sys_syslog __NR_syslog
239 #define __NR_sys_futex __NR_futex
240 #define __NR_sys_inotify_init __NR_inotify_init
241 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
242 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
244 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
245 #define __NR__llseek __NR_lseek
248 /* Newer kernel ports have llseek() instead of _llseek() */
249 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
250 #define TARGET_NR__llseek TARGET_NR_llseek
/* Host gettid(): historically glibc did not export a gettid() wrapper,
 * so invoke the raw syscall where the host kernel provides it.
 */
#ifdef __NR_gettid
_syscall0(int, gettid)
#else
/* This is a replacement for the host gettid() and must return a host
 * errno. */
static int gettid(void) {
    return -ENOSYS;
}
#endif
262 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
263 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
265 #if !defined(__NR_getdents) || \
266 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
267 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
269 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
270 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
271 loff_t *, res, uint, wh);
273 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
274 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
276 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
277 #ifdef __NR_exit_group
278 _syscall1(int,exit_group,int,error_code)
280 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
281 _syscall1(int,set_tid_address,int *,tidptr)
283 #if defined(TARGET_NR_futex) && defined(__NR_futex)
284 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
285 const struct timespec *,timeout,int *,uaddr2,int,val3)
287 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
288 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
289 unsigned long *, user_mask_ptr);
290 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
291 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
292 unsigned long *, user_mask_ptr);
293 #define __NR_sys_getcpu __NR_getcpu
294 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
295 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
297 _syscall2(int, capget, struct __user_cap_header_struct *, header,
298 struct __user_cap_data_struct *, data);
299 _syscall2(int, capset, struct __user_cap_header_struct *, header,
300 struct __user_cap_data_struct *, data);
301 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
302 _syscall2(int, ioprio_get, int, which, int, who)
304 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
305 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
307 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
308 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
311 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
312 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
313 unsigned long, idx1, unsigned long, idx2)
316 static bitmask_transtbl fcntl_flags_tbl[] = {
317 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
318 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
319 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
320 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
321 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
322 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
323 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
324 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
325 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
326 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
327 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
328 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
329 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
330 #if defined(O_DIRECT)
331 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
333 #if defined(O_NOATIME)
334 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
336 #if defined(O_CLOEXEC)
337 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
340 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
342 #if defined(O_TMPFILE)
343 { TARGET_O_TMPFILE, TARGET_O_TMPFILE, O_TMPFILE, O_TMPFILE },
345 /* Don't terminate the list prematurely on 64-bit host+guest. */
346 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
347 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
354 QEMU_IFLA_BR_FORWARD_DELAY,
355 QEMU_IFLA_BR_HELLO_TIME,
356 QEMU_IFLA_BR_MAX_AGE,
357 QEMU_IFLA_BR_AGEING_TIME,
358 QEMU_IFLA_BR_STP_STATE,
359 QEMU_IFLA_BR_PRIORITY,
360 QEMU_IFLA_BR_VLAN_FILTERING,
361 QEMU_IFLA_BR_VLAN_PROTOCOL,
362 QEMU_IFLA_BR_GROUP_FWD_MASK,
363 QEMU_IFLA_BR_ROOT_ID,
364 QEMU_IFLA_BR_BRIDGE_ID,
365 QEMU_IFLA_BR_ROOT_PORT,
366 QEMU_IFLA_BR_ROOT_PATH_COST,
367 QEMU_IFLA_BR_TOPOLOGY_CHANGE,
368 QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
369 QEMU_IFLA_BR_HELLO_TIMER,
370 QEMU_IFLA_BR_TCN_TIMER,
371 QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER,
372 QEMU_IFLA_BR_GC_TIMER,
373 QEMU_IFLA_BR_GROUP_ADDR,
374 QEMU_IFLA_BR_FDB_FLUSH,
375 QEMU_IFLA_BR_MCAST_ROUTER,
376 QEMU_IFLA_BR_MCAST_SNOOPING,
377 QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR,
378 QEMU_IFLA_BR_MCAST_QUERIER,
379 QEMU_IFLA_BR_MCAST_HASH_ELASTICITY,
380 QEMU_IFLA_BR_MCAST_HASH_MAX,
381 QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT,
382 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT,
383 QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL,
384 QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL,
385 QEMU_IFLA_BR_MCAST_QUERIER_INTVL,
386 QEMU_IFLA_BR_MCAST_QUERY_INTVL,
387 QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL,
388 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL,
389 QEMU_IFLA_BR_NF_CALL_IPTABLES,
390 QEMU_IFLA_BR_NF_CALL_IP6TABLES,
391 QEMU_IFLA_BR_NF_CALL_ARPTABLES,
392 QEMU_IFLA_BR_VLAN_DEFAULT_PVID,
394 QEMU_IFLA_BR_VLAN_STATS_ENABLED,
395 QEMU_IFLA_BR_MCAST_STATS_ENABLED,
419 QEMU_IFLA_NET_NS_PID,
422 QEMU_IFLA_VFINFO_LIST,
430 QEMU_IFLA_PROMISCUITY,
431 QEMU_IFLA_NUM_TX_QUEUES,
432 QEMU_IFLA_NUM_RX_QUEUES,
434 QEMU_IFLA_PHYS_PORT_ID,
435 QEMU_IFLA_CARRIER_CHANGES,
436 QEMU_IFLA_PHYS_SWITCH_ID,
437 QEMU_IFLA_LINK_NETNSID,
438 QEMU_IFLA_PHYS_PORT_NAME,
439 QEMU_IFLA_PROTO_DOWN,
440 QEMU_IFLA_GSO_MAX_SEGS,
441 QEMU_IFLA_GSO_MAX_SIZE,
448 QEMU_IFLA_BRPORT_UNSPEC,
449 QEMU_IFLA_BRPORT_STATE,
450 QEMU_IFLA_BRPORT_PRIORITY,
451 QEMU_IFLA_BRPORT_COST,
452 QEMU_IFLA_BRPORT_MODE,
453 QEMU_IFLA_BRPORT_GUARD,
454 QEMU_IFLA_BRPORT_PROTECT,
455 QEMU_IFLA_BRPORT_FAST_LEAVE,
456 QEMU_IFLA_BRPORT_LEARNING,
457 QEMU_IFLA_BRPORT_UNICAST_FLOOD,
458 QEMU_IFLA_BRPORT_PROXYARP,
459 QEMU_IFLA_BRPORT_LEARNING_SYNC,
460 QEMU_IFLA_BRPORT_PROXYARP_WIFI,
461 QEMU_IFLA_BRPORT_ROOT_ID,
462 QEMU_IFLA_BRPORT_BRIDGE_ID,
463 QEMU_IFLA_BRPORT_DESIGNATED_PORT,
464 QEMU_IFLA_BRPORT_DESIGNATED_COST,
467 QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
468 QEMU_IFLA_BRPORT_CONFIG_PENDING,
469 QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER,
470 QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER,
471 QEMU_IFLA_BRPORT_HOLD_TIMER,
472 QEMU_IFLA_BRPORT_FLUSH,
473 QEMU_IFLA_BRPORT_MULTICAST_ROUTER,
474 QEMU_IFLA_BRPORT_PAD,
475 QEMU___IFLA_BRPORT_MAX
479 QEMU_IFLA_INFO_UNSPEC,
482 QEMU_IFLA_INFO_XSTATS,
483 QEMU_IFLA_INFO_SLAVE_KIND,
484 QEMU_IFLA_INFO_SLAVE_DATA,
485 QEMU___IFLA_INFO_MAX,
489 QEMU_IFLA_INET_UNSPEC,
491 QEMU___IFLA_INET_MAX,
495 QEMU_IFLA_INET6_UNSPEC,
496 QEMU_IFLA_INET6_FLAGS,
497 QEMU_IFLA_INET6_CONF,
498 QEMU_IFLA_INET6_STATS,
499 QEMU_IFLA_INET6_MCAST,
500 QEMU_IFLA_INET6_CACHEINFO,
501 QEMU_IFLA_INET6_ICMP6STATS,
502 QEMU_IFLA_INET6_TOKEN,
503 QEMU_IFLA_INET6_ADDR_GEN_MODE,
504 QEMU___IFLA_INET6_MAX
507 typedef abi_long (*TargetFdDataFunc)(void *, size_t);
508 typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t);
509 typedef struct TargetFdTrans {
510 TargetFdDataFunc host_to_target_data;
511 TargetFdDataFunc target_to_host_data;
512 TargetFdAddrFunc target_to_host_addr;
515 static TargetFdTrans **target_fd_trans;
517 static unsigned int target_fd_max;
519 static TargetFdDataFunc fd_trans_target_to_host_data(int fd)
521 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
522 return target_fd_trans[fd]->target_to_host_data;
527 static TargetFdDataFunc fd_trans_host_to_target_data(int fd)
529 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
530 return target_fd_trans[fd]->host_to_target_data;
535 static TargetFdAddrFunc fd_trans_target_to_host_addr(int fd)
537 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
538 return target_fd_trans[fd]->target_to_host_addr;
543 static void fd_trans_register(int fd, TargetFdTrans *trans)
547 if (fd >= target_fd_max) {
548 oldmax = target_fd_max;
549 target_fd_max = ((fd >> 6) + 1) << 6; /* by slice of 64 entries */
550 target_fd_trans = g_renew(TargetFdTrans *,
551 target_fd_trans, target_fd_max);
552 memset((void *)(target_fd_trans + oldmax), 0,
553 (target_fd_max - oldmax) * sizeof(TargetFdTrans *));
555 target_fd_trans[fd] = trans;
558 static void fd_trans_unregister(int fd)
560 if (fd >= 0 && fd < target_fd_max) {
561 target_fd_trans[fd] = NULL;
565 static void fd_trans_dup(int oldfd, int newfd)
567 fd_trans_unregister(newfd);
568 if (oldfd < target_fd_max && target_fd_trans[oldfd]) {
569 fd_trans_register(newfd, target_fd_trans[oldfd]);
/* getcwd() with the kernel syscall's return convention: on success return
 * the number of bytes written to buf including the trailing NUL, on
 * failure return -1 with errno set by getcwd(). */
static int sys_getcwd1(char *buf, size_t size)
{
  if (getcwd(buf, size) == NULL) {
      /* getcwd() sets errno */
      return (-1);
  }
  return strlen(buf)+1;
}
#ifdef TARGET_NR_utimensat
#if defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
/* Host kernel has no utimensat: report ENOSYS so the caller can tell the
 * guest the syscall is unavailable. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */
#ifdef TARGET_NR_renameat2
#if defined(__NR_renameat2)
#define __NR_sys_renameat2 __NR_renameat2
_syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
          const char *, new, unsigned int, flags)
#else
/* Host kernel has no renameat2: fall back to plain renameat() when no
 * flags are requested, otherwise report ENOSYS. */
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags == 0) {
        return renameat(oldfd, old, newfd, new);
    }
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_renameat2 */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

/* Thin wrappers so the syscall dispatch code has a uniform sys_* name for
 * the libc inotify entry points, each gated on both target and host
 * supporting the corresponding syscall. */
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
  return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
  return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */
#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};

_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, } ;

/* Claim the first free slot in g_posix_timers and return its index, or -1
 * if all slots are in use.  The slot is marked busy by storing a dummy
 * non-zero value; the caller overwrites it with the real timer handle. */
static inline int next_free_host_timer(void)
{
    int k ;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif
/* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env, int num)
{
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#elif defined(TARGET_SH4)
/* SH4 doesn't align register pairs, except for p{read,write}64 */
static inline int regpairs_aligned(void *cpu_env, int num)
{
    switch (num) {
    case TARGET_NR_pread64:
    case TARGET_NR_pwrite64:
        return 1;

    default:
        return 0;
    }
}
#elif defined(TARGET_XTENSA)
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
#endif
718 #define ERRNO_TABLE_SIZE 1200
720 /* target_to_host_errno_table[] is initialized from
721 * host_to_target_errno_table[] in syscall_init(). */
722 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
726 * This list is the union of errno values overridden in asm-<arch>/errno.h
727 * minus the errnos that are not actually generic to all archs.
729 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
730 [EAGAIN] = TARGET_EAGAIN,
731 [EIDRM] = TARGET_EIDRM,
732 [ECHRNG] = TARGET_ECHRNG,
733 [EL2NSYNC] = TARGET_EL2NSYNC,
734 [EL3HLT] = TARGET_EL3HLT,
735 [EL3RST] = TARGET_EL3RST,
736 [ELNRNG] = TARGET_ELNRNG,
737 [EUNATCH] = TARGET_EUNATCH,
738 [ENOCSI] = TARGET_ENOCSI,
739 [EL2HLT] = TARGET_EL2HLT,
740 [EDEADLK] = TARGET_EDEADLK,
741 [ENOLCK] = TARGET_ENOLCK,
742 [EBADE] = TARGET_EBADE,
743 [EBADR] = TARGET_EBADR,
744 [EXFULL] = TARGET_EXFULL,
745 [ENOANO] = TARGET_ENOANO,
746 [EBADRQC] = TARGET_EBADRQC,
747 [EBADSLT] = TARGET_EBADSLT,
748 [EBFONT] = TARGET_EBFONT,
749 [ENOSTR] = TARGET_ENOSTR,
750 [ENODATA] = TARGET_ENODATA,
751 [ETIME] = TARGET_ETIME,
752 [ENOSR] = TARGET_ENOSR,
753 [ENONET] = TARGET_ENONET,
754 [ENOPKG] = TARGET_ENOPKG,
755 [EREMOTE] = TARGET_EREMOTE,
756 [ENOLINK] = TARGET_ENOLINK,
757 [EADV] = TARGET_EADV,
758 [ESRMNT] = TARGET_ESRMNT,
759 [ECOMM] = TARGET_ECOMM,
760 [EPROTO] = TARGET_EPROTO,
761 [EDOTDOT] = TARGET_EDOTDOT,
762 [EMULTIHOP] = TARGET_EMULTIHOP,
763 [EBADMSG] = TARGET_EBADMSG,
764 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
765 [EOVERFLOW] = TARGET_EOVERFLOW,
766 [ENOTUNIQ] = TARGET_ENOTUNIQ,
767 [EBADFD] = TARGET_EBADFD,
768 [EREMCHG] = TARGET_EREMCHG,
769 [ELIBACC] = TARGET_ELIBACC,
770 [ELIBBAD] = TARGET_ELIBBAD,
771 [ELIBSCN] = TARGET_ELIBSCN,
772 [ELIBMAX] = TARGET_ELIBMAX,
773 [ELIBEXEC] = TARGET_ELIBEXEC,
774 [EILSEQ] = TARGET_EILSEQ,
775 [ENOSYS] = TARGET_ENOSYS,
776 [ELOOP] = TARGET_ELOOP,
777 [ERESTART] = TARGET_ERESTART,
778 [ESTRPIPE] = TARGET_ESTRPIPE,
779 [ENOTEMPTY] = TARGET_ENOTEMPTY,
780 [EUSERS] = TARGET_EUSERS,
781 [ENOTSOCK] = TARGET_ENOTSOCK,
782 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
783 [EMSGSIZE] = TARGET_EMSGSIZE,
784 [EPROTOTYPE] = TARGET_EPROTOTYPE,
785 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
786 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
787 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
788 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
789 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
790 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
791 [EADDRINUSE] = TARGET_EADDRINUSE,
792 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
793 [ENETDOWN] = TARGET_ENETDOWN,
794 [ENETUNREACH] = TARGET_ENETUNREACH,
795 [ENETRESET] = TARGET_ENETRESET,
796 [ECONNABORTED] = TARGET_ECONNABORTED,
797 [ECONNRESET] = TARGET_ECONNRESET,
798 [ENOBUFS] = TARGET_ENOBUFS,
799 [EISCONN] = TARGET_EISCONN,
800 [ENOTCONN] = TARGET_ENOTCONN,
801 [EUCLEAN] = TARGET_EUCLEAN,
802 [ENOTNAM] = TARGET_ENOTNAM,
803 [ENAVAIL] = TARGET_ENAVAIL,
804 [EISNAM] = TARGET_EISNAM,
805 [EREMOTEIO] = TARGET_EREMOTEIO,
806 [EDQUOT] = TARGET_EDQUOT,
807 [ESHUTDOWN] = TARGET_ESHUTDOWN,
808 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
809 [ETIMEDOUT] = TARGET_ETIMEDOUT,
810 [ECONNREFUSED] = TARGET_ECONNREFUSED,
811 [EHOSTDOWN] = TARGET_EHOSTDOWN,
812 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
813 [EALREADY] = TARGET_EALREADY,
814 [EINPROGRESS] = TARGET_EINPROGRESS,
815 [ESTALE] = TARGET_ESTALE,
816 [ECANCELED] = TARGET_ECANCELED,
817 [ENOMEDIUM] = TARGET_ENOMEDIUM,
818 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
820 [ENOKEY] = TARGET_ENOKEY,
823 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
826 [EKEYREVOKED] = TARGET_EKEYREVOKED,
829 [EKEYREJECTED] = TARGET_EKEYREJECTED,
832 [EOWNERDEAD] = TARGET_EOWNERDEAD,
834 #ifdef ENOTRECOVERABLE
835 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
838 [ENOMSG] = TARGET_ENOMSG,
841 [ERFKILL] = TARGET_ERFKILL,
844 [EHWPOISON] = TARGET_EHWPOISON,
848 static inline int host_to_target_errno(int err)
850 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
851 host_to_target_errno_table[err]) {
852 return host_to_target_errno_table[err];
857 static inline int target_to_host_errno(int err)
859 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
860 target_to_host_errno_table[err]) {
861 return target_to_host_errno_table[err];
866 static inline abi_long get_errno(abi_long ret)
869 return -host_to_target_errno(errno);
874 static inline int is_error(abi_long ret)
876 return (abi_ulong)ret >= (abi_ulong)(-4096);
879 const char *target_strerror(int err)
881 if (err == TARGET_ERESTARTSYS) {
882 return "To be restarted";
884 if (err == TARGET_QEMU_ESIGRETURN) {
885 return "Successful exit from sigreturn";
888 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
891 return strerror(target_to_host_errno(err));
894 #define safe_syscall0(type, name) \
895 static type safe_##name(void) \
897 return safe_syscall(__NR_##name); \
900 #define safe_syscall1(type, name, type1, arg1) \
901 static type safe_##name(type1 arg1) \
903 return safe_syscall(__NR_##name, arg1); \
906 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
907 static type safe_##name(type1 arg1, type2 arg2) \
909 return safe_syscall(__NR_##name, arg1, arg2); \
912 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
913 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
915 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
918 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
920 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
922 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
925 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
926 type4, arg4, type5, arg5) \
927 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
930 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
933 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
934 type4, arg4, type5, arg5, type6, arg6) \
935 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
936 type5 arg5, type6 arg6) \
938 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
941 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
942 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
943 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
944 int, flags, mode_t, mode)
945 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
946 struct rusage *, rusage)
947 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
948 int, options, struct rusage *, rusage)
949 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
950 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
951 fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
952 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
953 struct timespec *, tsp, const sigset_t *, sigmask,
955 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
956 int, maxevents, int, timeout, const sigset_t *, sigmask,
958 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
959 const struct timespec *,timeout,int *,uaddr2,int,val3)
960 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
961 safe_syscall2(int, kill, pid_t, pid, int, sig)
962 safe_syscall2(int, tkill, int, tid, int, sig)
963 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
964 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
965 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
966 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
967 unsigned long, pos_l, unsigned long, pos_h)
968 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
969 unsigned long, pos_l, unsigned long, pos_h)
970 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
972 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
973 int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
974 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
975 int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
976 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
977 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
978 safe_syscall2(int, flock, int, fd, int, operation)
979 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
980 const struct timespec *, uts, size_t, sigsetsize)
981 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
983 safe_syscall2(int, nanosleep, const struct timespec *, req,
984 struct timespec *, rem)
985 #ifdef TARGET_NR_clock_nanosleep
986 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
987 const struct timespec *, req, struct timespec *, rem)
990 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
992 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
993 long, msgtype, int, flags)
994 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
995 unsigned, nsops, const struct timespec *, timeout)
997 /* This host kernel architecture uses a single ipc syscall; fake up
998 * wrappers for the sub-operations to hide this implementation detail.
999 * Annoyingly we can't include linux/ipc.h to get the constant definitions
1000 * for the call parameter because some structs in there conflict with the
1001 * sys/ipc.h ones. So we just define them here, and rely on them being
1002 * the same for all host architectures.
1004 #define Q_SEMTIMEDOP 4
1007 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
1009 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
1010 void *, ptr, long, fifth)
1011 static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
1013 return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
1015 static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
1017 return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
1019 static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
1020 const struct timespec *timeout)
1022 return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
1026 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1027 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
1028 size_t, len, unsigned, prio, const struct timespec *, timeout)
1029 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
1030 size_t, len, unsigned *, prio, const struct timespec *, timeout)
1032 /* We do ioctl like this rather than via safe_syscall3 to preserve the
1033 * "third argument might be integer or pointer or not present" behaviour of
1034 * the libc function.
1036 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
1037 /* Similarly for fcntl. Note that callers must always:
1038 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
1039 * use the flock64 struct rather than unsuffixed flock
1040 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
1043 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
1045 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
1048 static inline int host_to_target_sock_type(int host_type)
1052 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
1054 target_type = TARGET_SOCK_DGRAM;
1057 target_type = TARGET_SOCK_STREAM;
1060 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
1064 #if defined(SOCK_CLOEXEC)
1065 if (host_type & SOCK_CLOEXEC) {
1066 target_type |= TARGET_SOCK_CLOEXEC;
1070 #if defined(SOCK_NONBLOCK)
1071 if (host_type & SOCK_NONBLOCK) {
1072 target_type |= TARGET_SOCK_NONBLOCK;
1079 static abi_ulong target_brk;
1080 static abi_ulong target_original_brk;
1081 static abi_ulong brk_page;
1083 void target_set_brk(abi_ulong new_brk)
1085 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
1086 brk_page = HOST_PAGE_ALIGN(target_brk);
1089 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
1090 #define DEBUGF_BRK(message, args...)
1092 /* do_brk() must return target values and target errnos. */
1093 abi_long do_brk(abi_ulong new_brk)
1095 abi_long mapped_addr;
1096 abi_ulong new_alloc_size;
1098 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
/* brk(0) queries the current break without changing it. */
1101 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
/* Never allow the break to move below the original break. */
1104 if (new_brk < target_original_brk) {
1105 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
1110 /* If the new brk is less than the highest page reserved to the
1111 * target heap allocation, set it and we're almost done... */
1112 if (new_brk <= brk_page) {
1113 /* Heap contents are initialized to zero, as for anonymous
1115 if (new_brk > target_brk) {
1116 memset(g2h(target_brk), 0, new_brk - target_brk);
1118 target_brk = new_brk;
1119 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
1123 /* We need to allocate more memory after the brk... Note that
1124 * we don't use MAP_FIXED because that will map over the top of
1125 * any existing mapping (like the one with the host libc or qemu
1126 * itself); instead we treat "mapped but at wrong address" as
1127 * a failure and unmap again.
1129 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
1130 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
1131 PROT_READ|PROT_WRITE,
1132 MAP_ANON|MAP_PRIVATE, 0, 0));
1134 if (mapped_addr == brk_page) {
1135 /* Heap contents are initialized to zero, as for anonymous
1136 * mapped pages. Technically the new pages are already
1137 * initialized to zero since they *are* anonymous mapped
1138 * pages, however we have to take care with the contents that
1139 * come from the remaining part of the previous page: it may
1140 * contains garbage data due to a previous heap usage (grown
1141 * then shrunken). */
1142 memset(g2h(target_brk), 0, brk_page - target_brk);
1144 target_brk = new_brk;
1145 brk_page = HOST_PAGE_ALIGN(target_brk);
1146 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
1149 } else if (mapped_addr != -1) {
1150 /* Mapped but at wrong address, meaning there wasn't actually
1151 * enough space for this brk.
1153 target_munmap(mapped_addr, new_alloc_size);
1155 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
1158 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
1161 #if defined(TARGET_ALPHA)
1162 /* We (partially) emulate OSF/1 on Alpha, which requires we
1163 return a proper errno, not an unchanged brk value. */
1164 return -TARGET_ENOMEM;
1166 /* For everything else, return the previous break. */
/*
 * Read an fd_set of n descriptors from guest memory at target_fds_addr
 * into the host fd_set *fds.  Returns 0 or -TARGET_EFAULT.
 */
1170 static inline abi_long copy_from_user_fdset(fd_set *fds,
1171 abi_ulong target_fds_addr,
1175 abi_ulong b, *target_fds;
/* Number of abi_ulong words needed to hold n bits. */
1177 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1178 if (!(target_fds = lock_user(VERIFY_READ,
1180 sizeof(abi_ulong) * nw,
1182 return -TARGET_EFAULT;
1186 for (i = 0; i < nw; i++) {
1187 /* grab the abi_ulong */
1188 __get_user(b, &target_fds[i]);
1189 for (j = 0; j < TARGET_ABI_BITS; j++) {
1190 /* check the bit inside the abi_ulong */
1197 unlock_user(target_fds, target_fds_addr, 0);
/*
 * As copy_from_user_fdset(), but additionally sets *fds_ptr so that a
 * NULL guest pointer maps to a NULL host fd_set pointer for select().
 */
1202 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
1203 abi_ulong target_fds_addr,
1206 if (target_fds_addr) {
1207 if (copy_from_user_fdset(fds, target_fds_addr, n))
1208 return -TARGET_EFAULT;
/*
 * Write a host fd_set of n descriptors back to guest memory at
 * target_fds_addr.  Returns 0 or -TARGET_EFAULT.
 */
1216 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1222 abi_ulong *target_fds;
1224 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1225 if (!(target_fds = lock_user(VERIFY_WRITE,
1227 sizeof(abi_ulong) * nw,
1229 return -TARGET_EFAULT;
1232 for (i = 0; i < nw; i++) {
/* Pack TARGET_ABI_BITS fd bits into one abi_ulong word. */
1234 for (j = 0; j < TARGET_ABI_BITS; j++) {
1235 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1238 __put_user(v, &target_fds[i]);
1241 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
/* Alpha hosts use a 1024 Hz clock tick for clock_t conversions. */
1246 #if defined(__alpha__)
1247 #define HOST_HZ 1024
/* Rescale a host clock_t tick count into target HZ units. */
1252 static inline abi_long host_to_target_clock_t(long ticks)
/* When the rates match no conversion is needed. */
1254 #if HOST_HZ == TARGET_HZ
/* Widen to 64 bits before scaling to avoid intermediate overflow. */
1257 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
/*
 * Copy a host struct rusage to guest memory at target_addr,
 * byte-swapping every field.  Returns 0 or -TARGET_EFAULT.
 */
1261 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1262 const struct rusage *rusage)
1264 struct target_rusage *target_rusage;
1266 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1267 return -TARGET_EFAULT;
1268 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1269 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1270 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1271 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1272 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1273 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1274 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1275 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1276 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1277 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1278 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1279 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1280 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1281 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1282 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1283 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1284 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1285 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1286 unlock_user_struct(target_rusage, target_addr, 1);
/*
 * Convert a guest rlimit value to a host rlim_t.  Both the target's
 * infinity encoding and any value that does not survive the round-trip
 * through rlim_t map to host RLIM_INFINITY.
 */
1291 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1293 abi_ulong target_rlim_swap;
1296 target_rlim_swap = tswapal(target_rlim);
1297 if (target_rlim_swap == TARGET_RLIM_INFINITY)
1298 return RLIM_INFINITY;
1300 result = target_rlim_swap;
/* Round-trip check: values truncated by rlim_t become infinity. */
1301 if (target_rlim_swap != (rlim_t)result)
1302 return RLIM_INFINITY;
/*
 * Convert a host rlim_t to the guest's encoding.  Host infinity, and
 * any value too large to represent as abi_long, become the target's
 * infinity constant.
 */
1307 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1309 abi_ulong target_rlim_swap;
1312 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1313 target_rlim_swap = TARGET_RLIM_INFINITY;
1315 target_rlim_swap = rlim;
1316 result = tswapal(target_rlim_swap);
/* Map a target RLIMIT_* resource code onto the host's constant. */
1321 static inline int target_to_host_resource(int code)
1324 case TARGET_RLIMIT_AS:
1326 case TARGET_RLIMIT_CORE:
1328 case TARGET_RLIMIT_CPU:
1330 case TARGET_RLIMIT_DATA:
1332 case TARGET_RLIMIT_FSIZE:
1333 return RLIMIT_FSIZE;
1334 case TARGET_RLIMIT_LOCKS:
1335 return RLIMIT_LOCKS;
1336 case TARGET_RLIMIT_MEMLOCK:
1337 return RLIMIT_MEMLOCK;
1338 case TARGET_RLIMIT_MSGQUEUE:
1339 return RLIMIT_MSGQUEUE;
1340 case TARGET_RLIMIT_NICE:
1342 case TARGET_RLIMIT_NOFILE:
1343 return RLIMIT_NOFILE;
1344 case TARGET_RLIMIT_NPROC:
1345 return RLIMIT_NPROC;
1346 case TARGET_RLIMIT_RSS:
1348 case TARGET_RLIMIT_RTPRIO:
1349 return RLIMIT_RTPRIO;
1350 case TARGET_RLIMIT_SIGPENDING:
1351 return RLIMIT_SIGPENDING;
1352 case TARGET_RLIMIT_STACK:
1353 return RLIMIT_STACK;
/*
 * Read a target struct timeval from guest memory into *tv.
 * Returns 0 or -TARGET_EFAULT.
 */
1359 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1360 abi_ulong target_tv_addr)
1362 struct target_timeval *target_tv;
1364 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1365 return -TARGET_EFAULT;
1367 __get_user(tv->tv_sec, &target_tv->tv_sec);
1368 __get_user(tv->tv_usec, &target_tv->tv_usec);
1370 unlock_user_struct(target_tv, target_tv_addr, 0);
/*
 * Write a host struct timeval to guest memory at target_tv_addr.
 * Returns 0 or -TARGET_EFAULT.
 */
1375 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1376 const struct timeval *tv)
1378 struct target_timeval *target_tv;
1380 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1381 return -TARGET_EFAULT;
1383 __put_user(tv->tv_sec, &target_tv->tv_sec);
1384 __put_user(tv->tv_usec, &target_tv->tv_usec);
1386 unlock_user_struct(target_tv, target_tv_addr, 1);
/*
 * Read a target struct timezone from guest memory into *tz.
 * Returns 0 or -TARGET_EFAULT.
 */
1391 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1392 abi_ulong target_tz_addr)
1394 struct target_timezone *target_tz;
1396 if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1397 return -TARGET_EFAULT;
1400 __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1401 __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1403 unlock_user_struct(target_tz, target_tz_addr, 0);
1408 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
/*
 * Read a target struct mq_attr from guest memory into *attr.
 * Returns 0 or -TARGET_EFAULT.
 */
1411 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1412 abi_ulong target_mq_attr_addr)
1414 struct target_mq_attr *target_mq_attr;
1416 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1417 target_mq_attr_addr, 1))
1418 return -TARGET_EFAULT;
1420 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1421 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1422 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1423 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1425 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
/*
 * Write a host struct mq_attr to guest memory at target_mq_attr_addr.
 * Returns 0 or -TARGET_EFAULT.
 */
1430 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1431 const struct mq_attr *attr)
1433 struct target_mq_attr *target_mq_attr;
1435 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1436 target_mq_attr_addr, 0))
1437 return -TARGET_EFAULT;
1439 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1440 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1441 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1442 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1444 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1450 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1451 /* do_select() must return target values and target errnos. */
1452 static abi_long do_select(int n,
1453 abi_ulong rfd_addr, abi_ulong wfd_addr,
1454 abi_ulong efd_addr, abi_ulong target_tv_addr)
1456 fd_set rfds, wfds, efds;
1457 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1459 struct timespec ts, *ts_ptr;
/* Copy in each fd set; a zero guest pointer yields a NULL host pointer. */
1462 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1466 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1470 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
/* Convert the guest timeval (if any) to a host timespec for pselect6. */
1475 if (target_tv_addr) {
1476 if (copy_from_user_timeval(&tv, target_tv_addr))
1477 return -TARGET_EFAULT;
1478 ts.tv_sec = tv.tv_sec;
1479 ts.tv_nsec = tv.tv_usec * 1000;
1485 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
/* On success, copy the updated fd sets and remaining time back out. */
1488 if (!is_error(ret)) {
1489 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1490 return -TARGET_EFAULT;
1491 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1492 return -TARGET_EFAULT;
1493 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1494 return -TARGET_EFAULT;
1496 if (target_tv_addr) {
1497 tv.tv_sec = ts.tv_sec;
1498 tv.tv_usec = ts.tv_nsec / 1000;
1499 if (copy_to_user_timeval(target_tv_addr, &tv)) {
1500 return -TARGET_EFAULT;
1508 #if defined(TARGET_WANT_OLD_SYS_SELECT)
/*
 * Implement the old single-argument select syscall: arg1 points at a
 * struct whose fields are unpacked and forwarded to do_select().
 */
1509 static abi_long do_old_select(abi_ulong arg1)
1511 struct target_sel_arg_struct *sel;
1512 abi_ulong inp, outp, exp, tvp;
1515 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1516 return -TARGET_EFAULT;
1519 nsel = tswapal(sel->n);
1520 inp = tswapal(sel->inp);
1521 outp = tswapal(sel->outp);
1522 exp = tswapal(sel->exp);
1523 tvp = tswapal(sel->tvp);
1525 unlock_user_struct(sel, arg1, 0);
1527 return do_select(nsel, inp, outp, exp, tvp);
/* Thin wrapper around the host pipe2() call (flags variant of pipe). */
1532 static abi_long do_pipe2(int host_pipe[], int flags)
1535 return pipe2(host_pipe, flags);
/*
 * Implement pipe/pipe2 for the guest.  Some targets return the two file
 * descriptors through registers rather than the memory buffer, which is
 * why cpu_env is needed here.
 */
1541 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1542 int flags, int is_pipe2)
/* Only call pipe2() when flags are given, so plain pipe() keeps working. */
1546 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1549 return get_errno(ret);
1551 /* Several targets have special calling conventions for the original
1552 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1554 #if defined(TARGET_ALPHA)
1555 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1556 return host_pipe[0];
1557 #elif defined(TARGET_MIPS)
1558 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1559 return host_pipe[0];
1560 #elif defined(TARGET_SH4)
1561 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1562 return host_pipe[0];
1563 #elif defined(TARGET_SPARC)
1564 ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1565 return host_pipe[0];
/* Default path: store both descriptors into the guest's buffer. */
1569 if (put_user_s32(host_pipe[0], pipedes)
1570 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1571 return -TARGET_EFAULT;
1572 return get_errno(ret);
/*
 * Convert a target ip_mreq/ip_mreqn structure at target_addr into the
 * host struct ip_mreqn; len distinguishes the two layouts.
 */
1575 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1576 abi_ulong target_addr,
1579 struct target_ip_mreqn *target_smreqn;
1581 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1583 return -TARGET_EFAULT;
/* IP addresses stay in network byte order: no swap needed. */
1584 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1585 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
/* Only the mreqn layout carries an interface index. */
1586 if (len == sizeof(struct target_ip_mreqn))
1587 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1588 unlock_user(target_smreqn, target_addr, 0);
/*
 * Copy and convert a guest sockaddr at target_addr into the host
 * struct sockaddr *addr, applying per-family fixups (AF_UNIX path
 * termination, AF_NETLINK/AF_PACKET byte swaps).  An fd-specific
 * translator, if registered, takes over entirely.
 */
1593 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1594 abi_ulong target_addr,
1597 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1598 sa_family_t sa_family;
1599 struct target_sockaddr *target_saddr;
1601 if (fd_trans_target_to_host_addr(fd)) {
1602 return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1605 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1607 return -TARGET_EFAULT;
1609 sa_family = tswap16(target_saddr->sa_family);
1611 /* Oops. The caller might send a incomplete sun_path; sun_path
1612 * must be terminated by \0 (see the manual page), but
1613 * unfortunately it is quite common to specify sockaddr_un
1614 * length as "strlen(x->sun_path)" while it should be
1615 * "strlen(...) + 1". We'll fix that here if needed.
1616 * Linux kernel has a similar feature.
1619 if (sa_family == AF_UNIX) {
1620 if (len < unix_maxlen && len > 0) {
1621 char *cp = (char*)target_saddr;
/* Last byte non-NUL but the following one is: extend len to include it. */
1623 if ( cp[len-1] && !cp[len] )
/* Never copy more than a sockaddr_un's worth. */
1626 if (len > unix_maxlen)
1630 memcpy(addr, target_saddr, len);
1631 addr->sa_family = sa_family;
1632 if (sa_family == AF_NETLINK) {
1633 struct sockaddr_nl *nladdr;
1635 nladdr = (struct sockaddr_nl *)addr;
1636 nladdr->nl_pid = tswap32(nladdr->nl_pid);
1637 nladdr->nl_groups = tswap32(nladdr->nl_groups);
1638 } else if (sa_family == AF_PACKET) {
1639 struct target_sockaddr_ll *lladdr;
1641 lladdr = (struct target_sockaddr_ll *)addr;
1642 lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1643 lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1645 unlock_user(target_saddr, target_addr, 0);
/*
 * Copy and convert a host struct sockaddr to guest memory at
 * target_addr, byte-swapping the family field and the family-specific
 * fields for AF_NETLINK, AF_PACKET and AF_INET6.
 */
1650 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1651 struct sockaddr *addr,
1654 struct target_sockaddr *target_saddr;
1661 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1663 return -TARGET_EFAULT;
1664 memcpy(target_saddr, addr, len);
/* Only swap sa_family if the buffer is long enough to contain it. */
1665 if (len >= offsetof(struct target_sockaddr, sa_family) +
1666 sizeof(target_saddr->sa_family)) {
1667 target_saddr->sa_family = tswap16(addr->sa_family);
1669 if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
1670 struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
1671 target_nl->nl_pid = tswap32(target_nl->nl_pid);
1672 target_nl->nl_groups = tswap32(target_nl->nl_groups);
1673 } else if (addr->sa_family == AF_PACKET) {
1674 struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1675 target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1676 target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1677 } else if (addr->sa_family == AF_INET6 &&
1678 len >= sizeof(struct target_sockaddr_in6)) {
1679 struct target_sockaddr_in6 *target_in6 =
1680 (struct target_sockaddr_in6 *)target_saddr;
/* NOTE(review): sin6_scope_id is 32-bit; tswap16 here looks suspicious —
 * verify against upstream before relying on scope-id conversion. */
1681 target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1683 unlock_user(target_saddr, target_addr, len);
/*
 * Convert the ancillary-data (cmsg) chain of a guest msghdr into the
 * host msghdr.  Handles SCM_RIGHTS (fd arrays) and SCM_CREDENTIALS;
 * anything else is logged and copied verbatim.
 */
1688 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1689 struct target_msghdr *target_msgh)
1691 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1692 abi_long msg_controllen;
1693 abi_ulong target_cmsg_addr;
1694 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1695 socklen_t space = 0;
1697 msg_controllen = tswapal(target_msgh->msg_controllen);
1698 if (msg_controllen < sizeof (struct target_cmsghdr))
1700 target_cmsg_addr = tswapal(target_msgh->msg_control);
1701 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1702 target_cmsg_start = target_cmsg;
1704 return -TARGET_EFAULT;
/* Walk guest and host cmsg chains in lockstep. */
1706 while (cmsg && target_cmsg) {
1707 void *data = CMSG_DATA(cmsg);
1708 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1710 int len = tswapal(target_cmsg->cmsg_len)
1711 - sizeof(struct target_cmsghdr);
1713 space += CMSG_SPACE(len);
1714 if (space > msgh->msg_controllen) {
1715 space -= CMSG_SPACE(len);
1716 /* This is a QEMU bug, since we allocated the payload
1717 * area ourselves (unlike overflow in host-to-target
1718 * conversion, which is just the guest giving us a buffer
1719 * that's too small). It can't happen for the payload types
1720 * we currently support; if it becomes an issue in future
1721 * we would need to improve our allocation strategy to
1722 * something more intelligent than "twice the size of the
1723 * target buffer we're reading from".
1725 gemu_log("Host cmsg overflow\n");
/* Translate the level; other levels share numbering with the host. */
1729 if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1730 cmsg->cmsg_level = SOL_SOCKET;
1732 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1734 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1735 cmsg->cmsg_len = CMSG_LEN(len);
1737 if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1738 int *fd = (int *)data;
1739 int *target_fd = (int *)target_data;
1740 int i, numfds = len / sizeof(int);
1742 for (i = 0; i < numfds; i++) {
1743 __get_user(fd[i], target_fd + i);
1745 } else if (cmsg->cmsg_level == SOL_SOCKET
1746 && cmsg->cmsg_type == SCM_CREDENTIALS) {
1747 struct ucred *cred = (struct ucred *)data;
1748 struct target_ucred *target_cred =
1749 (struct target_ucred *)target_data;
1751 __get_user(cred->pid, &target_cred->pid);
1752 __get_user(cred->uid, &target_cred->uid);
1753 __get_user(cred->gid, &target_cred->gid);
1755 gemu_log("Unsupported ancillary data: %d/%d\n",
1756 cmsg->cmsg_level, cmsg->cmsg_type);
1757 memcpy(data, target_data, len);
1760 cmsg = CMSG_NXTHDR(msgh, cmsg);
1761 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1764 unlock_user(target_cmsg, target_cmsg_addr, 0);
1766 msgh->msg_controllen = space;
/*
 * Convert the ancillary-data (cmsg) chain of a host msghdr back into
 * the guest msghdr.  Handles SCM_RIGHTS, SCM_TIMESTAMP and
 * SCM_CREDENTIALS at SOL_SOCKET level, plus IP_TTL/IP_RECVERR-style
 * payloads for SOL_IP and SOL_IPV6; unknown types are logged and
 * copied verbatim (truncated or zero-padded to the target size).
 */
1770 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1771 struct msghdr *msgh)
1773 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1774 abi_long msg_controllen;
1775 abi_ulong target_cmsg_addr;
1776 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1777 socklen_t space = 0;
1779 msg_controllen = tswapal(target_msgh->msg_controllen);
1780 if (msg_controllen < sizeof (struct target_cmsghdr))
1782 target_cmsg_addr = tswapal(target_msgh->msg_control);
1783 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1784 target_cmsg_start = target_cmsg;
1786 return -TARGET_EFAULT;
1788 while (cmsg && target_cmsg) {
1789 void *data = CMSG_DATA(cmsg);
1790 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1792 int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1793 int tgt_len, tgt_space;
1795 /* We never copy a half-header but may copy half-data;
1796 * this is Linux's behaviour in put_cmsg(). Note that
1797 * truncation here is a guest problem (which we report
1798 * to the guest via the CTRUNC bit), unlike truncation
1799 * in target_to_host_cmsg, which is a QEMU bug.
1801 if (msg_controllen < sizeof(struct target_cmsghdr)) {
1802 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1806 if (cmsg->cmsg_level == SOL_SOCKET) {
1807 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1809 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1811 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1813 /* Payload types which need a different size of payload on
1814 * the target must adjust tgt_len here.
1816 switch (cmsg->cmsg_level) {
1818 switch (cmsg->cmsg_type) {
1820 tgt_len = sizeof(struct target_timeval);
/* Truncate the payload if the remaining guest buffer is too small. */
1830 if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1831 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1832 tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1835 /* We must now copy-and-convert len bytes of payload
1836 * into tgt_len bytes of destination space. Bear in mind
1837 * that in both source and destination we may be dealing
1838 * with a truncated value!
1840 switch (cmsg->cmsg_level) {
1842 switch (cmsg->cmsg_type) {
1845 int *fd = (int *)data;
1846 int *target_fd = (int *)target_data;
1847 int i, numfds = tgt_len / sizeof(int);
1849 for (i = 0; i < numfds; i++) {
1850 __put_user(fd[i], target_fd + i);
1856 struct timeval *tv = (struct timeval *)data;
1857 struct target_timeval *target_tv =
1858 (struct target_timeval *)target_data;
1860 if (len != sizeof(struct timeval) ||
1861 tgt_len != sizeof(struct target_timeval)) {
1865 /* copy struct timeval to target */
1866 __put_user(tv->tv_sec, &target_tv->tv_sec);
1867 __put_user(tv->tv_usec, &target_tv->tv_usec);
1870 case SCM_CREDENTIALS:
1872 struct ucred *cred = (struct ucred *)data;
1873 struct target_ucred *target_cred =
1874 (struct target_ucred *)target_data;
1876 __put_user(cred->pid, &target_cred->pid);
1877 __put_user(cred->uid, &target_cred->uid);
1878 __put_user(cred->gid, &target_cred->gid);
1887 switch (cmsg->cmsg_type) {
1890 uint32_t *v = (uint32_t *)data;
1891 uint32_t *t_int = (uint32_t *)target_data;
1893 if (len != sizeof(uint32_t) ||
1894 tgt_len != sizeof(uint32_t)) {
1897 __put_user(*v, t_int);
/* Extended error payload: sock_extended_err plus offending address. */
1903 struct sock_extended_err ee;
1904 struct sockaddr_in offender;
1906 struct errhdr_t *errh = (struct errhdr_t *)data;
1907 struct errhdr_t *target_errh =
1908 (struct errhdr_t *)target_data;
1910 if (len != sizeof(struct errhdr_t) ||
1911 tgt_len != sizeof(struct errhdr_t)) {
1914 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1915 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1916 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
1917 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1918 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1919 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1920 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1921 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1922 (void *) &errh->offender, sizeof(errh->offender));
1931 switch (cmsg->cmsg_type) {
1934 uint32_t *v = (uint32_t *)data;
1935 uint32_t *t_int = (uint32_t *)target_data;
1937 if (len != sizeof(uint32_t) ||
1938 tgt_len != sizeof(uint32_t)) {
1941 __put_user(*v, t_int);
/* IPv6 variant of the extended error payload. */
1947 struct sock_extended_err ee;
1948 struct sockaddr_in6 offender;
1950 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1951 struct errhdr6_t *target_errh =
1952 (struct errhdr6_t *)target_data;
1954 if (len != sizeof(struct errhdr6_t) ||
1955 tgt_len != sizeof(struct errhdr6_t)) {
1958 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1959 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1960 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
1961 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1962 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1963 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1964 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1965 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1966 (void *) &errh->offender, sizeof(errh->offender));
1976 gemu_log("Unsupported ancillary data: %d/%d\n",
1977 cmsg->cmsg_level, cmsg->cmsg_type);
1978 memcpy(target_data, data, MIN(len, tgt_len));
1979 if (tgt_len > len) {
1980 memset(target_data + len, 0, tgt_len - len);
1984 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
1985 tgt_space = TARGET_CMSG_SPACE(tgt_len);
1986 if (msg_controllen < tgt_space) {
1987 tgt_space = msg_controllen;
1989 msg_controllen -= tgt_space;
1991 cmsg = CMSG_NXTHDR(msgh, cmsg);
1992 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1995 unlock_user(target_cmsg, target_cmsg_addr, space);
1997 target_msgh->msg_controllen = tswapal(space);
/* Byte-swap all fields of a netlink message header in place. */
2001 static void tswap_nlmsghdr(struct nlmsghdr *nlh)
2003 nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
2004 nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
2005 nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
2006 nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
2007 nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
/*
 * Walk a buffer of host-order netlink messages, converting each header
 * (and NLMSG_ERROR payloads) to target byte order; payload conversion
 * for other message types is delegated to the supplied callback.
 */
2010 static abi_long host_to_target_for_each_nlmsg(struct nlmsghdr *nlh,
2012 abi_long (*host_to_target_nlmsg)
2013 (struct nlmsghdr *))
2018 while (len > sizeof(struct nlmsghdr)) {
/* Capture the host-order length before the header gets swapped. */
2020 nlmsg_len = nlh->nlmsg_len;
2021 if (nlmsg_len < sizeof(struct nlmsghdr) ||
2026 switch (nlh->nlmsg_type) {
2028 tswap_nlmsghdr(nlh);
/* NLMSG_ERROR carries an error code plus the echoed request header. */
2034 struct nlmsgerr *e = NLMSG_DATA(nlh);
2035 e->error = tswap32(e->error);
2036 tswap_nlmsghdr(&e->msg);
2037 tswap_nlmsghdr(nlh);
2041 ret = host_to_target_nlmsg(nlh);
2043 tswap_nlmsghdr(nlh);
2048 tswap_nlmsghdr(nlh);
2049 len -= NLMSG_ALIGN(nlmsg_len);
2050 nlh = (struct nlmsghdr *)(((char*)nlh) + NLMSG_ALIGN(nlmsg_len));
/*
 * Mirror of host_to_target_for_each_nlmsg for the opposite direction:
 * swap each guest netlink header to host order, then let the callback
 * convert the payload.
 */
2055 static abi_long target_to_host_for_each_nlmsg(struct nlmsghdr *nlh,
2057 abi_long (*target_to_host_nlmsg)
2058 (struct nlmsghdr *))
2062 while (len > sizeof(struct nlmsghdr)) {
/* Validate the length while it is still in target byte order. */
2063 if (tswap32(nlh->nlmsg_len) < sizeof(struct nlmsghdr) ||
2064 tswap32(nlh->nlmsg_len) > len) {
2067 tswap_nlmsghdr(nlh);
2068 switch (nlh->nlmsg_type) {
2075 struct nlmsgerr *e = NLMSG_DATA(nlh);
2076 e->error = tswap32(e->error);
2077 tswap_nlmsghdr(&e->msg);
2081 ret = target_to_host_nlmsg(nlh);
/* nlmsg_len is now host order, so it can be used for advancing. */
2086 len -= NLMSG_ALIGN(nlh->nlmsg_len);
2087 nlh = (struct nlmsghdr *)(((char *)nlh) + NLMSG_ALIGN(nlh->nlmsg_len));
2092 #ifdef CONFIG_RTNETLINK
/*
 * Walk a run of host-order netlink attributes, converting each payload
 * via the callback (with an opaque context) and then byte-swapping the
 * attribute header itself.
 */
2093 static abi_long host_to_target_for_each_nlattr(struct nlattr *nlattr,
2094 size_t len, void *context,
2095 abi_long (*host_to_target_nlattr)
2099 unsigned short nla_len;
2102 while (len > sizeof(struct nlattr)) {
2103 nla_len = nlattr->nla_len;
2104 if (nla_len < sizeof(struct nlattr) ||
/* Convert the payload first, while header fields are still host order. */
2108 ret = host_to_target_nlattr(nlattr, context);
2109 nlattr->nla_len = tswap16(nlattr->nla_len);
2110 nlattr->nla_type = tswap16(nlattr->nla_type);
2114 len -= NLA_ALIGN(nla_len);
2115 nlattr = (struct nlattr *)(((char *)nlattr) + NLA_ALIGN(nla_len));
/*
 * Same walk as host_to_target_for_each_nlattr, but over rtnetlink
 * struct rtattr attributes (no context pointer).
 */
2120 static abi_long host_to_target_for_each_rtattr(struct rtattr *rtattr,
2122 abi_long (*host_to_target_rtattr)
2125 unsigned short rta_len;
2128 while (len > sizeof(struct rtattr)) {
2129 rta_len = rtattr->rta_len;
2130 if (rta_len < sizeof(struct rtattr) ||
/* Payload first, then swap the header fields themselves. */
2134 ret = host_to_target_rtattr(rtattr);
2135 rtattr->rta_len = tswap16(rtattr->rta_len);
2136 rtattr->rta_type = tswap16(rtattr->rta_type);
2140 len -= RTA_ALIGN(rta_len);
2141 rtattr = (struct rtattr *)(((char *)rtattr) + RTA_ALIGN(rta_len));
/* Pointer to the payload that follows a netlink attribute header. */
2146 #define NLA_DATA(nla) ((void *)((char *)(nla)) + NLA_HDRLEN)
/*
 * Byte-swap the payload of an IFLA_BR_* bridge attribute according to
 * its width (u8 needs no swap; u16/u32/u64 are swapped in place;
 * bridge-id payloads are byte arrays and left untouched).
 */
2148 static abi_long host_to_target_data_bridge_nlattr(struct nlattr *nlattr,
2155 switch (nlattr->nla_type) {
/* Single-byte attributes: nothing to swap. */
2157 case QEMU_IFLA_BR_FDB_FLUSH:
2160 case QEMU_IFLA_BR_GROUP_ADDR:
2163 case QEMU_IFLA_BR_VLAN_FILTERING:
2164 case QEMU_IFLA_BR_TOPOLOGY_CHANGE:
2165 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED:
2166 case QEMU_IFLA_BR_MCAST_ROUTER:
2167 case QEMU_IFLA_BR_MCAST_SNOOPING:
2168 case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR:
2169 case QEMU_IFLA_BR_MCAST_QUERIER:
2170 case QEMU_IFLA_BR_NF_CALL_IPTABLES:
2171 case QEMU_IFLA_BR_NF_CALL_IP6TABLES:
2172 case QEMU_IFLA_BR_NF_CALL_ARPTABLES:
/* 16-bit attributes. */
2175 case QEMU_IFLA_BR_PRIORITY:
2176 case QEMU_IFLA_BR_VLAN_PROTOCOL:
2177 case QEMU_IFLA_BR_GROUP_FWD_MASK:
2178 case QEMU_IFLA_BR_ROOT_PORT:
2179 case QEMU_IFLA_BR_VLAN_DEFAULT_PVID:
2180 u16 = NLA_DATA(nlattr);
2181 *u16 = tswap16(*u16);
/* 32-bit attributes. */
2184 case QEMU_IFLA_BR_FORWARD_DELAY:
2185 case QEMU_IFLA_BR_HELLO_TIME:
2186 case QEMU_IFLA_BR_MAX_AGE:
2187 case QEMU_IFLA_BR_AGEING_TIME:
2188 case QEMU_IFLA_BR_STP_STATE:
2189 case QEMU_IFLA_BR_ROOT_PATH_COST:
2190 case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY:
2191 case QEMU_IFLA_BR_MCAST_HASH_MAX:
2192 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT:
2193 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT:
2194 u32 = NLA_DATA(nlattr);
2195 *u32 = tswap32(*u32);
/* 64-bit attributes (timer and interval values). */
2198 case QEMU_IFLA_BR_HELLO_TIMER:
2199 case QEMU_IFLA_BR_TCN_TIMER:
2200 case QEMU_IFLA_BR_GC_TIMER:
2201 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER:
2202 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL:
2203 case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL:
2204 case QEMU_IFLA_BR_MCAST_QUERIER_INTVL:
2205 case QEMU_IFLA_BR_MCAST_QUERY_INTVL:
2206 case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL:
2207 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL:
2208 u64 = NLA_DATA(nlattr);
2209 *u64 = tswap64(*u64);
2211 /* ifla_bridge_id: uin8_t[] */
2212 case QEMU_IFLA_BR_ROOT_ID:
2213 case QEMU_IFLA_BR_BRIDGE_ID:
2216 gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr->nla_type);
/*
 * Byte-swap the payload of an IFLA_BRPORT_* bridge-port attribute,
 * dispatching on attribute type by payload width (u8/u16/u32/u64,
 * with bridge-id byte arrays left untouched).
 */
2222 static abi_long host_to_target_slave_data_bridge_nlattr(struct nlattr *nlattr,
2229 switch (nlattr->nla_type) {
/* Single-byte attributes: nothing to swap. */
2231 case QEMU_IFLA_BRPORT_STATE:
2232 case QEMU_IFLA_BRPORT_MODE:
2233 case QEMU_IFLA_BRPORT_GUARD:
2234 case QEMU_IFLA_BRPORT_PROTECT:
2235 case QEMU_IFLA_BRPORT_FAST_LEAVE:
2236 case QEMU_IFLA_BRPORT_LEARNING:
2237 case QEMU_IFLA_BRPORT_UNICAST_FLOOD:
2238 case QEMU_IFLA_BRPORT_PROXYARP:
2239 case QEMU_IFLA_BRPORT_LEARNING_SYNC:
2240 case QEMU_IFLA_BRPORT_PROXYARP_WIFI:
2241 case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK:
2242 case QEMU_IFLA_BRPORT_CONFIG_PENDING:
2243 case QEMU_IFLA_BRPORT_MULTICAST_ROUTER:
/* 16-bit attributes. */
2246 case QEMU_IFLA_BRPORT_PRIORITY:
2247 case QEMU_IFLA_BRPORT_DESIGNATED_PORT:
2248 case QEMU_IFLA_BRPORT_DESIGNATED_COST:
2249 case QEMU_IFLA_BRPORT_ID:
2250 case QEMU_IFLA_BRPORT_NO:
2251 u16 = NLA_DATA(nlattr);
2252 *u16 = tswap16(*u16);
/* 32-bit attribute. */
2255 case QEMU_IFLA_BRPORT_COST:
2256 u32 = NLA_DATA(nlattr);
2257 *u32 = tswap32(*u32);
/* 64-bit timer attributes. */
2260 case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER:
2261 case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER:
2262 case QEMU_IFLA_BRPORT_HOLD_TIMER:
2263 u64 = NLA_DATA(nlattr);
2264 *u64 = tswap64(*u64);
2266 /* ifla_bridge_id: uint8_t[] */
2267 case QEMU_IFLA_BRPORT_ROOT_ID:
2268 case QEMU_IFLA_BRPORT_BRIDGE_ID:
2271 gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr->nla_type);
/* Carries the IFLA_INFO_KIND / IFLA_INFO_SLAVE_KIND names (and their
 * lengths) between linkinfo attribute callbacks. */
2277 struct linkinfo_context {
/*
 * Convert an IFLA_INFO_* linkinfo attribute.  KIND/SLAVE_KIND names are
 * remembered in the context; DATA/SLAVE_DATA payloads are recursed into
 * only for kinds we know how to convert (currently "bridge").
 */
2284 static abi_long host_to_target_data_linkinfo_nlattr(struct nlattr *nlattr,
2287 struct linkinfo_context *li_context = context;
2289 switch (nlattr->nla_type) {
2291 case QEMU_IFLA_INFO_KIND:
2292 li_context->name = NLA_DATA(nlattr);
2293 li_context->len = nlattr->nla_len - NLA_HDRLEN;
2295 case QEMU_IFLA_INFO_SLAVE_KIND:
2296 li_context->slave_name = NLA_DATA(nlattr);
2297 li_context->slave_len = nlattr->nla_len - NLA_HDRLEN;
2300 case QEMU_IFLA_INFO_XSTATS:
2301 /* FIXME: only used by CAN */
2304 case QEMU_IFLA_INFO_DATA:
/* Relies on INFO_KIND having been seen earlier in the same walk. */
2305 if (strncmp(li_context->name, "bridge",
2306 li_context->len) == 0) {
2307 return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2310 host_to_target_data_bridge_nlattr);
2312 gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context->name);
2315 case QEMU_IFLA_INFO_SLAVE_DATA:
2316 if (strncmp(li_context->slave_name, "bridge",
2317 li_context->slave_len) == 0) {
2318 return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2321 host_to_target_slave_data_bridge_nlattr);
2323 gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
2324 li_context->slave_name);
2328 gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr->nla_type);
/* Convert an AF_INET sub-attribute: IFLA_INET_CONF is a u32 array. */
2335 static abi_long host_to_target_data_inet_nlattr(struct nlattr *nlattr,
2341 switch (nlattr->nla_type) {
2342 case QEMU_IFLA_INET_CONF:
2343 u32 = NLA_DATA(nlattr);
/* Swap each element of the configuration array in place. */
2344 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
2346 u32[i] = tswap32(u32[i]);
2350 gemu_log("Unknown host AF_INET type: %d\n", nlattr->nla_type);
/*
 * Convert an AF_INET6 sub-attribute: byte-swap flags, config arrays,
 * cacheinfo fields and stats arrays; address/token payloads need none.
 */
2355 static abi_long host_to_target_data_inet6_nlattr(struct nlattr *nlattr,
2360 struct ifla_cacheinfo *ci;
2363 switch (nlattr->nla_type) {
/* Raw byte payloads: no conversion needed. */
2365 case QEMU_IFLA_INET6_TOKEN:
2368 case QEMU_IFLA_INET6_ADDR_GEN_MODE:
/* Single 32-bit value. */
2371 case QEMU_IFLA_INET6_FLAGS:
2372 u32 = NLA_DATA(nlattr);
2373 *u32 = tswap32(*u32);
/* Array of 32-bit configuration values. */
2376 case QEMU_IFLA_INET6_CONF:
2377 u32 = NLA_DATA(nlattr);
2378 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
2380 u32[i] = tswap32(u32[i]);
2383 /* ifla_cacheinfo */
2384 case QEMU_IFLA_INET6_CACHEINFO:
2385 ci = NLA_DATA(nlattr);
2386 ci->max_reasm_len = tswap32(ci->max_reasm_len);
2387 ci->tstamp = tswap32(ci->tstamp);
2388 ci->reachable_time = tswap32(ci->reachable_time);
2389 ci->retrans_time = tswap32(ci->retrans_time);
/* Arrays of 64-bit statistics counters. */
2392 case QEMU_IFLA_INET6_STATS:
2393 case QEMU_IFLA_INET6_ICMP6STATS:
2394 u64 = NLA_DATA(nlattr);
2395 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u64);
2397 u64[i] = tswap64(u64[i]);
2401 gemu_log("Unknown host AF_INET6 type: %d\n", nlattr->nla_type);
/*
 * Dispatch an IFLA_AF_SPEC sub-attribute to the per-family converter
 * (AF_INET or AF_INET6); other families are logged and left alone.
 */
2406 static abi_long host_to_target_data_spec_nlattr(struct nlattr *nlattr,
2409 switch (nlattr->nla_type) {
2411 return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2413 host_to_target_data_inet_nlattr);
2415 return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2417 host_to_target_data_inet6_nlattr);
2419 gemu_log("Unknown host AF_SPEC type: %d\n", nlattr->nla_type);
/*
 * Byte-swap one RTM_*LINK rtattr payload from host to target
 * endianness.  Handles: binary/string attributes (no swap), u8
 * attributes, scalar u32 attributes, struct rtnl_link_stats (u32
 * counters), struct rtnl_link_stats64 (u64 counters), struct
 * rtnl_link_ifmap, and nested LINKINFO / AF_SPEC attributes which are
 * recursed into.
 * NOTE(review): the extraction dropped many interleaved lines (break
 * statements, some case labels, closing braces) — annotate only.
 */
2425 static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
2428     struct rtnl_link_stats *st;
2429     struct rtnl_link_stats64 *st64;
2430     struct rtnl_link_ifmap *map;
2431     struct linkinfo_context li_context;
2433     switch (rtattr->rta_type) {
/* Binary / string payloads: nothing to swap. */
2435     case QEMU_IFLA_ADDRESS:
2436     case QEMU_IFLA_BROADCAST:
2438     case QEMU_IFLA_IFNAME:
2439     case QEMU_IFLA_QDISC:
/* Single-byte payloads: nothing to swap. */
2442     case QEMU_IFLA_OPERSTATE:
2443     case QEMU_IFLA_LINKMODE:
2444     case QEMU_IFLA_CARRIER:
2445     case QEMU_IFLA_PROTO_DOWN:
/* All of the following carry a single 32-bit value. */
2449     case QEMU_IFLA_LINK:
2450     case QEMU_IFLA_WEIGHT:
2451     case QEMU_IFLA_TXQLEN:
2452     case QEMU_IFLA_CARRIER_CHANGES:
2453     case QEMU_IFLA_NUM_RX_QUEUES:
2454     case QEMU_IFLA_NUM_TX_QUEUES:
2455     case QEMU_IFLA_PROMISCUITY:
2456     case QEMU_IFLA_EXT_MASK:
2457     case QEMU_IFLA_LINK_NETNSID:
2458     case QEMU_IFLA_GROUP:
2459     case QEMU_IFLA_MASTER:
2460     case QEMU_IFLA_NUM_VF:
2461     case QEMU_IFLA_GSO_MAX_SEGS:
2462     case QEMU_IFLA_GSO_MAX_SIZE:
2463         u32 = RTA_DATA(rtattr);
2464         *u32 = tswap32(*u32);
2466     /* struct rtnl_link_stats */
2467     case QEMU_IFLA_STATS:
2468         st = RTA_DATA(rtattr);
2469         st->rx_packets = tswap32(st->rx_packets);
2470         st->tx_packets = tswap32(st->tx_packets);
2471         st->rx_bytes = tswap32(st->rx_bytes);
2472         st->tx_bytes = tswap32(st->tx_bytes);
2473         st->rx_errors = tswap32(st->rx_errors);
2474         st->tx_errors = tswap32(st->tx_errors);
2475         st->rx_dropped = tswap32(st->rx_dropped);
2476         st->tx_dropped = tswap32(st->tx_dropped);
2477         st->multicast = tswap32(st->multicast);
2478         st->collisions = tswap32(st->collisions);
2480         /* detailed rx_errors: */
2481         st->rx_length_errors = tswap32(st->rx_length_errors);
2482         st->rx_over_errors = tswap32(st->rx_over_errors);
2483         st->rx_crc_errors = tswap32(st->rx_crc_errors);
2484         st->rx_frame_errors = tswap32(st->rx_frame_errors);
2485         st->rx_fifo_errors = tswap32(st->rx_fifo_errors);
2486         st->rx_missed_errors = tswap32(st->rx_missed_errors);
2488         /* detailed tx_errors */
2489         st->tx_aborted_errors = tswap32(st->tx_aborted_errors);
2490         st->tx_carrier_errors = tswap32(st->tx_carrier_errors);
2491         st->tx_fifo_errors = tswap32(st->tx_fifo_errors);
2492         st->tx_heartbeat_errors = tswap32(st->tx_heartbeat_errors);
2493         st->tx_window_errors = tswap32(st->tx_window_errors);
2496         st->rx_compressed = tswap32(st->rx_compressed);
2497         st->tx_compressed = tswap32(st->tx_compressed);
2499     /* struct rtnl_link_stats64 */
2500     case QEMU_IFLA_STATS64:
2501         st64 = RTA_DATA(rtattr);
2502         st64->rx_packets = tswap64(st64->rx_packets);
2503         st64->tx_packets = tswap64(st64->tx_packets);
2504         st64->rx_bytes = tswap64(st64->rx_bytes);
2505         st64->tx_bytes = tswap64(st64->tx_bytes);
2506         st64->rx_errors = tswap64(st64->rx_errors);
2507         st64->tx_errors = tswap64(st64->tx_errors);
2508         st64->rx_dropped = tswap64(st64->rx_dropped);
2509         st64->tx_dropped = tswap64(st64->tx_dropped);
2510         st64->multicast = tswap64(st64->multicast);
2511         st64->collisions = tswap64(st64->collisions);
2513         /* detailed rx_errors: */
2514         st64->rx_length_errors = tswap64(st64->rx_length_errors);
2515         st64->rx_over_errors = tswap64(st64->rx_over_errors);
2516         st64->rx_crc_errors = tswap64(st64->rx_crc_errors);
2517         st64->rx_frame_errors = tswap64(st64->rx_frame_errors);
2518         st64->rx_fifo_errors = tswap64(st64->rx_fifo_errors);
2519         st64->rx_missed_errors = tswap64(st64->rx_missed_errors);
2521         /* detailed tx_errors */
2522         st64->tx_aborted_errors = tswap64(st64->tx_aborted_errors);
2523         st64->tx_carrier_errors = tswap64(st64->tx_carrier_errors);
2524         st64->tx_fifo_errors = tswap64(st64->tx_fifo_errors);
2525         st64->tx_heartbeat_errors = tswap64(st64->tx_heartbeat_errors);
2526         st64->tx_window_errors = tswap64(st64->tx_window_errors);
2529         st64->rx_compressed = tswap64(st64->rx_compressed);
2530         st64->tx_compressed = tswap64(st64->tx_compressed);
2532     /* struct rtnl_link_ifmap */
2534         map = RTA_DATA(rtattr);
2535         map->mem_start = tswap64(map->mem_start);
2536         map->mem_end = tswap64(map->mem_end);
2537         map->base_addr = tswap64(map->base_addr);
2538         map->irq = tswap16(map->irq);
/* Nested attributes: recurse with a per-family / per-kind converter. */
2541     case QEMU_IFLA_LINKINFO:
2542         memset(&li_context, 0, sizeof(li_context));
2543         return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
2545                                               host_to_target_data_linkinfo_nlattr);
2546     case QEMU_IFLA_AF_SPEC:
2547         return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
2549                                               host_to_target_data_spec_nlattr);
2551         gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr->rta_type);
/*
 * Byte-swap one RTM_*ADDR rtattr payload from host to target
 * endianness: scalar u32 attributes and struct ifa_cacheinfo.
 * NOTE(review): case labels/breaks were dropped by the extraction.
 */
2557 static abi_long host_to_target_data_addr_rtattr(struct rtattr *rtattr)
2560     struct ifa_cacheinfo *ci;
2562     switch (rtattr->rta_type) {
2563     /* binary: depends on family type */
2573         u32 = RTA_DATA(rtattr);
2574         *u32 = tswap32(*u32);
2576     /* struct ifa_cacheinfo */
2578         ci = RTA_DATA(rtattr);
2579         ci->ifa_prefered = tswap32(ci->ifa_prefered);
2580         ci->ifa_valid = tswap32(ci->ifa_valid);
2581         ci->cstamp = tswap32(ci->cstamp);
2582         ci->tstamp = tswap32(ci->tstamp);
2585         gemu_log("Unknown host IFA type: %d\n", rtattr->rta_type);
/*
 * Byte-swap one RTM_*ROUTE rtattr payload (scalar u32 attributes) from
 * host to target endianness; binary payloads are left untouched.
 */
2591 static abi_long host_to_target_data_route_rtattr(struct rtattr *rtattr)
2594     switch (rtattr->rta_type) {
2595     /* binary: depends on family type */
2604         u32 = RTA_DATA(rtattr);
2605         *u32 = tswap32(*u32);
2608         gemu_log("Unknown host RTA type: %d\n", rtattr->rta_type);
/* Walk a chain of link rtattrs, converting each from host to target. */
2614 static abi_long host_to_target_link_rtattr(struct rtattr *rtattr,
2615                                            uint32_t rtattr_len)
2617     return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2618                                           host_to_target_data_link_rtattr);
/* Walk a chain of addr rtattrs, converting each from host to target. */
2621 static abi_long host_to_target_addr_rtattr(struct rtattr *rtattr,
2622                                            uint32_t rtattr_len)
2624     return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2625                                           host_to_target_data_addr_rtattr);
/* Walk a chain of route rtattrs, converting each from host to target. */
2628 static abi_long host_to_target_route_rtattr(struct rtattr *rtattr,
2629                                             uint32_t rtattr_len)
2631     return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2632                                           host_to_target_data_route_rtattr);
/*
 * Convert one routing-netlink message body (ifinfomsg / ifaddrmsg /
 * rtmsg, selected by nlmsg_type) from host to target byte order, then
 * convert the trailing rtattr chain.  nlmsg_len is saved before the
 * payload is swapped so the rtattr length computation uses host order.
 * NOTE(review): case labels and the struct rtmsg declaration were
 * dropped by the extraction — annotate only.
 */
2635 static abi_long host_to_target_data_route(struct nlmsghdr *nlh)
2638     struct ifinfomsg *ifi;
2639     struct ifaddrmsg *ifa;
2642     nlmsg_len = nlh->nlmsg_len;
2643     switch (nlh->nlmsg_type) {
/* Link messages: fixed ifinfomsg header plus IFLA_* attributes. */
2647         if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
2648             ifi = NLMSG_DATA(nlh);
2649             ifi->ifi_type = tswap16(ifi->ifi_type);
2650             ifi->ifi_index = tswap32(ifi->ifi_index);
2651             ifi->ifi_flags = tswap32(ifi->ifi_flags);
2652             ifi->ifi_change = tswap32(ifi->ifi_change);
2653             host_to_target_link_rtattr(IFLA_RTA(ifi),
2654                                        nlmsg_len - NLMSG_LENGTH(sizeof(*ifi)));
/* Address messages: fixed ifaddrmsg header plus IFA_* attributes. */
2660         if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
2661             ifa = NLMSG_DATA(nlh);
2662             ifa->ifa_index = tswap32(ifa->ifa_index);
2663             host_to_target_addr_rtattr(IFA_RTA(ifa),
2664                                        nlmsg_len - NLMSG_LENGTH(sizeof(*ifa)));
/* Route messages: fixed rtmsg header plus RTA_* attributes. */
2670         if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
2671             rtm = NLMSG_DATA(nlh);
2672             rtm->rtm_flags = tswap32(rtm->rtm_flags);
2673             host_to_target_route_rtattr(RTM_RTA(rtm),
2674                                         nlmsg_len - NLMSG_LENGTH(sizeof(*rtm)));
2678         return -TARGET_EINVAL;
/* Convert every nlmsg in a host routing-netlink buffer for the target. */
2683 static inline abi_long host_to_target_nlmsg_route(struct nlmsghdr *nlh,
2686     return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_route);
/*
 * Iterate a target-byte-order rtattr chain: validate each attribute's
 * length (still in target order), swap its header to host order, invoke
 * the per-attribute callback, then advance by the RTA_ALIGN'ed length.
 * NOTE(review): break-on-error and final-return lines were dropped by
 * the extraction — annotate only.
 */
2689 static abi_long target_to_host_for_each_rtattr(struct rtattr *rtattr,
2691                                                abi_long (*target_to_host_rtattr)
2696     while (len >= sizeof(struct rtattr)) {
/* Length is validated before swapping, so compare tswap16'd values. */
2697         if (tswap16(rtattr->rta_len) < sizeof(struct rtattr) ||
2698             tswap16(rtattr->rta_len) > len) {
2701         rtattr->rta_len = tswap16(rtattr->rta_len);
2702         rtattr->rta_type = tswap16(rtattr->rta_type);
2703         ret = target_to_host_rtattr(rtattr);
2707         len -= RTA_ALIGN(rtattr->rta_len);
2708         rtattr = (struct rtattr *)(((char *)rtattr) +
2709                                    RTA_ALIGN(rtattr->rta_len));
/* Target-to-host link rtattr conversion: no known types need swapping;
 * unknown types are only logged. */
2714 static abi_long target_to_host_data_link_rtattr(struct rtattr *rtattr)
2716     switch (rtattr->rta_type) {
2718         gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr->rta_type);
/* Target-to-host addr rtattr conversion: binary payloads pass through;
 * unknown types are only logged. */
2724 static abi_long target_to_host_data_addr_rtattr(struct rtattr *rtattr)
2726     switch (rtattr->rta_type) {
2727     /* binary: depends on family type */
2732         gemu_log("Unknown target IFA type: %d\n", rtattr->rta_type);
/* Target-to-host route rtattr conversion: scalar u32 attributes are
 * swapped in place; binary payloads pass through.
 * NOTE(review): the case labels for the u32 branch were dropped by the
 * extraction. */
2738 static abi_long target_to_host_data_route_rtattr(struct rtattr *rtattr)
2741     switch (rtattr->rta_type) {
2742     /* binary: depends on family type */
2750         u32 = RTA_DATA(rtattr);
2751         *u32 = tswap32(*u32);
2754         gemu_log("Unknown target RTA type: %d\n", rtattr->rta_type);
/* Walk a chain of link rtattrs, converting each from target to host. */
2760 static void target_to_host_link_rtattr(struct rtattr *rtattr,
2761                                        uint32_t rtattr_len)
2763     target_to_host_for_each_rtattr(rtattr, rtattr_len,
2764                                    target_to_host_data_link_rtattr);
/* Walk a chain of addr rtattrs, converting each from target to host. */
2767 static void target_to_host_addr_rtattr(struct rtattr *rtattr,
2768                                        uint32_t rtattr_len)
2770     target_to_host_for_each_rtattr(rtattr, rtattr_len,
2771                                    target_to_host_data_addr_rtattr);
/* Walk a chain of route rtattrs, converting each from target to host. */
2774 static void target_to_host_route_rtattr(struct rtattr *rtattr,
2775                                         uint32_t rtattr_len)
2777     target_to_host_for_each_rtattr(rtattr, rtattr_len,
2778                                    target_to_host_data_route_rtattr);
/*
 * Convert one routing-netlink message body from target to host byte
 * order (mirror of host_to_target_data_route): swap the fixed header
 * (ifinfomsg / ifaddrmsg / rtmsg) then the trailing rtattr chain.
 * Unsupported message types return -TARGET_EOPNOTSUPP.
 * NOTE(review): case labels and some declarations were dropped by the
 * extraction — annotate only.
 */
2781 static abi_long target_to_host_data_route(struct nlmsghdr *nlh)
2783     struct ifinfomsg *ifi;
2784     struct ifaddrmsg *ifa;
2787     switch (nlh->nlmsg_type) {
2792         if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
2793             ifi = NLMSG_DATA(nlh);
2794             ifi->ifi_type = tswap16(ifi->ifi_type);
2795             ifi->ifi_index = tswap32(ifi->ifi_index);
2796             ifi->ifi_flags = tswap32(ifi->ifi_flags);
2797             ifi->ifi_change = tswap32(ifi->ifi_change);
2798             target_to_host_link_rtattr(IFLA_RTA(ifi), nlh->nlmsg_len -
2799                                        NLMSG_LENGTH(sizeof(*ifi)));
2805         if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
2806             ifa = NLMSG_DATA(nlh);
2807             ifa->ifa_index = tswap32(ifa->ifa_index);
2808             target_to_host_addr_rtattr(IFA_RTA(ifa), nlh->nlmsg_len -
2809                                        NLMSG_LENGTH(sizeof(*ifa)));
2816         if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
2817             rtm = NLMSG_DATA(nlh);
2818             rtm->rtm_flags = tswap32(rtm->rtm_flags);
2819             target_to_host_route_rtattr(RTM_RTA(rtm), nlh->nlmsg_len -
2820                                         NLMSG_LENGTH(sizeof(*rtm)));
2824         return -TARGET_EOPNOTSUPP;
/* Convert every nlmsg in a target routing-netlink buffer for the host. */
2829 static abi_long target_to_host_nlmsg_route(struct nlmsghdr *nlh, size_t len)
2831     return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_route);
2833 #endif /* CONFIG_RTNETLINK */
/* Per-message audit-netlink host-to-target converter: unknown message
 * types are logged and rejected with -TARGET_EINVAL.
 * NOTE(review): the accepted case labels were dropped by the extraction. */
2835 static abi_long host_to_target_data_audit(struct nlmsghdr *nlh)
2837     switch (nlh->nlmsg_type) {
2839         gemu_log("Unknown host audit message type %d\n",
2841         return -TARGET_EINVAL;
/* Convert every nlmsg in a host audit-netlink buffer for the target. */
2846 static inline abi_long host_to_target_nlmsg_audit(struct nlmsghdr *nlh,
2849     return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_audit);
/* Per-message audit-netlink target-to-host converter: user-message
 * ranges are accepted as-is; unknown types are logged and rejected
 * with -TARGET_EINVAL. */
2852 static abi_long target_to_host_data_audit(struct nlmsghdr *nlh)
2854     switch (nlh->nlmsg_type) {
2856     case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
2857     case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
2860         gemu_log("Unknown target audit message type %d\n",
2862         return -TARGET_EINVAL;
/* Convert every nlmsg in a target audit-netlink buffer for the host. */
2868 static abi_long target_to_host_nlmsg_audit(struct nlmsghdr *nlh, size_t len)
2870     return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_audit);
2873 /* do_setsockopt() Must return target values and target errnos. */
/*
 * Emulate setsockopt(2): copy the option value from target memory,
 * translate target option constants and structures (ip_mreq, timeval,
 * sock_fprog, ifname, ...) to their host equivalents, then issue the
 * host setsockopt.
 * NOTE(review): the extraction dropped many interleaved lines (level
 * case labels, breaks, closing braces) — annotate only.
 */
2874 static abi_long do_setsockopt(int sockfd, int level, int optname,
2875                               abi_ulong optval_addr, socklen_t optlen)
2879     struct ip_mreqn *ip_mreq;
2880     struct ip_mreq_source *ip_mreq_source;
2884         /* TCP options all take an 'int' value. */
2885         if (optlen < sizeof(uint32_t))
2886             return -TARGET_EINVAL;
2888         if (get_user_u32(val, optval_addr))
2889             return -TARGET_EFAULT;
2890         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
/* SOL_IP options. */
2897         case IP_ROUTER_ALERT:
2901         case IP_MTU_DISCOVER:
2908         case IP_MULTICAST_TTL:
2909         case IP_MULTICAST_LOOP:
/* These accept either a 32-bit or a single-byte value from the guest. */
2911             if (optlen >= sizeof(uint32_t)) {
2912                 if (get_user_u32(val, optval_addr))
2913                     return -TARGET_EFAULT;
2914             } else if (optlen >= 1) {
2915                 if (get_user_u8(val, optval_addr))
2916                     return -TARGET_EFAULT;
2918             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2920         case IP_ADD_MEMBERSHIP:
2921         case IP_DROP_MEMBERSHIP:
/* Accept both the short ip_mreq and the longer ip_mreqn layouts. */
2922             if (optlen < sizeof (struct target_ip_mreq) ||
2923                 optlen > sizeof (struct target_ip_mreqn))
2924                 return -TARGET_EINVAL;
2926             ip_mreq = (struct ip_mreqn *) alloca(optlen);
2927             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2928             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2931         case IP_BLOCK_SOURCE:
2932         case IP_UNBLOCK_SOURCE:
2933         case IP_ADD_SOURCE_MEMBERSHIP:
2934         case IP_DROP_SOURCE_MEMBERSHIP:
2935             if (optlen != sizeof (struct target_ip_mreq_source))
2936                 return -TARGET_EINVAL;
/* ip_mreq_source has the same layout on host and target. */
2938             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2939             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2940             unlock_user (ip_mreq_source, optval_addr, 0);
/* SOL_IPV6 options taking a 32-bit value. */
2949         case IPV6_MTU_DISCOVER:
2952         case IPV6_RECVPKTINFO:
2953         case IPV6_UNICAST_HOPS:
2955         case IPV6_RECVHOPLIMIT:
2956         case IPV6_2292HOPLIMIT:
2959             if (optlen < sizeof(uint32_t)) {
2960                 return -TARGET_EINVAL;
2962             if (get_user_u32(val, optval_addr)) {
2963                 return -TARGET_EFAULT;
2965             ret = get_errno(setsockopt(sockfd, level, optname,
2966                                        &val, sizeof(val)));
/* in6_pktinfo: only the interface index needs byte-swapping. */
2970             struct in6_pktinfo pki;
2972             if (optlen < sizeof(pki)) {
2973                 return -TARGET_EINVAL;
2976             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2977                 return -TARGET_EFAULT;
2980             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2982             ret = get_errno(setsockopt(sockfd, level, optname,
2983                                        &pki, sizeof(pki)));
/* ICMPv6 filter: 8 x 32-bit words, each swapped individually. */
2994             struct icmp6_filter icmp6f;
2996             if (optlen > sizeof(icmp6f)) {
2997                 optlen = sizeof(icmp6f);
3000             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
3001                 return -TARGET_EFAULT;
3004             for (val = 0; val < 8; val++) {
3005                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
3008             ret = get_errno(setsockopt(sockfd, level, optname,
3020             /* those take an u32 value */
3021             if (optlen < sizeof(uint32_t)) {
3022                 return -TARGET_EINVAL;
3025             if (get_user_u32(val, optval_addr)) {
3026                 return -TARGET_EFAULT;
3028             ret = get_errno(setsockopt(sockfd, level, optname,
3029                                        &val, sizeof(val)));
3036     case TARGET_SOL_SOCKET:
3038         case TARGET_SO_RCVTIMEO:
/* RCVTIMEO/SNDTIMEO share the timeval-copy path below. */
3042                 optname = SO_RCVTIMEO;
3045                 if (optlen != sizeof(struct target_timeval)) {
3046                     return -TARGET_EINVAL;
3049                 if (copy_from_user_timeval(&tv, optval_addr)) {
3050                     return -TARGET_EFAULT;
3053                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
3057         case TARGET_SO_SNDTIMEO:
3058             optname = SO_SNDTIMEO;
3060         case TARGET_SO_ATTACH_FILTER:
/* BPF filter program: deep-copy and byte-swap each sock_filter insn. */
3062             struct target_sock_fprog *tfprog;
3063             struct target_sock_filter *tfilter;
3064             struct sock_fprog fprog;
3065             struct sock_filter *filter;
3068             if (optlen != sizeof(*tfprog)) {
3069                 return -TARGET_EINVAL;
3071             if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
3072                 return -TARGET_EFAULT;
3074             if (!lock_user_struct(VERIFY_READ, tfilter,
3075                                   tswapal(tfprog->filter), 0)) {
3076                 unlock_user_struct(tfprog, optval_addr, 1);
3077                 return -TARGET_EFAULT;
3080             fprog.len = tswap16(tfprog->len);
3081             filter = g_try_new(struct sock_filter, fprog.len);
3082             if (filter == NULL) {
3083                 unlock_user_struct(tfilter, tfprog->filter, 1);
3084                 unlock_user_struct(tfprog, optval_addr, 1);
3085                 return -TARGET_ENOMEM;
3087             for (i = 0; i < fprog.len; i++) {
3088                 filter[i].code = tswap16(tfilter[i].code);
/* jt/jf are single bytes; no swap needed. */
3089                 filter[i].jt = tfilter[i].jt;
3090                 filter[i].jf = tfilter[i].jf;
3091                 filter[i].k = tswap32(tfilter[i].k);
3093             fprog.filter = filter;
3095             ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
3096                             SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
3099             unlock_user_struct(tfilter, tfprog->filter, 1);
3100             unlock_user_struct(tfprog, optval_addr, 1);
3103         case TARGET_SO_BINDTODEVICE:
3105             char *dev_ifname, *addr_ifname;
/* Clamp the interface name to IFNAMSIZ-1 and NUL-terminate a copy. */
3107             if (optlen > IFNAMSIZ - 1) {
3108                 optlen = IFNAMSIZ - 1;
3110             dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
3112                 return -TARGET_EFAULT;
3114             optname = SO_BINDTODEVICE;
3115             addr_ifname = alloca(IFNAMSIZ);
3116             memcpy(addr_ifname, dev_ifname, optlen);
3117             addr_ifname[optlen] = 0;
3118             ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
3119                                            addr_ifname, optlen));
3120             unlock_user (dev_ifname, optval_addr, 0);
3123             /* Options with 'int' argument. */
3124         case TARGET_SO_DEBUG:
3127         case TARGET_SO_REUSEADDR:
3128             optname = SO_REUSEADDR;
3130         case TARGET_SO_TYPE:
3133         case TARGET_SO_ERROR:
3136         case TARGET_SO_DONTROUTE:
3137             optname = SO_DONTROUTE;
3139         case TARGET_SO_BROADCAST:
3140             optname = SO_BROADCAST;
3142         case TARGET_SO_SNDBUF:
3143             optname = SO_SNDBUF;
3145         case TARGET_SO_SNDBUFFORCE:
3146             optname = SO_SNDBUFFORCE;
3148         case TARGET_SO_RCVBUF:
3149             optname = SO_RCVBUF;
3151         case TARGET_SO_RCVBUFFORCE:
3152             optname = SO_RCVBUFFORCE;
3154         case TARGET_SO_KEEPALIVE:
3155             optname = SO_KEEPALIVE;
3157         case TARGET_SO_OOBINLINE:
3158             optname = SO_OOBINLINE;
3160         case TARGET_SO_NO_CHECK:
3161             optname = SO_NO_CHECK;
3163         case TARGET_SO_PRIORITY:
3164             optname = SO_PRIORITY;
3167         case TARGET_SO_BSDCOMPAT:
3168             optname = SO_BSDCOMPAT;
3171         case TARGET_SO_PASSCRED:
3172             optname = SO_PASSCRED;
3174         case TARGET_SO_PASSSEC:
3175             optname = SO_PASSSEC;
3177         case TARGET_SO_TIMESTAMP:
3178             optname = SO_TIMESTAMP;
3180         case TARGET_SO_RCVLOWAT:
3181             optname = SO_RCVLOWAT;
/* Common tail for all 'int' options: fetch a u32 and pass it through. */
3186         if (optlen < sizeof(uint32_t))
3187             return -TARGET_EINVAL;
3189         if (get_user_u32(val, optval_addr))
3190             return -TARGET_EFAULT;
3191         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
3195         gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
3196         ret = -TARGET_ENOPROTOOPT;
3201 /* do_getsockopt() Must return target values and target errnos. */
/*
 * Emulate getsockopt(2): translate target option constants to host
 * ones, call the host getsockopt, then copy the result (and the
 * returned length) back to target memory with byte-swapping.
 * NOTE(review): the extraction dropped many interleaved lines — the
 * visible code is not a complete listing.
 */
3202 static abi_long do_getsockopt(int sockfd, int level, int optname,
3203                               abi_ulong optval_addr, abi_ulong optlen)
3210     case TARGET_SOL_SOCKET:
3213         /* These don't just return a single integer */
3214         case TARGET_SO_LINGER:
3215         case TARGET_SO_RCVTIMEO:
3216         case TARGET_SO_SNDTIMEO:
3217         case TARGET_SO_PEERNAME:
3219         case TARGET_SO_PEERCRED: {
/* SO_PEERCRED: fetch host ucred and marshal into target_ucred. */
3222             struct target_ucred *tcr;
3224             if (get_user_u32(len, optlen)) {
3225                 return -TARGET_EFAULT;
3228                 return -TARGET_EINVAL;
3232             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
3240             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
3241                 return -TARGET_EFAULT;
3243             __put_user(cr.pid, &tcr->pid);
3244             __put_user(cr.uid, &tcr->uid);
3245             __put_user(cr.gid, &tcr->gid);
3246             unlock_user_struct(tcr, optval_addr, 1);
3247             if (put_user_u32(len, optlen)) {
3248                 return -TARGET_EFAULT;
3252         /* Options with 'int' argument. */
3253         case TARGET_SO_DEBUG:
3256         case TARGET_SO_REUSEADDR:
3257             optname = SO_REUSEADDR;
3259         case TARGET_SO_TYPE:
3262         case TARGET_SO_ERROR:
3265         case TARGET_SO_DONTROUTE:
3266             optname = SO_DONTROUTE;
3268         case TARGET_SO_BROADCAST:
3269             optname = SO_BROADCAST;
3271         case TARGET_SO_SNDBUF:
3272             optname = SO_SNDBUF;
3274         case TARGET_SO_RCVBUF:
3275             optname = SO_RCVBUF;
3277         case TARGET_SO_KEEPALIVE:
3278             optname = SO_KEEPALIVE;
3280         case TARGET_SO_OOBINLINE:
3281             optname = SO_OOBINLINE;
3283         case TARGET_SO_NO_CHECK:
3284             optname = SO_NO_CHECK;
3286         case TARGET_SO_PRIORITY:
3287             optname = SO_PRIORITY;
3290         case TARGET_SO_BSDCOMPAT:
3291             optname = SO_BSDCOMPAT;
3294         case TARGET_SO_PASSCRED:
3295             optname = SO_PASSCRED;
3297         case TARGET_SO_TIMESTAMP:
3298             optname = SO_TIMESTAMP;
3300         case TARGET_SO_RCVLOWAT:
3301             optname = SO_RCVLOWAT;
3303         case TARGET_SO_ACCEPTCONN:
3304             optname = SO_ACCEPTCONN;
3311         /* TCP options all take an 'int' value. */
3313         if (get_user_u32(len, optlen))
3314             return -TARGET_EFAULT;
3316             return -TARGET_EINVAL;
3318         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
/* SO_TYPE must be translated back to the target's encoding. */
3321         if (optname == SO_TYPE) {
3322             val = host_to_target_sock_type(val);
/* Write back as u32 or u8 depending on the guest-supplied length. */
3327             if (put_user_u32(val, optval_addr))
3328                 return -TARGET_EFAULT;
3330             if (put_user_u8(val, optval_addr))
3331                 return -TARGET_EFAULT;
3333         if (put_user_u32(len, optlen))
3334             return -TARGET_EFAULT;
/* SOL_IP options returning an int (or a byte for short lengths). */
3341         case IP_ROUTER_ALERT:
3345         case IP_MTU_DISCOVER:
3351         case IP_MULTICAST_TTL:
3352         case IP_MULTICAST_LOOP:
3353             if (get_user_u32(len, optlen))
3354                 return -TARGET_EFAULT;
3356                 return -TARGET_EINVAL;
3358             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
/* Small values may be returned as a single byte when len < int. */
3361             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
3363                 if (put_user_u32(len, optlen)
3364                     || put_user_u8(val, optval_addr))
3365                     return -TARGET_EFAULT;
3367                 if (len > sizeof(int))
3369                 if (put_user_u32(len, optlen)
3370                     || put_user_u32(val, optval_addr))
3371                     return -TARGET_EFAULT;
3375             ret = -TARGET_ENOPROTOOPT;
3381         gemu_log("getsockopt level=%d optname=%d not yet supported\n",
3383         ret = -TARGET_EOPNOTSUPP;
3389 /* Convert target low/high pair representing file offset into the host
3390  * low/high pair. This function doesn't handle offsets bigger than 64 bits
3391  * as the kernel doesn't handle them either.
/* The double shift by TARGET_LONG_BITS/2 (and HOST_LONG_BITS/2 below)
 * avoids undefined behavior when the shift count equals the type width
 * on 64-bit longs. */
3393 static void target_to_host_low_high(abi_ulong tlow,
3395                                     unsigned long *hlow,
3396                                     unsigned long *hhigh)
3398     uint64_t off = tlow |
3399         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
3400         TARGET_LONG_BITS / 2;
3403     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
/*
 * Build a host struct iovec array from a target iovec array, locking
 * each guest buffer into host memory.  On the first bad buffer pointer
 * the whole call fails; later bad pointers yield a truncated (partial)
 * vector, mirroring kernel readv/writev semantics.  Total length is
 * clamped so it never exceeds max_len.
 * NOTE(review): error-path labels and some unlock loops were dropped by
 * the extraction — annotate only.
 */
3406 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
3407                                 abi_ulong count, int copy)
3409     struct target_iovec *target_vec;
3411     abi_ulong total_len, max_len;
3414     bool bad_address = false;
3420     if (count > IOV_MAX) {
3425     vec = g_try_new0(struct iovec, count);
3431     target_vec = lock_user(VERIFY_READ, target_addr,
3432                            count * sizeof(struct target_iovec), 1);
3433     if (target_vec == NULL) {
3438     /* ??? If host page size > target page size, this will result in a
3439        value larger than what we can actually support. */
3440     max_len = 0x7fffffff & TARGET_PAGE_MASK;
3443     for (i = 0; i < count; i++) {
3444         abi_ulong base = tswapal(target_vec[i].iov_base);
3445         abi_long len = tswapal(target_vec[i].iov_len);
3450         } else if (len == 0) {
3451             /* Zero length pointer is ignored. */
3452             vec[i].iov_base = 0;
3454             vec[i].iov_base = lock_user(type, base, len, copy);
3455             /* If the first buffer pointer is bad, this is a fault.  But
3456              * subsequent bad buffers will result in a partial write; this
3457              * is realized by filling the vector with null pointers and
3459             if (!vec[i].iov_base) {
/* Clamp so the running total never exceeds max_len. */
3470             if (len > max_len - total_len) {
3471                 len = max_len - total_len;
3474         vec[i].iov_len = len;
3478     unlock_user(target_vec, target_addr, 0);
/* Failure path: release every buffer locked so far. */
3483         if (tswapal(target_vec[i].iov_len) > 0) {
3484             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3487     unlock_user(target_vec, target_addr, 0);
/*
 * Release a host iovec built by lock_iovec, copying data back to the
 * guest when copy is set (i.e. after a read-style operation).
 */
3494 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3495                          abi_ulong count, int copy)
3497     struct target_iovec *target_vec;
3500     target_vec = lock_user(VERIFY_READ, target_addr,
3501                            count * sizeof(struct target_iovec), 1);
3503     for (i = 0; i < count; i++) {
3504         abi_ulong base = tswapal(target_vec[i].iov_base);
3505         abi_long len = tswapal(target_vec[i].iov_len);
3509         unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3511     unlock_user(target_vec, target_addr, 0);
/*
 * Translate a target socket type (base type + CLOEXEC/NONBLOCK flags)
 * into the host encoding, rewriting *type in place.  Returns
 * -TARGET_EINVAL when the host cannot express a requested flag at
 * socket creation time (no SOCK_CLOEXEC, or no SOCK_NONBLOCK and no
 * O_NONBLOCK fallback).
 */
3517 static inline int target_to_host_sock_type(int *type)
3520     int target_type = *type;
3522     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3523     case TARGET_SOCK_DGRAM:
3524         host_type = SOCK_DGRAM;
3526     case TARGET_SOCK_STREAM:
3527         host_type = SOCK_STREAM;
/* Other base types share the same numeric values on host and target. */
3530         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3533     if (target_type & TARGET_SOCK_CLOEXEC) {
3534 #if defined(SOCK_CLOEXEC)
3535         host_type |= SOCK_CLOEXEC;
3537         return -TARGET_EINVAL;
3540     if (target_type & TARGET_SOCK_NONBLOCK) {
3541 #if defined(SOCK_NONBLOCK)
3542         host_type |= SOCK_NONBLOCK;
3543 #elif !defined(O_NONBLOCK)
3544         return -TARGET_EINVAL;
3551 /* Try to emulate socket type flags after socket creation. */
/* Fallback for hosts without SOCK_NONBLOCK: set O_NONBLOCK via fcntl
 * on the freshly created fd. */
3552 static int sock_flags_fixup(int fd, int target_type)
3554 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3555     if (target_type & TARGET_SOCK_NONBLOCK) {
3556         int flags = fcntl(fd, F_GETFL);
3557         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3559             return -TARGET_EINVAL;
/*
 * Convert a target sockaddr for SOCK_PACKET sockets: copy the raw
 * bytes and swap only sa_family (spkt_protocol stays big-endian on
 * both sides).
 */
3566 static abi_long packet_target_to_host_sockaddr(void *host_addr,
3567                                                abi_ulong target_addr,
3570     struct sockaddr *addr = host_addr;
3571     struct target_sockaddr *target_saddr;
3573     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
3574     if (!target_saddr) {
3575         return -TARGET_EFAULT;
3578     memcpy(addr, target_saddr, len);
3579     addr->sa_family = tswap16(target_saddr->sa_family);
3580     /* spkt_protocol is big-endian */
3582     unlock_user(target_saddr, target_addr, 0);
/* fd translator for SOCK_PACKET sockets: only sockaddr conversion. */
3586 static TargetFdTrans target_packet_trans = {
3587     .target_to_host_addr = packet_target_to_host_sockaddr,
3590 #ifdef CONFIG_RTNETLINK
/* fd data hook: convert outgoing routing-netlink messages to host order.
 * NOTE(review): the error-check/return lines were dropped by the
 * extraction. */
3591 static abi_long netlink_route_target_to_host(void *buf, size_t len)
3595     ret = target_to_host_nlmsg_route(buf, len);
/* fd data hook: convert incoming routing-netlink messages to target order. */
3603 static abi_long netlink_route_host_to_target(void *buf, size_t len)
3607     ret = host_to_target_nlmsg_route(buf, len);
/* fd translator for NETLINK_ROUTE sockets: swap data both ways. */
3615 static TargetFdTrans target_netlink_route_trans = {
3616     .target_to_host_data = netlink_route_target_to_host,
3617     .host_to_target_data = netlink_route_host_to_target,
3619 #endif /* CONFIG_RTNETLINK */
/* fd data hook: convert outgoing audit-netlink messages to host order. */
3621 static abi_long netlink_audit_target_to_host(void *buf, size_t len)
3625     ret = target_to_host_nlmsg_audit(buf, len);
/* fd data hook: convert incoming audit-netlink messages to target order. */
3633 static abi_long netlink_audit_host_to_target(void *buf, size_t len)
3637     ret = host_to_target_nlmsg_audit(buf, len);
/* fd translator for NETLINK_AUDIT sockets: swap data both ways. */
3645 static TargetFdTrans target_netlink_audit_trans = {
3646     .target_to_host_data = netlink_audit_target_to_host,
3647     .host_to_target_data = netlink_audit_host_to_target,
3650 /* do_socket() Must return target values and target errnos. */
/*
 * Emulate socket(2): translate the type flags, restrict PF_NETLINK to
 * the protocols QEMU can translate, byte-swap the protocol for packet
 * sockets, then register the matching fd translator on success.
 * NOTE(review): error checks and switch labels were dropped by the
 * extraction — annotate only.
 */
3651 static abi_long do_socket(int domain, int type, int protocol)
3653     int target_type = type;
3656     ret = target_to_host_sock_type(&type);
/* Only netlink protocols we know how to byte-swap are permitted. */
3661     if (domain == PF_NETLINK && !(
3662 #ifdef CONFIG_RTNETLINK
3663           protocol == NETLINK_ROUTE ||
3665           protocol == NETLINK_KOBJECT_UEVENT ||
3666           protocol == NETLINK_AUDIT)) {
3667         return -EPFNOSUPPORT;
/* Packet-socket protocols are specified in network byte order. */
3670     if (domain == AF_PACKET ||
3671         (domain == AF_INET && type == SOCK_PACKET)) {
3672         protocol = tswap16(protocol);
3675     ret = get_errno(socket(domain, type, protocol));
3677         ret = sock_flags_fixup(ret, target_type);
3678         if (type == SOCK_PACKET) {
3679             /* Manage an obsolete case :
3680              * if socket type is SOCK_PACKET, bind by name
3682             fd_trans_register(ret, &target_packet_trans);
3683         } else if (domain == PF_NETLINK) {
3685 #ifdef CONFIG_RTNETLINK
3687                 fd_trans_register(ret, &target_netlink_route_trans);
3690             case NETLINK_KOBJECT_UEVENT:
3691                 /* nothing to do: messages are strings */
3694                 fd_trans_register(ret, &target_netlink_audit_trans);
/* The protocol was validated above, so any other value is a bug. */
3697                 g_assert_not_reached();
3704 /* do_bind() Must return target values and target errnos. */
/* Copy and convert the target sockaddr, then call the host bind(2).
 * The +1 alloca leaves room for a terminating NUL on AF_UNIX paths. */
3705 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3711     if ((int)addrlen < 0) {
3712         return -TARGET_EINVAL;
3715     addr = alloca(addrlen+1);
3717     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3721     return get_errno(bind(sockfd, addr, addrlen));
3724 /* do_connect() Must return target values and target errnos. */
/* Copy and convert the target sockaddr, then call the interruptible
 * safe_connect wrapper. */
3725 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3731     if ((int)addrlen < 0) {
3732         return -TARGET_EINVAL;
3735     addr = alloca(addrlen+1);
3737     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3741     return get_errno(safe_connect(sockfd, addr, addrlen));
3744 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
/*
 * Shared worker for sendmsg/recvmsg on an already-locked target_msghdr:
 * converts name, control data, flags and the iovec, runs the fd data
 * translator when one is registered, calls safe_sendmsg/safe_recvmsg,
 * and converts results back for recvmsg.
 * NOTE(review): many interleaved lines (error labels, some branches)
 * were dropped by the extraction — annotate only.
 */
3745 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3746                                       int flags, int send)
3752     abi_ulong target_vec;
3754     if (msgp->msg_name) {
3755         msg.msg_namelen = tswap32(msgp->msg_namelen);
3756         msg.msg_name = alloca(msg.msg_namelen+1);
3757         ret = target_to_host_sockaddr(fd, msg.msg_name,
3758                                       tswapal(msgp->msg_name),
3760         if (ret == -TARGET_EFAULT) {
3761             /* For connected sockets msg_name and msg_namelen must
3762              * be ignored, so returning EFAULT immediately is wrong.
3763              * Instead, pass a bad msg_name to the host kernel, and
3764              * let it decide whether to return EFAULT or not.
3766             msg.msg_name = (void *)-1;
3771         msg.msg_name = NULL;
3772         msg.msg_namelen = 0;
/* Control data may grow during conversion; 2x is the worst case here. */
3774     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3775     msg.msg_control = alloca(msg.msg_controllen);
3776     msg.msg_flags = tswap32(msgp->msg_flags);
3778     count = tswapal(msgp->msg_iovlen);
3779     target_vec = tswapal(msgp->msg_iov);
3781     if (count > IOV_MAX) {
3782         /* sendrcvmsg returns a different errno for this condition than
3783          * readv/writev, so we must catch it here before lock_iovec() does.
3785         ret = -TARGET_EMSGSIZE;
3789     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3790                      target_vec, count, send);
3792         ret = -host_to_target_errno(errno);
3795     msg.msg_iovlen = count;
/* Send path with a registered data translator: translate a copy. */
3799         if (fd_trans_target_to_host_data(fd)) {
3802             host_msg = g_malloc(msg.msg_iov->iov_len);
3803             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3804             ret = fd_trans_target_to_host_data(fd)(host_msg,
3805                                                    msg.msg_iov->iov_len);
3807                 msg.msg_iov->iov_base = host_msg;
3808                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3812             ret = target_to_host_cmsg(&msg, msgp);
3814                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
/* Receive path: recvmsg, then translate data/cmsg/name for the guest. */
3818         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3819         if (!is_error(ret)) {
3821             if (fd_trans_host_to_target_data(fd)) {
3822                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3825                 ret = host_to_target_cmsg(msgp, &msg);
3827             if (!is_error(ret)) {
3828                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3829                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3830                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3831                                                   msg.msg_name, msg.msg_namelen);
3843     unlock_iovec(vec, target_vec, count, !send);
/* Lock the target_msghdr, delegate to do_sendrecvmsg_locked, and copy
 * the struct back for the receive direction only. */
3848 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3849                                int flags, int send)
3852     struct target_msghdr *msgp;
3854     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3858         return -TARGET_EFAULT;
3860     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3861     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3865 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3866 * so it might not have this *mmsg-specific flag either.
3868 #ifndef MSG_WAITFORONE
3869 #define MSG_WAITFORONE 0x10000
/*
 * Emulate sendmmsg/recvmmsg by looping do_sendrecvmsg_locked over the
 * vector; per-message byte counts are stored back byte-swapped, and
 * MSG_WAITFORONE switches subsequent receives to non-blocking.
 * Returns the number of datagrams processed if any succeeded,
 * otherwise the first error.
 */
3872 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3873                                 unsigned int vlen, unsigned int flags,
3876     struct target_mmsghdr *mmsgp;
3880     if (vlen > UIO_MAXIOV) {
3884     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3886         return -TARGET_EFAULT;
3889     for (i = 0; i < vlen; i++) {
3890         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3891         if (is_error(ret)) {
3894         mmsgp[i].msg_len = tswap32(ret);
3895         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3896         if (flags & MSG_WAITFORONE) {
3897             flags |= MSG_DONTWAIT;
3901     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3903     /* Return number of datagrams sent if we sent any at all;
3904      * otherwise return the error.
3912 /* do_accept4() Must return target values and target errnos. */
/*
 * Emulate accept4(2): translate flag bits, handle the NULL-address
 * fast path, validate the guest addrlen, then convert the peer address
 * back to target form on success.
 */
3913 static abi_long do_accept4(int fd, abi_ulong target_addr,
3914                            abi_ulong target_addrlen_addr, int flags)
3921     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3923     if (target_addr == 0) {
3924         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3927     /* linux returns EINVAL if addrlen pointer is invalid */
3928     if (get_user_u32(addrlen, target_addrlen_addr))
3929         return -TARGET_EINVAL;
3931     if ((int)addrlen < 0) {
3932         return -TARGET_EINVAL;
3935     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3936         return -TARGET_EINVAL;
3938     addr = alloca(addrlen);
3940     ret = get_errno(safe_accept4(fd, addr, &addrlen, host_flags));
3941     if (!is_error(ret)) {
3942         host_to_target_sockaddr(target_addr, addr, addrlen);
3943         if (put_user_u32(addrlen, target_addrlen_addr))
3944             ret = -TARGET_EFAULT;
3950 /* do_getpeername() Must return target values and target errnos. */
/* Emulate getpeername(2): validate guest addrlen, call the host
 * syscall, convert the returned sockaddr for the target. */
3950 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3951                                abi_ulong target_addrlen_addr)
3957     if (get_user_u32(addrlen, target_addrlen_addr))
3958         return -TARGET_EFAULT;
3960     if ((int)addrlen < 0) {
3961         return -TARGET_EINVAL;
3964     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3965         return -TARGET_EFAULT;
3967     addr = alloca(addrlen);
3969     ret = get_errno(getpeername(fd, addr, &addrlen));
3970     if (!is_error(ret)) {
3971         host_to_target_sockaddr(target_addr, addr, addrlen);
3972         if (put_user_u32(addrlen, target_addrlen_addr))
3973             ret = -TARGET_EFAULT;
/* Emulate getsockname(2); mirrors do_getpeername() above but queries
 * the local socket address instead of the peer's. */
3978 /* do_getsockname() Must return target values and target errnos. */
3979 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3980 abi_ulong target_addrlen_addr)
3986 if (get_user_u32(addrlen, target_addrlen_addr))
3987 return -TARGET_EFAULT;
3989 if ((int)addrlen < 0) {
3990 return -TARGET_EINVAL;
3993 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3994 return -TARGET_EFAULT;
3996 addr = alloca(addrlen);
3998 ret = get_errno(getsockname(fd, addr, &addrlen));
3999 if (!is_error(ret)) {
4000 host_to_target_sockaddr(target_addr, addr, addrlen);
4001 if (put_user_u32(addrlen, target_addrlen_addr))
4002 ret = -TARGET_EFAULT;
/* Emulate socketpair(2): translate the guest socket type, create the
 * pair on the host, and write both fds back to guest memory. */
4007 /* do_socketpair() Must return target values and target errnos. */
4008 static abi_long do_socketpair(int domain, int type, int protocol,
4009 abi_ulong target_tab_addr)
4014 target_to_host_sock_type(&type);
4016 ret = get_errno(socketpair(domain, type, protocol, tab));
4017 if (!is_error(ret)) {
/* Store the two fds consecutively at the guest's tab address. */
4018 if (put_user_s32(tab[0], target_tab_addr)
4019 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
4020 ret = -TARGET_EFAULT;
/* Emulate sendto(2)/send(2).  target_addr == 0 means plain send().
 * If the fd has a registered data translator (fd_trans), the payload
 * is copied and converted before transmission. */
4025 /* do_sendto() Must return target values and target errnos. */
4026 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
4027 abi_ulong target_addr, socklen_t addrlen)
4031 void *copy_msg = NULL;
4034 if ((int)addrlen < 0) {
4035 return -TARGET_EINVAL;
4038 host_msg = lock_user(VERIFY_READ, msg, len, 1);
4040 return -TARGET_EFAULT;
/* Translate outgoing data in a private copy so the guest buffer
 * (and the locked mapping) stay untouched. */
4041 if (fd_trans_target_to_host_data(fd)) {
4042 copy_msg = host_msg;
4043 host_msg = g_malloc(len);
4044 memcpy(host_msg, copy_msg, len);
4045 ret = fd_trans_target_to_host_data(fd)(host_msg, len);
/* +1 guards against a zero-length alloca; see sockaddr conversion. */
4051 addr = alloca(addrlen+1);
4052 ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
4056 ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
4058 ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
/* Restore the locked pointer before unlocking (no writeback: len 0). */
4063 host_msg = copy_msg;
4065 unlock_user(host_msg, msg, 0);
/* Emulate recvfrom(2)/recv(2).  target_addr == 0 means plain recv();
 * otherwise the source address and updated addrlen are copied back.
 * Incoming data is run through the fd's host-to-target translator
 * when one is registered. */
4069 /* do_recvfrom() Must return target values and target errnos. */
4070 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
4071 abi_ulong target_addr,
4072 abi_ulong target_addrlen)
4079 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
4081 return -TARGET_EFAULT;
4083 if (get_user_u32(addrlen, target_addrlen)) {
4084 ret = -TARGET_EFAULT;
4087 if ((int)addrlen < 0) {
4088 ret = -TARGET_EINVAL;
4091 addr = alloca(addrlen);
4092 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
4095 addr = NULL; /* To keep compiler quiet. */
4096 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
4098 if (!is_error(ret)) {
4099 if (fd_trans_host_to_target_data(fd)) {
4100 ret = fd_trans_host_to_target_data(fd)(host_msg, ret);
4103 host_to_target_sockaddr(target_addr, addr, addrlen);
4104 if (put_user_u32(addrlen, target_addrlen)) {
4105 ret = -TARGET_EFAULT;
/* Success path: dirty the full receive buffer back to the guest. */
4109 unlock_user(host_msg, msg, len);
/* Error path: unlock without writeback. */
4112 unlock_user(host_msg, msg, 0);
4117 #ifdef TARGET_NR_socketcall
/* Demultiplex the legacy socketcall(2) syscall: read 'nargs[num]'
 * abi_long arguments from guest memory at vptr, then dispatch to the
 * matching do_* helper or direct host syscall. */
4118 /* do_socketcall() must return target values and target errnos. */
4119 static abi_long do_socketcall(int num, abi_ulong vptr)
4121 static const unsigned nargs[] = { /* number of arguments per operation */
4122 [TARGET_SYS_SOCKET] = 3, /* domain, type, protocol */
4123 [TARGET_SYS_BIND] = 3, /* fd, addr, addrlen */
4124 [TARGET_SYS_CONNECT] = 3, /* fd, addr, addrlen */
4125 [TARGET_SYS_LISTEN] = 2, /* fd, backlog */
4126 [TARGET_SYS_ACCEPT] = 3, /* fd, addr, addrlen */
4127 [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
4128 [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
4129 [TARGET_SYS_SOCKETPAIR] = 4, /* domain, type, protocol, tab */
4130 [TARGET_SYS_SEND] = 4, /* fd, msg, len, flags */
4131 [TARGET_SYS_RECV] = 4, /* fd, msg, len, flags */
4132 [TARGET_SYS_SENDTO] = 6, /* fd, msg, len, flags, addr, addrlen */
4133 [TARGET_SYS_RECVFROM] = 6, /* fd, msg, len, flags, addr, addrlen */
4134 [TARGET_SYS_SHUTDOWN] = 2, /* fd, how */
4135 [TARGET_SYS_SETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */
4136 [TARGET_SYS_GETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */
4137 [TARGET_SYS_SENDMSG] = 3, /* fd, msg, flags */
4138 [TARGET_SYS_RECVMSG] = 3, /* fd, msg, flags */
4139 [TARGET_SYS_ACCEPT4] = 4, /* fd, addr, addrlen, flags */
4140 [TARGET_SYS_RECVMMSG] = 4, /* fd, msgvec, vlen, flags */
4141 [TARGET_SYS_SENDMMSG] = 4, /* fd, msgvec, vlen, flags */
4143 abi_long a[6]; /* max 6 args */
4146 /* check the range of the first argument num */
4147 /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
4148 if (num < 1 || num > TARGET_SYS_SENDMMSG) {
4149 return -TARGET_EINVAL;
4151 /* ensure we have space for args */
4152 if (nargs[num] > ARRAY_SIZE(a)) {
4153 return -TARGET_EINVAL;
4155 /* collect the arguments in a[] according to nargs[] */
4156 for (i = 0; i < nargs[num]; ++i) {
4157 if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
4158 return -TARGET_EFAULT;
4161 /* now when we have the args, invoke the appropriate underlying function */
4163 case TARGET_SYS_SOCKET: /* domain, type, protocol */
4164 return do_socket(a[0], a[1], a[2]);
4165 case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
4166 return do_bind(a[0], a[1], a[2]);
4167 case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
4168 return do_connect(a[0], a[1], a[2]);
4169 case TARGET_SYS_LISTEN: /* sockfd, backlog */
4170 return get_errno(listen(a[0], a[1]));
4171 case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
4172 return do_accept4(a[0], a[1], a[2], 0);
4173 case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
4174 return do_getsockname(a[0], a[1], a[2]);
4175 case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
4176 return do_getpeername(a[0], a[1], a[2]);
4177 case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
4178 return do_socketpair(a[0], a[1], a[2], a[3]);
4179 case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
4180 return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
4181 case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
4182 return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
4183 case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
4184 return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
4185 case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
4186 return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
4187 case TARGET_SYS_SHUTDOWN: /* sockfd, how */
4188 return get_errno(shutdown(a[0], a[1]));
4189 case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
4190 return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
4191 case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
4192 return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
4193 case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
4194 return do_sendrecvmsg(a[0], a[1], a[2], 1);
4195 case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
4196 return do_sendrecvmsg(a[0], a[1], a[2], 0);
4197 case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
4198 return do_accept4(a[0], a[1], a[2], a[3]);
4199 case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
4200 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
4201 case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
4202 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
4204 gemu_log("Unsupported socketcall: %d\n", num);
4205 return -TARGET_EINVAL;
/* Fixed-size table tracking guest shmat() attachments so do_shmdt()
 * can later clear the page flags for the detached range. */
4210 #define N_SHM_REGIONS 32
4212 static struct shm_region {
4216 } shm_regions[N_SHM_REGIONS];
/* Guest-layout semid64_ds used when the target does not supply its
 * own definition; 32-bit ABIs carry padding words after the times. */
4218 #ifndef TARGET_SEMID64_DS
4219 /* asm-generic version of this struct */
4220 struct target_semid64_ds
4222 struct target_ipc_perm sem_perm;
4223 abi_ulong sem_otime;
4224 #if TARGET_ABI_BITS == 32
4225 abi_ulong __unused1;
4227 abi_ulong sem_ctime;
4228 #if TARGET_ABI_BITS == 32
4229 abi_ulong __unused2;
4231 abi_ulong sem_nsems;
4232 abi_ulong __unused3;
4233 abi_ulong __unused4;
/* Copy a guest ipc_perm (embedded in a semid64_ds at target_addr)
 * into a host struct, byte-swapping each field.  Some targets use
 * 32-bit mode/seq fields, hence the per-arch tswap width. */
4237 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
4238 abi_ulong target_addr)
4240 struct target_ipc_perm *target_ip;
4241 struct target_semid64_ds *target_sd;
4243 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4244 return -TARGET_EFAULT;
4245 target_ip = &(target_sd->sem_perm);
4246 host_ip->__key = tswap32(target_ip->__key);
4247 host_ip->uid = tswap32(target_ip->uid);
4248 host_ip->gid = tswap32(target_ip->gid);
4249 host_ip->cuid = tswap32(target_ip->cuid);
4250 host_ip->cgid = tswap32(target_ip->cgid);
4251 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4252 host_ip->mode = tswap32(target_ip->mode);
4254 host_ip->mode = tswap16(target_ip->mode);
4256 #if defined(TARGET_PPC)
4257 host_ip->__seq = tswap32(target_ip->__seq);
4259 host_ip->__seq = tswap16(target_ip->__seq);
4261 unlock_user_struct(target_sd, target_addr, 0);
/* Inverse of target_to_host_ipc_perm(): write a host ipc_perm back
 * into the guest semid64_ds at target_addr with byte-swapping. */
4265 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
4266 struct ipc_perm *host_ip)
4268 struct target_ipc_perm *target_ip;
4269 struct target_semid64_ds *target_sd;
4271 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4272 return -TARGET_EFAULT;
4273 target_ip = &(target_sd->sem_perm);
4274 target_ip->__key = tswap32(host_ip->__key);
4275 target_ip->uid = tswap32(host_ip->uid);
4276 target_ip->gid = tswap32(host_ip->gid);
4277 target_ip->cuid = tswap32(host_ip->cuid);
4278 target_ip->cgid = tswap32(host_ip->cgid);
4279 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4280 target_ip->mode = tswap32(host_ip->mode);
4282 target_ip->mode = tswap16(host_ip->mode);
4284 #if defined(TARGET_PPC)
4285 target_ip->__seq = tswap32(host_ip->__seq);
4287 target_ip->__seq = tswap16(host_ip->__seq);
4289 unlock_user_struct(target_sd, target_addr, 1);
/* Convert a guest semid64_ds into a host semid_ds (perm, counts,
 * timestamps).  The embedded ipc_perm is converted by its helper. */
4293 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
4294 abi_ulong target_addr)
4296 struct target_semid64_ds *target_sd;
4298 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4299 return -TARGET_EFAULT;
4300 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
4301 return -TARGET_EFAULT;
4302 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
4303 host_sd->sem_otime = tswapal(target_sd->sem_otime);
4304 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
4305 unlock_user_struct(target_sd, target_addr, 0);
/* Inverse of target_to_host_semid_ds(): copy a host semid_ds back
 * into the guest semid64_ds at target_addr. */
4309 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
4310 struct semid_ds *host_sd)
4312 struct target_semid64_ds *target_sd;
4314 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4315 return -TARGET_EFAULT;
4316 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
4317 return -TARGET_EFAULT;
4318 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
4319 target_sd->sem_otime = tswapal(host_sd->sem_otime);
4320 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
4321 unlock_user_struct(target_sd, target_addr, 1);
/* Guest-layout seminfo (fields elided in this view). */
4325 struct target_seminfo {
/* Copy a host seminfo (semctl IPC_INFO/SEM_INFO result) into the
 * guest struct at target_addr, swapping each field. */
4338 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
4339 struct seminfo *host_seminfo)
4341 struct target_seminfo *target_seminfo;
4342 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
4343 return -TARGET_EFAULT;
4344 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
4345 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
4346 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
4347 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
4348 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
4349 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
4350 __put_user(host_seminfo->semume, &target_seminfo->semume);
4351 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
4352 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
4353 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
4354 unlock_user_struct(target_seminfo, target_addr, 1);
/* Members of the host 'union semun' (the union header and the guest
 * 'union target_semun' body are elided in this view). */
4360 struct semid_ds *buf;
4361 unsigned short *array;
4362 struct seminfo *__buf;
4365 union target_semun {
/* Allocate a host array sized by the semaphore set's sem_nsems
 * (queried via IPC_STAT) and fill it from the guest array at
 * target_addr.  Caller owns and must g_free() *host_array. */
4372 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
4373 abi_ulong target_addr)
4376 unsigned short *array;
4378 struct semid_ds semid_ds;
4381 semun.buf = &semid_ds;
/* IPC_STAT tells us how many semaphores are in the set. */
4383 ret = semctl(semid, 0, IPC_STAT, semun);
4385 return get_errno(ret);
4387 nsems = semid_ds.sem_nsems;
4389 *host_array = g_try_new(unsigned short, nsems);
4391 return -TARGET_ENOMEM;
4393 array = lock_user(VERIFY_READ, target_addr,
4394 nsems*sizeof(unsigned short), 1);
4396 g_free(*host_array);
4397 return -TARGET_EFAULT;
4400 for(i=0; i<nsems; i++) {
4401 __get_user((*host_array)[i], &array[i]);
4403 unlock_user(array, target_addr, 0);
/* Write the host semaphore-value array back to the guest array at
 * target_addr (length from IPC_STAT), then free the host array. */
4408 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
4409 unsigned short **host_array)
4412 unsigned short *array;
4414 struct semid_ds semid_ds;
4417 semun.buf = &semid_ds;
4419 ret = semctl(semid, 0, IPC_STAT, semun);
4421 return get_errno(ret);
4423 nsems = semid_ds.sem_nsems;
4425 array = lock_user(VERIFY_WRITE, target_addr,
4426 nsems*sizeof(unsigned short), 0);
4428 return -TARGET_EFAULT;
4430 for(i=0; i<nsems; i++) {
4431 __put_user((*host_array)[i], &array[i]);
/* Ownership of *host_array ends here. */
4433 g_free(*host_array);
4434 unlock_user(array, target_addr, 1);
/* Emulate semctl(2): the guest's semun is passed by value in
 * target_arg; each command converts its argument form (val, array,
 * semid_ds buf, or seminfo) between guest and host layouts. */
4439 static inline abi_long do_semctl(int semid, int semnum, int cmd,
4440 abi_ulong target_arg)
4442 union target_semun target_su = { .buf = target_arg };
4444 struct semid_ds dsarg;
4445 unsigned short *array = NULL;
4446 struct seminfo seminfo;
4447 abi_long ret = -TARGET_EINVAL;
4454 /* In 64 bit cross-endian situations, we will erroneously pick up
4455 * the wrong half of the union for the "val" element. To rectify
4456 * this, the entire 8-byte structure is byteswapped, followed by
4457 * a swap of the 4 byte val field. In other cases, the data is
4458 * already in proper host byte order. */
4459 if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4460 target_su.buf = tswapal(target_su.buf);
4461 arg.val = tswap32(target_su.val);
4463 arg.val = target_su.val;
4465 ret = get_errno(semctl(semid, semnum, cmd, arg));
/* GETALL/SETALL path: marshal the value array both directions. */
4469 err = target_to_host_semarray(semid, &array, target_su.array);
4473 ret = get_errno(semctl(semid, semnum, cmd, arg));
4474 err = host_to_target_semarray(semid, target_su.array, &array);
/* IPC_STAT/IPC_SET path: marshal semid_ds both directions. */
4481 err = target_to_host_semid_ds(&dsarg, target_su.buf);
4485 ret = get_errno(semctl(semid, semnum, cmd, arg));
4486 err = host_to_target_semid_ds(target_su.buf, &dsarg);
4492 arg.__buf = &seminfo;
4493 ret = get_errno(semctl(semid, semnum, cmd, arg));
4494 err = host_to_target_seminfo(target_su.__buf, &seminfo);
/* Commands with no semun argument (e.g. IPC_RMID, GETVAL). */
4502 ret = get_errno(semctl(semid, semnum, cmd, NULL));
/* Guest-layout sembuf (remaining fields elided in this view). */
4509 struct target_sembuf {
4510 unsigned short sem_num;
/* Convert an array of nsops guest sembufs into host sembufs. */
4515 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4516 abi_ulong target_addr,
4519 struct target_sembuf *target_sembuf;
4522 target_sembuf = lock_user(VERIFY_READ, target_addr,
4523 nsops*sizeof(struct target_sembuf), 1);
4525 return -TARGET_EFAULT;
4527 for(i=0; i<nsops; i++) {
4528 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4529 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4530 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4533 unlock_user(target_sembuf, target_addr, 0);
/* Emulate semop(2) via semtimedop with a NULL timeout.
 * NOTE(review): VLA sized by caller-supplied nsops — assumes callers
 * keep nsops small (kernel caps it at SEMOPM); confirm upstream. */
4538 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
4540 struct sembuf sops[nsops];
4542 if (target_to_host_sembuf(sops, ptr, nsops))
4543 return -TARGET_EFAULT;
4545 return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
/* Guest-layout msqid_ds; 32-bit ABIs have padding after each time. */
4548 struct target_msqid_ds
4550 struct target_ipc_perm msg_perm;
4551 abi_ulong msg_stime;
4552 #if TARGET_ABI_BITS == 32
4553 abi_ulong __unused1;
4555 abi_ulong msg_rtime;
4556 #if TARGET_ABI_BITS == 32
4557 abi_ulong __unused2;
4559 abi_ulong msg_ctime;
4560 #if TARGET_ABI_BITS == 32
4561 abi_ulong __unused3;
4563 abi_ulong __msg_cbytes;
4565 abi_ulong msg_qbytes;
4566 abi_ulong msg_lspid;
4567 abi_ulong msg_lrpid;
4568 abi_ulong __unused4;
4569 abi_ulong __unused5;
/* Convert a guest msqid_ds into a host msqid_ds (for IPC_SET). */
4572 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4573 abi_ulong target_addr)
4575 struct target_msqid_ds *target_md;
4577 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4578 return -TARGET_EFAULT;
4579 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4580 return -TARGET_EFAULT;
4581 host_md->msg_stime = tswapal(target_md->msg_stime);
4582 host_md->msg_rtime = tswapal(target_md->msg_rtime);
4583 host_md->msg_ctime = tswapal(target_md->msg_ctime);
4584 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4585 host_md->msg_qnum = tswapal(target_md->msg_qnum);
4586 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4587 host_md->msg_lspid = tswapal(target_md->msg_lspid);
4588 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4589 unlock_user_struct(target_md, target_addr, 0);
/* Inverse of target_to_host_msqid_ds(): copy a host msqid_ds back
 * into guest memory (for IPC_STAT). */
4593 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4594 struct msqid_ds *host_md)
4596 struct target_msqid_ds *target_md;
4598 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4599 return -TARGET_EFAULT;
4600 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4601 return -TARGET_EFAULT;
4602 target_md->msg_stime = tswapal(host_md->msg_stime);
4603 target_md->msg_rtime = tswapal(host_md->msg_rtime);
4604 target_md->msg_ctime = tswapal(host_md->msg_ctime);
4605 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4606 target_md->msg_qnum = tswapal(host_md->msg_qnum);
4607 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4608 target_md->msg_lspid = tswapal(host_md->msg_lspid);
4609 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4610 unlock_user_struct(target_md, target_addr, 1);
/* Guest-layout msginfo (most fields elided in this view). */
4614 struct target_msginfo {
4622 unsigned short int msgseg;
/* Copy a host msginfo (msgctl IPC_INFO/MSG_INFO result) to the guest. */
4625 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4626 struct msginfo *host_msginfo)
4628 struct target_msginfo *target_msginfo;
4629 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4630 return -TARGET_EFAULT;
4631 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4632 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4633 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4634 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4635 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4636 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4637 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4638 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4639 unlock_user_struct(target_msginfo, target_addr, 1);
/* Emulate msgctl(2): marshal msqid_ds / msginfo between guest and
 * host layouts depending on the command. */
4643 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4645 struct msqid_ds dsarg;
4646 struct msginfo msginfo;
4647 abi_long ret = -TARGET_EINVAL;
/* IPC_STAT/IPC_SET: round-trip the msqid_ds through host layout. */
4655 if (target_to_host_msqid_ds(&dsarg,ptr))
4656 return -TARGET_EFAULT;
4657 ret = get_errno(msgctl(msgid, cmd, &dsarg));
4658 if (host_to_target_msqid_ds(ptr,&dsarg))
4659 return -TARGET_EFAULT;
/* Commands that take no buffer (e.g. IPC_RMID). */
4662 ret = get_errno(msgctl(msgid, cmd, NULL));
/* IPC_INFO/MSG_INFO return a msginfo via the msqid_ds pointer. */
4666 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4667 if (host_to_target_msginfo(ptr, &msginfo))
4668 return -TARGET_EFAULT;
/* Guest-layout msgbuf (mtype + flexible mtext; fields elided). */
4675 struct target_msgbuf {
/* Emulate msgsnd(2): copy mtype (byte-swapped) and mtext from the
 * guest message into a host-allocated msgbuf and send it. */
4680 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4681 ssize_t msgsz, int msgflg)
4683 struct target_msgbuf *target_mb;
4684 struct msgbuf *host_mb;
4688 return -TARGET_EINVAL;
4691 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4692 return -TARGET_EFAULT;
/* Host msgbuf = long mtype header + msgsz bytes of payload. */
4693 host_mb = g_try_malloc(msgsz + sizeof(long));
4695 unlock_user_struct(target_mb, msgp, 0);
4696 return -TARGET_ENOMEM;
4698 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4699 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4700 ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4702 unlock_user_struct(target_mb, msgp, 0);
/* Emulate msgrcv(2): receive into a host msgbuf, then copy mtext
 * (ret bytes) and the byte-swapped mtype back to the guest. */
4707 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4708 ssize_t msgsz, abi_long msgtyp,
4711 struct target_msgbuf *target_mb;
4713 struct msgbuf *host_mb;
4717 return -TARGET_EINVAL;
4720 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4721 return -TARGET_EFAULT;
4723 host_mb = g_try_malloc(msgsz + sizeof(long));
4725 ret = -TARGET_ENOMEM;
4728 ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
/* mtext lives just past the abi_ulong mtype in the guest struct. */
4731 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4732 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4733 if (!target_mtext) {
4734 ret = -TARGET_EFAULT;
4737 memcpy(target_mb->mtext, host_mb->mtext, ret);
4738 unlock_user(target_mtext, target_mtext_addr, ret);
4741 target_mb->mtype = tswapal(host_mb->mtype);
4745 unlock_user_struct(target_mb, msgp, 1);
/* Convert a guest shmid_ds into a host shmid_ds (for IPC_SET). */
4750 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4751 abi_ulong target_addr)
4753 struct target_shmid_ds *target_sd;
4755 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4756 return -TARGET_EFAULT;
4757 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4758 return -TARGET_EFAULT;
4759 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4760 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4761 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4762 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4763 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4764 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4765 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4766 unlock_user_struct(target_sd, target_addr, 0);
/* Inverse of target_to_host_shmid_ds(): copy a host shmid_ds back
 * into guest memory (for IPC_STAT). */
4770 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4771 struct shmid_ds *host_sd)
4773 struct target_shmid_ds *target_sd;
4775 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4776 return -TARGET_EFAULT;
4777 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4778 return -TARGET_EFAULT;
4779 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4780 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4781 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4782 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4783 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4784 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4785 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4786 unlock_user_struct(target_sd, target_addr, 1);
/* Guest-layout shminfo (fields elided in this view). */
4790 struct target_shminfo {
/* Copy a host shminfo (shmctl IPC_INFO result) to the guest. */
4798 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4799 struct shminfo *host_shminfo)
4801 struct target_shminfo *target_shminfo;
4802 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4803 return -TARGET_EFAULT;
4804 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4805 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4806 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4807 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4808 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4809 unlock_user_struct(target_shminfo, target_addr, 1);
/* Guest-layout shm_info (leading fields elided in this view). */
4813 struct target_shm_info {
4818 abi_ulong swap_attempts;
4819 abi_ulong swap_successes;
/* Copy a host shm_info (shmctl SHM_INFO result) to the guest. */
4822 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4823 struct shm_info *host_shm_info)
4825 struct target_shm_info *target_shm_info;
4826 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4827 return -TARGET_EFAULT;
4828 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4829 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4830 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4831 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4832 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4833 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4834 unlock_user_struct(target_shm_info, target_addr, 1);
/* Emulate shmctl(2): marshal shmid_ds / shminfo / shm_info between
 * guest and host layouts depending on the command. */
4838 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4840 struct shmid_ds dsarg;
4841 struct shminfo shminfo;
4842 struct shm_info shm_info;
4843 abi_long ret = -TARGET_EINVAL;
/* IPC_STAT/IPC_SET: round-trip the shmid_ds through host layout. */
4851 if (target_to_host_shmid_ds(&dsarg, buf))
4852 return -TARGET_EFAULT;
4853 ret = get_errno(shmctl(shmid, cmd, &dsarg));
4854 if (host_to_target_shmid_ds(buf, &dsarg))
4855 return -TARGET_EFAULT;
/* IPC_INFO returns a shminfo via the shmid_ds pointer. */
4858 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4859 if (host_to_target_shminfo(buf, &shminfo))
4860 return -TARGET_EFAULT;
/* SHM_INFO returns a shm_info via the shmid_ds pointer. */
4863 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4864 if (host_to_target_shm_info(buf, &shm_info))
4865 return -TARGET_EFAULT;
/* Remaining commands (e.g. IPC_RMID, SHM_LOCK) take no buffer. */
4870 ret = get_errno(shmctl(shmid, cmd, NULL));
4877 #ifndef TARGET_FORCE_SHMLBA
4878 /* For most architectures, SHMLBA is the same as the page size;
4879 * some architectures have larger values, in which case they should
4880 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4881 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4882 * and defining its own value for SHMLBA.
4884 * The kernel also permits SHMLBA to be set by the architecture to a
4885 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4886 * this means that addresses are rounded to the large size if
4887 * SHM_RND is set but addresses not aligned to that size are not rejected
4888 * as long as they are at least page-aligned. Since the only architecture
4889 * which uses this is ia64 this code doesn't provide for that oddity.
/* Default guest SHMLBA: one guest page. */
4891 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4893 return TARGET_PAGE_SIZE;
4897 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4898 int shmid, abi_ulong shmaddr, int shmflg)
4902 struct shmid_ds shm_info;
4906 /* find out the length of the shared memory segment */
4907 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4908 if (is_error(ret)) {
4909 /* can't get length, bail out */
4913 shmlba = target_shmlba(cpu_env);
4915 if (shmaddr & (shmlba - 1)) {
4916 if (shmflg & SHM_RND) {
4917 shmaddr &= ~(shmlba - 1);
4919 return -TARGET_EINVAL;
4922 if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
4923 return -TARGET_EINVAL;
4929 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
4931 abi_ulong mmap_start;
4933 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
4935 if (mmap_start == -1) {
4937 host_raddr = (void *)-1;
4939 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
4942 if (host_raddr == (void *)-1) {
4944 return get_errno((long)host_raddr);
4946 raddr=h2g((unsigned long)host_raddr);
4948 page_set_flags(raddr, raddr + shm_info.shm_segsz,
4949 PAGE_VALID | PAGE_READ |
4950 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
4952 for (i = 0; i < N_SHM_REGIONS; i++) {
4953 if (!shm_regions[i].in_use) {
4954 shm_regions[i].in_use = true;
4955 shm_regions[i].start = raddr;
4956 shm_regions[i].size = shm_info.shm_segsz;
/* Emulate shmdt(2): drop the matching shm_regions[] entry, clear the
 * guest page flags for the range, then detach on the host. */
4966 static inline abi_long do_shmdt(abi_ulong shmaddr)
4973 for (i = 0; i < N_SHM_REGIONS; ++i) {
4974 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4975 shm_regions[i].in_use = false;
4976 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4980 rv = get_errno(shmdt(g2h(shmaddr)));
4987 #ifdef TARGET_NR_ipc
4988 /* ??? This only works with linear mappings. */
/* Demultiplex the legacy ipc(2) syscall: the low 16 bits of 'call'
 * select the operation, the high 16 bits carry the ABI version. */
4989 /* do_ipc() must return target values and target errnos. */
4990 static abi_long do_ipc(CPUArchState *cpu_env,
4991 unsigned int call, abi_long first,
4992 abi_long second, abi_long third,
4993 abi_long ptr, abi_long fifth)
4998 version = call >> 16;
5003 ret = do_semop(first, ptr, second);
5007 ret = get_errno(semget(first, second, third));
5010 case IPCOP_semctl: {
5011 /* The semun argument to semctl is passed by value, so dereference the
5014 get_user_ual(atptr, ptr);
5015 ret = do_semctl(first, second, third, atptr);
5020 ret = get_errno(msgget(first, second));
5024 ret = do_msgsnd(first, ptr, second, third);
5028 ret = do_msgctl(first, second, ptr);
/* Old-ABI msgrcv passes msgp and msgtyp via an ipc_kludge struct. */
5035 struct target_ipc_kludge {
5040 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
5041 ret = -TARGET_EFAULT;
5045 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
5047 unlock_user_struct(tmp, ptr, 0);
5051 ret = do_msgrcv(first, ptr, second, fifth, third);
/* shmat: the attach address is returned via *third. */
5060 raddr = do_shmat(cpu_env, first, ptr, second);
5061 if (is_error(raddr))
5062 return get_errno(raddr);
5063 if (put_user_ual(raddr, third))
5064 return -TARGET_EFAULT;
5068 ret = -TARGET_EINVAL;
5073 ret = do_shmdt(ptr);
5077 /* IPC_* flag values are the same on all linux platforms */
5078 ret = get_errno(shmget(first, second, third));
5081 /* IPC_* and SHM_* command values are the same on all linux platforms */
5083 ret = do_shmctl(first, second, ptr);
5086 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
5087 ret = -TARGET_ENOSYS;
5094 /* kernel structure types definitions */
/* First expansion of syscall_types.h: build an enum of STRUCT_<name>
 * identifiers for every thunk-described kernel structure. */
5096 #define STRUCT(name, ...) STRUCT_ ## name,
5097 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
5099 #include "syscall_types.h"
5103 #undef STRUCT_SPECIAL
/* Second expansion: emit a struct_<name>_def[] argtype descriptor
 * table per structure (SPECIAL structs are defined by hand). */
5105 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
5106 #define STRUCT_SPECIAL(name)
5107 #include "syscall_types.h"
5109 #undef STRUCT_SPECIAL
/* Table entry describing one emulated ioctl: host command number,
 * data direction (IOC_R/IOC_W), optional custom handler, and the
 * thunk argtype describing the argument structure. */
5111 typedef struct IOCTLEntry IOCTLEntry;
5113 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
5114 int fd, int cmd, abi_long arg);
5118 unsigned int host_cmd;
5121 do_ioctl_fn *do_ioctl;
5122 const argtype arg_type[5];
/* Direction flags: read from / write to guest memory. */
5125 #define IOC_R 0x0001
5126 #define IOC_W 0x0002
5127 #define IOC_RW (IOC_R | IOC_W)
/* Size of the shared scratch buffer used for ioctl argument staging. */
5129 #define MAX_STRUCT_SIZE 4096
5131 #ifdef CONFIG_FIEMAP
5132 /* So fiemap access checks don't overflow on 32 bit systems.
5133 * This is very slightly smaller than the limit imposed by
5134 * the underlying kernel.
5136 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
5137 / sizeof(struct fiemap_extent))
/* Custom handler for FS_IOC_FIEMAP: the argument is a struct fiemap
 * with a trailing variable-length fiemap_extent array, so the generic
 * fixed-size thunk path cannot handle it. */
5139 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
5140 int fd, int cmd, abi_long arg)
5142 /* The parameter for this ioctl is a struct fiemap followed
5143 * by an array of struct fiemap_extent whose size is set
5144 * in fiemap->fm_extent_count. The array is filled in by the
5147 int target_size_in, target_size_out;
5149 const argtype *arg_type = ie->arg_type;
5150 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
5153 int i, extent_size = thunk_type_size(extent_arg_type, 0);
5157 assert(arg_type[0] == TYPE_PTR);
5158 assert(ie->access == IOC_RW);
/* Convert the fixed fiemap header from guest to host layout. */
5160 target_size_in = thunk_type_size(arg_type, 0);
5161 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
5163 return -TARGET_EFAULT;
5165 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5166 unlock_user(argptr, arg, 0);
5167 fm = (struct fiemap *)buf_temp;
/* Bound fm_extent_count so the size computation below can't overflow. */
5168 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
5169 return -TARGET_EINVAL;
5172 outbufsz = sizeof (*fm) +
5173 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
5175 if (outbufsz > MAX_STRUCT_SIZE) {
5176 /* We can't fit all the extents into the fixed size buffer.
5177 * Allocate one that is large enough and use it instead.
5179 fm = g_try_malloc(outbufsz);
5181 return -TARGET_ENOMEM;
5183 memcpy(fm, buf_temp, sizeof(struct fiemap));
5186 ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
5187 if (!is_error(ret)) {
5188 target_size_out = target_size_in;
5189 /* An extent_count of 0 means we were only counting the extents
5190 * so there are no structs to copy
5192 if (fm->fm_extent_count != 0) {
5193 target_size_out += fm->fm_mapped_extents * extent_size;
5195 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
5197 ret = -TARGET_EFAULT;
5199 /* Convert the struct fiemap */
5200 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
5201 if (fm->fm_extent_count != 0) {
5202 p = argptr + target_size_in;
5203 /* ...and then all the struct fiemap_extents */
5204 for (i = 0; i < fm->fm_mapped_extents; i++) {
5205 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
5210 unlock_user(argptr, arg, target_size_out);
/*
 * Handle SIOCGIFCONF-style ioctls.  struct ifconf carries a guest pointer
 * to an array of struct ifreq, so both the outer struct and the pointed-to
 * array must be converted between guest and host layouts around the call.
 *
 * NOTE(review): the inline line numbers in this listing are not
 * consecutive -- some statements (closing braces, declarations, NULL
 * checks) are missing from this excerpt.
 */
5220 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
5221 int fd, int cmd, abi_long arg)
5223 const argtype *arg_type = ie->arg_type;
5227 struct ifconf *host_ifconf;
5229 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
5230 int target_ifreq_size;
5235 abi_long target_ifc_buf;
5239 assert(arg_type[0] == TYPE_PTR);
5240 assert(ie->access == IOC_RW);
/* Copy the guest's struct ifconf into buf_temp in host layout. */
5243 target_size = thunk_type_size(arg_type, 0);
5245 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5247 return -TARGET_EFAULT;
5248 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5249 unlock_user(argptr, arg, 0);
/* Remember the guest's buffer pointer/length before we overwrite them. */
5251 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
5252 target_ifc_len = host_ifconf->ifc_len;
5253 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
/* Size the host-side ifreq array from the guest-provided length. */
5255 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
5256 nb_ifreq = target_ifc_len / target_ifreq_size;
5257 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
5259 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
5260 if (outbufsz > MAX_STRUCT_SIZE) {
5261 /* We can't fit all the extents into the fixed size buffer.
5262 * Allocate one that is large enough and use it instead.
/* NOTE(review): plain malloc() here while the sibling fiemap handler
 * uses g_try_malloc(), and no matching free() is visible in this
 * excerpt -- confirm against the full source that this buffer is
 * released on all paths. */
5264 host_ifconf = malloc(outbufsz);
5266 return -TARGET_ENOMEM;
5268 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
/* The ifreq array lives directly after the struct in our buffer. */
5271 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
5273 host_ifconf->ifc_len = host_ifc_len;
5274 host_ifconf->ifc_buf = host_ifc_buf;
5276 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
5277 if (!is_error(ret)) {
5278 /* convert host ifc_len to target ifc_len */
5280 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
5281 target_ifc_len = nb_ifreq * target_ifreq_size;
5282 host_ifconf->ifc_len = target_ifc_len;
5284 /* restore target ifc_buf */
5286 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
5288 /* copy struct ifconf to target user */
5290 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5292 return -TARGET_EFAULT;
5293 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
5294 unlock_user(argptr, arg, target_size);
5296 /* copy ifreq[] to target user */
/* NOTE(review): this lock_user result is not checked for NULL in the
 * visible lines -- verify against the full source. */
5298 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
5299 for (i = 0; i < nb_ifreq ; i++) {
5300 thunk_convert(argptr + i * target_ifreq_size,
5301 host_ifc_buf + i * sizeof(struct ifreq),
5302 ifreq_arg_type, THUNK_TARGET);
5304 unlock_user(argptr, target_ifc_buf, target_ifc_len);
/*
 * Handle device-mapper (DM_*) ioctls.  struct dm_ioctl has a variable
 * payload addressed by data_start/data_size; the payload layout differs
 * per command, so each command is converted individually in both
 * directions around the host ioctl.
 *
 * NOTE(review): inline line numbers are non-consecutive -- case labels,
 * break statements and braces are missing from this excerpt, so the
 * switch structure shown here is incomplete.
 */
5314 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5315 int cmd, abi_long arg)
5318 struct dm_ioctl *host_dm;
5319 abi_long guest_data;
5320 uint32_t guest_data_size;
5322 const argtype *arg_type = ie->arg_type;
5324 void *big_buf = NULL;
/* Fetch the fixed-size guest header into buf_temp in host layout. */
5328 target_size = thunk_type_size(arg_type, 0);
5329 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5331 ret = -TARGET_EFAULT;
5334 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5335 unlock_user(argptr, arg, 0);
5337 /* buf_temp is too small, so fetch things into a bigger buffer */
/* NOTE(review): data_size comes straight from the guest; the "* 2"
 * presumably leaves room for expansion during conversion -- confirm
 * the full source bounds this value. */
5338 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5339 memcpy(big_buf, buf_temp, target_size);
/* Guest payload lives at data_start bytes past the header. */
5343 guest_data = arg + host_dm->data_start;
5344 if ((guest_data - arg) < 0) {
5345 ret = -TARGET_EINVAL;
5348 guest_data_size = host_dm->data_size - host_dm->data_start;
5349 host_data = (char*)host_dm + host_dm->data_start;
5351 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5353 ret = -TARGET_EFAULT;
/* Convert guest payload -> host payload, per command. */
5357 switch (ie->host_cmd) {
5359 case DM_LIST_DEVICES:
5362 case DM_DEV_SUSPEND:
5365 case DM_TABLE_STATUS:
5366 case DM_TABLE_CLEAR:
5368 case DM_LIST_VERSIONS:
5372 case DM_DEV_SET_GEOMETRY:
5373 /* data contains only strings */
5374 memcpy(host_data, argptr, guest_data_size);
/* Payload starting with a 64-bit value: byteswap just that field. */
5377 memcpy(host_data, argptr, guest_data_size);
5378 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
/* Table-load style payload: a chain of dm_target_spec entries, each
 * followed by a NUL-terminated parameter string and linked by 'next'. */
5382 void *gspec = argptr;
5383 void *cur_data = host_data;
5384 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5385 int spec_size = thunk_type_size(arg_type, 0);
5388 for (i = 0; i < host_dm->target_count; i++) {
5389 struct dm_target_spec *spec = cur_data;
5393 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5394 slen = strlen((char*)gspec + spec_size) + 1;
5396 spec->next = sizeof(*spec) + slen;
5397 strcpy((char*)&spec[1], gspec + spec_size);
5399 cur_data += spec->next;
/* Unrecognized command: reject before touching the host kernel. */
5404 ret = -TARGET_EINVAL;
5405 unlock_user(argptr, guest_data, 0);
5408 unlock_user(argptr, guest_data, 0);
5410 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
/* On success, convert host payload -> guest payload, per command. */
5411 if (!is_error(ret)) {
5412 guest_data = arg + host_dm->data_start;
5413 guest_data_size = host_dm->data_size - host_dm->data_start;
5414 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5415 switch (ie->host_cmd) {
5420 case DM_DEV_SUSPEND:
5423 case DM_TABLE_CLEAR:
5425 case DM_DEV_SET_GEOMETRY:
5426 /* no return data */
5428 case DM_LIST_DEVICES:
/* Walk the host dm_name_list chain, re-linking 'next' offsets for the
 * (possibly differently sized) target layout; stop with
 * DM_BUFFER_FULL_FLAG if the guest buffer runs out. */
5430 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5431 uint32_t remaining_data = guest_data_size;
5432 void *cur_data = argptr;
5433 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5434 int nl_size = 12; /* can't use thunk_size due to alignment */
5437 uint32_t next = nl->next;
5439 nl->next = nl_size + (strlen(nl->name) + 1);
5441 if (remaining_data < nl->next) {
5442 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5445 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5446 strcpy(cur_data + nl_size, nl->name);
5447 cur_data += nl->next;
5448 remaining_data -= nl->next;
5452 nl = (void*)nl + next;
5457 case DM_TABLE_STATUS:
/* Same chain-walk for dm_target_spec status entries. */
5459 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5460 void *cur_data = argptr;
5461 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5462 int spec_size = thunk_type_size(arg_type, 0);
5465 for (i = 0; i < host_dm->target_count; i++) {
5466 uint32_t next = spec->next;
5467 int slen = strlen((char*)&spec[1]) + 1;
5468 spec->next = (cur_data - argptr) + spec_size + slen;
5469 if (guest_data_size < spec->next) {
5470 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5473 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5474 strcpy(cur_data + spec_size, (char*)&spec[1]);
5475 cur_data = argptr + spec->next;
5476 spec = (void*)host_dm + host_dm->data_start + next;
/* Payload of a 32-bit count followed by 64-bit device numbers at
 * offset 8: swap each value individually. */
5482 void *hdata = (void*)host_dm + host_dm->data_start;
5483 int count = *(uint32_t*)hdata;
5484 uint64_t *hdev = hdata + 8;
5485 uint64_t *gdev = argptr + 8;
5488 *(uint32_t*)argptr = tswap32(count);
5489 for (i = 0; i < count; i++) {
5490 *gdev = tswap64(*hdev);
5496 case DM_LIST_VERSIONS:
/* Chain-walk for dm_target_versions entries, as for name lists. */
5498 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5499 uint32_t remaining_data = guest_data_size;
5500 void *cur_data = argptr;
5501 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5502 int vers_size = thunk_type_size(arg_type, 0);
5505 uint32_t next = vers->next;
5507 vers->next = vers_size + (strlen(vers->name) + 1);
5509 if (remaining_data < vers->next) {
5510 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5513 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5514 strcpy(cur_data + vers_size, vers->name);
5515 cur_data += vers->next;
5516 remaining_data -= vers->next;
5520 vers = (void*)vers + next;
5525 unlock_user(argptr, guest_data, 0);
5526 ret = -TARGET_EINVAL;
5529 unlock_user(argptr, guest_data, guest_data_size);
/* Finally write the (possibly updated) header back to the guest. */
5531 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5533 ret = -TARGET_EFAULT;
5536 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5537 unlock_user(argptr, arg, target_size);
/*
 * Handle BLKPG partition ioctls.  struct blkpg_ioctl_arg embeds a guest
 * pointer to a struct blkpg_partition payload; that payload is converted
 * into a local host copy and the embedded pointer redirected to it
 * before calling the host kernel.
 *
 * NOTE(review): inline line numbers are non-consecutive -- some braces
 * and error-path lines are missing from this excerpt.
 */
5544 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5545 int cmd, abi_long arg)
5549 const argtype *arg_type = ie->arg_type;
5550 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5553 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5554 struct blkpg_partition host_part;
5556 /* Read and convert blkpg */
5558 target_size = thunk_type_size(arg_type, 0);
5559 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5561 ret = -TARGET_EFAULT;
5564 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5565 unlock_user(argptr, arg, 0);
/* Only the two partition opcodes are supported; anything else fails. */
5567 switch (host_blkpg->op) {
5568 case BLKPG_ADD_PARTITION:
5569 case BLKPG_DEL_PARTITION:
5570 /* payload is struct blkpg_partition */
5573 /* Unknown opcode */
5574 ret = -TARGET_EINVAL;
5578 /* Read and convert blkpg->data */
5579 arg = (abi_long)(uintptr_t)host_blkpg->data;
5580 target_size = thunk_type_size(part_arg_type, 0);
5581 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5583 ret = -TARGET_EFAULT;
5586 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5587 unlock_user(argptr, arg, 0);
5589 /* Swizzle the data pointer to our local copy and call! */
5590 host_blkpg->data = &host_part;
5591 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
/*
 * Handle routing-table ioctls taking a struct rtentry.  rt_dev is a
 * guest pointer to a device-name string, so the struct is converted
 * field by field here (instead of one thunk_convert call) to intercept
 * that field and replace it with a locked host copy of the string.
 *
 * NOTE(review): inline line numbers are non-consecutive -- some braces
 * and else-branches are missing from this excerpt.
 */
5597 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5598 int fd, int cmd, abi_long arg)
5600 const argtype *arg_type = ie->arg_type;
5601 const StructEntry *se;
5602 const argtype *field_types;
5603 const int *dst_offsets, *src_offsets;
5606 abi_ulong *target_rt_dev_ptr;
5607 unsigned long *host_rt_dev_ptr;
5611 assert(ie->access == IOC_W);
5612 assert(*arg_type == TYPE_PTR);
5614 assert(*arg_type == TYPE_STRUCT);
5615 target_size = thunk_type_size(arg_type, 0);
5616 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5618 return -TARGET_EFAULT;
5621 assert(*arg_type == (int)STRUCT_rtentry);
5622 se = struct_entries + *arg_type++;
5623 assert(se->convert[0] == NULL);
5624 /* convert struct here to be able to catch rt_dev string */
5625 field_types = se->field_types;
5626 dst_offsets = se->field_offsets[THUNK_HOST];
5627 src_offsets = se->field_offsets[THUNK_TARGET];
5628 for (i = 0; i < se->nb_fields; i++) {
5629 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5630 assert(*field_types == TYPE_PTRVOID);
/* rt_dev: lock the guest string and stash the host pointer in the
 * converted struct; NULL guest pointer stays NULL. */
5631 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5632 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5633 if (*target_rt_dev_ptr != 0) {
5634 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5635 tswapal(*target_rt_dev_ptr));
5636 if (!*host_rt_dev_ptr) {
5637 unlock_user(argptr, arg, 0);
5638 return -TARGET_EFAULT;
5641 *host_rt_dev_ptr = 0;
/* All other fields: generic per-field conversion. */
5646 field_types = thunk_convert(buf_temp + dst_offsets[i],
5647 argptr + src_offsets[i],
5648 field_types, THUNK_HOST);
5650 unlock_user(argptr, arg, 0);
5652 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
/* Release the rt_dev string lock taken above, if any. */
5653 if (*host_rt_dev_ptr != 0) {
5654 unlock_user((void *)*host_rt_dev_ptr,
5655 *target_rt_dev_ptr, 0);
/*
 * KDSIGACCEPT: the argument is a signal number, which differs between
 * guest and host ABIs, so translate it before calling the host ioctl.
 */
5660 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5661 int fd, int cmd, abi_long arg)
5663 int sig = target_to_host_signal(arg);
5664 return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
/*
 * TIOCGPTPEER: the argument is a set of open() flags, which must be
 * mapped from guest to host encoding via fcntl_flags_tbl first.
 */
5668 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5669 int fd, int cmd, abi_long arg)
5671 int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5672 return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
/*
 * Table of supported ioctls: maps a guest ioctl number to the host
 * number, a printable name, access mode, an optional special-case
 * handler (do_ioctl_*), and the argument type description.
 * IOCTL_IGNORE entries have host_cmd == 0 and are silently rejected.
 *
 * NOTE(review): the table body itself is not visible in this excerpt.
 */
5676 static IOCTLEntry ioctl_entries[] = {
5677 #define IOCTL(cmd, access, ...) \
5678 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5679 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5680 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5681 #define IOCTL_IGNORE(cmd) \
5682 { TARGET_ ## cmd, 0, #cmd },
5687 /* ??? Implement proper locking for ioctls. */
5688 /* do_ioctl() Must return target values and target errnos. */
/*
 * Generic ioctl dispatcher: look the command up in ioctl_entries, then
 * either delegate to a special handler, pass the argument through
 * untouched, or marshal a pointed-to struct via the thunk layer
 * according to the entry's access mode (IOC_R / IOC_W / IOC_RW).
 *
 * NOTE(review): inline line numbers are non-consecutive -- loop
 * headers, case labels and braces are missing from this excerpt.
 */
5689 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5691 const IOCTLEntry *ie;
5692 const argtype *arg_type;
5694 uint8_t buf_temp[MAX_STRUCT_SIZE];
/* Linear search of the table; target_cmd == 0 terminates it. */
5700 if (ie->target_cmd == 0) {
5701 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5702 return -TARGET_ENOSYS;
5704 if (ie->target_cmd == cmd)
5708 arg_type = ie->arg_type;
5710 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
/* Entries with a dedicated handler bypass the generic marshalling. */
5713 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5714 } else if (!ie->host_cmd) {
5715 /* Some architectures define BSD ioctls in their headers
5716 that are not implemented in Linux. */
5717 return -TARGET_ENOSYS;
5720 switch(arg_type[0]) {
/* No argument at all. */
5723 ret = get_errno(safe_ioctl(fd, ie->host_cmd));
/* Integer argument: passed through unchanged. */
5727 ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
/* Pointer-to-struct argument: marshal per access mode. */
5731 target_size = thunk_type_size(arg_type, 0);
5732 switch(ie->access) {
/* IOC_R: host fills buf_temp, convert back out to the guest. */
5734 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5735 if (!is_error(ret)) {
5736 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5738 return -TARGET_EFAULT;
5739 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5740 unlock_user(argptr, arg, target_size);
/* IOC_W: convert guest struct in, nothing comes back. */
5744 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5746 return -TARGET_EFAULT;
5747 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5748 unlock_user(argptr, arg, 0);
5749 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
/* IOC_RW: convert in, call, convert back out. */
5753 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5755 return -TARGET_EFAULT;
5756 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5757 unlock_user(argptr, arg, 0);
5758 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5759 if (!is_error(ret)) {
5760 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5762 return -TARGET_EFAULT;
5763 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5764 unlock_user(argptr, arg, target_size);
5770 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5771 (long)cmd, arg_type[0]);
5772 ret = -TARGET_ENOSYS;
/*
 * termios c_iflag translation table: guest mask/bits -> host mask/bits.
 * Input-flag names are identical on both sides, only the bit values
 * may differ between ABIs.
 */
5778 static const bitmask_transtbl iflag_tbl[] = {
5779 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5780 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5781 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5782 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5783 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5784 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5785 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5786 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5787 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5788 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5789 { TARGET_IXON, TARGET_IXON, IXON, IXON },
5790 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5791 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5792 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
/*
 * termios c_oflag translation table.  Multi-bit delay fields (NLDLY,
 * CRDLY, TABDLY, ...) get one row per possible value, sharing the same
 * mask, so the whole field translates correctly.
 */
5796 static const bitmask_transtbl oflag_tbl[] = {
5797 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5798 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5799 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5800 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5801 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5802 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5803 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5804 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5805 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5806 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5807 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5808 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5809 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5810 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5811 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5812 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5813 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5814 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5815 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5816 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5817 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5818 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5819 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5820 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
/*
 * termios c_cflag translation table.  Baud rates (CBAUD field) and
 * character sizes (CSIZE field) are multi-bit: one row per value under
 * a shared mask, followed by the single-bit control flags.
 */
5824 static const bitmask_transtbl cflag_tbl[] = {
5825 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5826 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5827 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5828 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5829 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5830 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5831 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5832 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5833 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5834 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5835 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5836 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5837 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5838 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5839 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5840 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5841 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5842 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5843 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5844 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5845 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5846 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5847 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5848 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5849 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5850 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5851 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5852 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5853 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5854 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5855 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
/*
 * termios c_lflag (local mode) translation table: single-bit flags only.
 */
5859 static const bitmask_transtbl lflag_tbl[] = {
5860 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5861 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5862 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5863 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5864 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5865 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5866 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5867 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5868 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5869 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5870 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5871 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5872 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5873 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5874 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
/*
 * Convert a guest struct termios (src) to the host layout (dst): the
 * four flag words are byteswapped and translated through the tables
 * above, and each control character is copied to its host slot.
 * Host c_cc slots with no guest equivalent are left zeroed.
 */
5878 static void target_to_host_termios (void *dst, const void *src)
5880 struct host_termios *host = dst;
5881 const struct target_termios *target = src;
5884 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5886 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5888 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5890 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5891 host->c_line = target->c_line;
5893 memset(host->c_cc, 0, sizeof(host->c_cc));
5894 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5895 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5896 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5897 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5898 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5899 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5900 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5901 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5902 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5903 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5904 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5905 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5906 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5907 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5908 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5909 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5910 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
/*
 * Inverse of target_to_host_termios: convert a host struct termios
 * (src) into the guest layout (dst) using the same flag tables and
 * per-character copies in the opposite direction.
 */
5913 static void host_to_target_termios (void *dst, const void *src)
5915 struct target_termios *target = dst;
5916 const struct host_termios *host = src;
5919 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5921 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5923 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5925 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5926 target->c_line = host->c_line;
5928 memset(target->c_cc, 0, sizeof(target->c_cc));
5929 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5930 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5931 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5932 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5933 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5934 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5935 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5936 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5937 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5938 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5939 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5940 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5941 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5942 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5943 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5944 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5945 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
/*
 * Thunk descriptor for struct termios: registers the custom converter
 * pair above (index 0 = to-target, index 1 = to-host) with the sizes
 * and alignments of both representations.
 */
5948 static const StructEntry struct_termios_def = {
5949 .convert = { host_to_target_termios, target_to_host_termios },
5950 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5951 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
/*
 * mmap() flags translation table: guest MAP_* bits -> host MAP_* bits.
 * MAP_STACK maps to 0 so it is accepted from the guest but dropped
 * before reaching the host (see the comment below).
 */
5954 static bitmask_transtbl mmap_flags_tbl[] = {
5955 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5956 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5957 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5958 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5959 MAP_ANONYMOUS, MAP_ANONYMOUS },
5960 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5961 MAP_GROWSDOWN, MAP_GROWSDOWN },
5962 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5963 MAP_DENYWRITE, MAP_DENYWRITE },
5964 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5965 MAP_EXECUTABLE, MAP_EXECUTABLE },
5966 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5967 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5968 MAP_NORESERVE, MAP_NORESERVE },
5969 { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5970 /* MAP_STACK had been ignored by the kernel for quite some time.
5971 Recognize it for the target insofar as we do not want to pass
5972 it through to the host. */
5973 { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5977 #if defined(TARGET_I386)
5979 /* NOTE: there is really one LDT for all the threads */
5980 static uint8_t *ldt_table;
/*
 * modify_ldt(func=0): copy the emulated LDT (capped at its full size)
 * out to the guest buffer at ptr.
 *
 * NOTE(review): inline line numbers are non-consecutive -- declarations
 * and the null-ldt_table path are missing from this excerpt.
 */
5982 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5989 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5990 if (size > bytecount)
5992 p = lock_user(VERIFY_WRITE, ptr, size, 0);
5994 return -TARGET_EFAULT;
5995 /* ??? Should this be byteswapped? */
5996 memcpy(p, ldt_table, size);
5997 unlock_user(p, ptr, size);
6001 /* XXX: add locking support */
/*
 * modify_ldt(func=1/0x11): install one LDT descriptor from the guest's
 * struct user_desc (target_modify_ldt_ldt_s).  Decodes the packed flags
 * word, builds the two 32-bit halves of an x86 descriptor exactly as
 * the Linux kernel does, and writes them into the emulated ldt_table.
 * 'oldmode' selects the legacy encoding (no 'useable' bit).
 *
 * NOTE(review): inline line numbers are non-consecutive -- several
 * conditions, the lm/oldmode branches and braces are missing from
 * this excerpt.
 */
6002 static abi_long write_ldt(CPUX86State *env,
6003 abi_ulong ptr, unsigned long bytecount, int oldmode)
6005 struct target_modify_ldt_ldt_s ldt_info;
6006 struct target_modify_ldt_ldt_s *target_ldt_info;
6007 int seg_32bit, contents, read_exec_only, limit_in_pages;
6008 int seg_not_present, useable, lm;
6009 uint32_t *lp, entry_1, entry_2;
6011 if (bytecount != sizeof(ldt_info))
6012 return -TARGET_EINVAL;
6013 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6014 return -TARGET_EFAULT;
/* Byteswap the guest's descriptor request into host byte order. */
6015 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6016 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6017 ldt_info.limit = tswap32(target_ldt_info->limit);
6018 ldt_info.flags = tswap32(target_ldt_info->flags);
6019 unlock_user_struct(target_ldt_info, ptr, 0);
6021 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6022 return -TARGET_EINVAL;
/* Unpack the flags word (same layout as the kernel's user_desc). */
6023 seg_32bit = ldt_info.flags & 1;
6024 contents = (ldt_info.flags >> 1) & 3;
6025 read_exec_only = (ldt_info.flags >> 3) & 1;
6026 limit_in_pages = (ldt_info.flags >> 4) & 1;
6027 seg_not_present = (ldt_info.flags >> 5) & 1;
6028 useable = (ldt_info.flags >> 6) & 1;
6032 lm = (ldt_info.flags >> 7) & 1;
6034 if (contents == 3) {
6036 return -TARGET_EINVAL;
6037 if (seg_not_present == 0)
6038 return -TARGET_EINVAL;
6040 /* allocate the LDT */
/* First use: back the emulated LDT with anonymous guest memory. */
6042 env->ldt.base = target_mmap(0,
6043 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6044 PROT_READ|PROT_WRITE,
6045 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6046 if (env->ldt.base == -1)
6047 return -TARGET_ENOMEM;
6048 memset(g2h(env->ldt.base), 0,
6049 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6050 env->ldt.limit = 0xffff;
6051 ldt_table = g2h(env->ldt.base);
6054 /* NOTE: same code as Linux kernel */
6055 /* Allow LDTs to be cleared by the user. */
6056 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6059 read_exec_only == 1 &&
6061 limit_in_pages == 0 &&
6062 seg_not_present == 1 &&
/* Assemble the descriptor halves: base/limit split plus attribute
 * bits, mirroring the kernel's fill_ldt(). */
6070 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6071 (ldt_info.limit & 0x0ffff);
6072 entry_2 = (ldt_info.base_addr & 0xff000000) |
6073 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6074 (ldt_info.limit & 0xf0000) |
6075 ((read_exec_only ^ 1) << 9) |
6077 ((seg_not_present ^ 1) << 15) |
6079 (limit_in_pages << 23) |
6083 entry_2 |= (useable << 20);
6085 /* Install the new entry ... */
6087 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6088 lp[0] = tswap32(entry_1);
6089 lp[1] = tswap32(entry_2);
6093 /* specific and weird i386 syscalls */
/*
 * modify_ldt(2) dispatcher: func 0 reads the LDT, func 1 writes an
 * entry in legacy mode, func 0x11 writes in the modern encoding.
 * Unknown funcs return -TARGET_ENOSYS.
 */
6094 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6095 unsigned long bytecount)
6101 ret = read_ldt(ptr, bytecount);
6104 ret = write_ldt(env, ptr, bytecount, 1);
6107 ret = write_ldt(env, ptr, bytecount, 0);
6110 ret = -TARGET_ENOSYS;
6116 #if defined(TARGET_I386) && defined(TARGET_ABI32)
/*
 * set_thread_area(2) for 32-bit x86 guests: install a TLS descriptor
 * in the emulated GDT.  entry_number == -1 means "pick a free TLS
 * slot", in which case the chosen index is written back to the guest.
 * Descriptor assembly mirrors write_ldt() above.
 *
 * NOTE(review): inline line numbers are non-consecutive -- several
 * conditions, braces and attribute bits are missing from this excerpt.
 */
6117 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6119 uint64_t *gdt_table = g2h(env->gdt.base);
6120 struct target_modify_ldt_ldt_s ldt_info;
6121 struct target_modify_ldt_ldt_s *target_ldt_info;
6122 int seg_32bit, contents, read_exec_only, limit_in_pages;
6123 int seg_not_present, useable, lm;
6124 uint32_t *lp, entry_1, entry_2;
6127 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6128 if (!target_ldt_info)
6129 return -TARGET_EFAULT;
6130 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6131 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6132 ldt_info.limit = tswap32(target_ldt_info->limit);
6133 ldt_info.flags = tswap32(target_ldt_info->flags);
/* -1 asks us to allocate: scan the TLS range for an empty GDT slot
 * and report the chosen index back through the guest struct. */
6134 if (ldt_info.entry_number == -1) {
6135 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6136 if (gdt_table[i] == 0) {
6137 ldt_info.entry_number = i;
6138 target_ldt_info->entry_number = tswap32(i);
6143 unlock_user_struct(target_ldt_info, ptr, 1);
6145 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6146 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6147 return -TARGET_EINVAL;
/* Unpack the flags word (same layout as the kernel's user_desc). */
6148 seg_32bit = ldt_info.flags & 1;
6149 contents = (ldt_info.flags >> 1) & 3;
6150 read_exec_only = (ldt_info.flags >> 3) & 1;
6151 limit_in_pages = (ldt_info.flags >> 4) & 1;
6152 seg_not_present = (ldt_info.flags >> 5) & 1;
6153 useable = (ldt_info.flags >> 6) & 1;
6157 lm = (ldt_info.flags >> 7) & 1;
6160 if (contents == 3) {
6161 if (seg_not_present == 0)
6162 return -TARGET_EINVAL;
6165 /* NOTE: same code as Linux kernel */
6166 /* Allow LDTs to be cleared by the user. */
6167 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6168 if ((contents == 0 &&
6169 read_exec_only == 1 &&
6171 limit_in_pages == 0 &&
6172 seg_not_present == 1 &&
/* Assemble the two descriptor halves, as in write_ldt(). */
6180 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6181 (ldt_info.limit & 0x0ffff);
6182 entry_2 = (ldt_info.base_addr & 0xff000000) |
6183 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6184 (ldt_info.limit & 0xf0000) |
6185 ((read_exec_only ^ 1) << 9) |
6187 ((seg_not_present ^ 1) << 15) |
6189 (limit_in_pages << 23) |
6194 /* Install the new entry ... */
6196 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6197 lp[0] = tswap32(entry_1);
6198 lp[1] = tswap32(entry_2);
/*
 * get_thread_area(2) for 32-bit x86 guests: read the GDT descriptor at
 * the guest-supplied TLS index and decode it back into the user_desc
 * fields (inverse of do_set_thread_area's encoding).
 */
6202 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6204 struct target_modify_ldt_ldt_s *target_ldt_info;
6205 uint64_t *gdt_table = g2h(env->gdt.base);
6206 uint32_t base_addr, limit, flags;
6207 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6208 int seg_not_present, useable, lm;
6209 uint32_t *lp, entry_1, entry_2;
6211 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6212 if (!target_ldt_info)
6213 return -TARGET_EFAULT;
/* Only indices inside the TLS range of the GDT are valid. */
6214 idx = tswap32(target_ldt_info->entry_number);
6215 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6216 idx > TARGET_GDT_ENTRY_TLS_MAX) {
6217 unlock_user_struct(target_ldt_info, ptr, 1);
6218 return -TARGET_EINVAL;
6220 lp = (uint32_t *)(gdt_table + idx);
6221 entry_1 = tswap32(lp[0]);
6222 entry_2 = tswap32(lp[1]);
/* Pull the attribute bits back out of the descriptor's high word. */
6224 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6225 contents = (entry_2 >> 10) & 3;
6226 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6227 seg_32bit = (entry_2 >> 22) & 1;
6228 limit_in_pages = (entry_2 >> 23) & 1;
6229 useable = (entry_2 >> 20) & 1;
6233 lm = (entry_2 >> 21) & 1;
/* Re-pack the flags word and reassemble base/limit. */
6235 flags = (seg_32bit << 0) | (contents << 1) |
6236 (read_exec_only << 3) | (limit_in_pages << 4) |
6237 (seg_not_present << 5) | (useable << 6) | (lm << 7);
6238 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
6239 base_addr = (entry_1 >> 16) |
6240 (entry_2 & 0xff000000) |
6241 ((entry_2 & 0xff) << 16);
6242 target_ldt_info->base_addr = tswapal(base_addr);
6243 target_ldt_info->limit = tswap32(limit);
6244 target_ldt_info->flags = tswap32(flags);
6245 unlock_user_struct(target_ldt_info, ptr, 1);
6248 #endif /* TARGET_I386 && TARGET_ABI32 */
6250 #ifndef TARGET_ABI32
/*
 * arch_prctl(2) for 64-bit x86 guests: set or query the FS/GS segment
 * base.  SET loads the segment with selector 0 and stores the base
 * directly; GET copies the base out to the guest via put_user.
 *
 * NOTE(review): inline line numbers are non-consecutive -- the idx
 * assignments and braces are missing from this excerpt.
 */
6251 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6258 case TARGET_ARCH_SET_GS:
6259 case TARGET_ARCH_SET_FS:
6260 if (code == TARGET_ARCH_SET_GS)
6264 cpu_x86_load_seg(env, idx, 0);
6265 env->segs[idx].base = addr;
6267 case TARGET_ARCH_GET_GS:
6268 case TARGET_ARCH_GET_FS:
6269 if (code == TARGET_ARCH_GET_GS)
6273 val = env->segs[idx].base;
6274 if (put_user(val, addr, abi_ulong))
6275 ret = -TARGET_EFAULT;
6278 ret = -TARGET_EINVAL;
6285 #endif /* defined(TARGET_I386) */
/* Stack size for threads created via clone(); clone_lock serializes
 * thread setup in do_fork(), and new_thread_info is the parent->child
 * handshake record passed to clone_func().
 *
 * NOTE(review): the struct's opening/closing lines and remaining fields
 * are not visible in this excerpt. */
6287 #define NEW_STACK_SIZE 0x40000
6290 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6293 pthread_mutex_t mutex;
6294 pthread_cond_t cond;
6297 abi_ulong child_tidptr;
6298 abi_ulong parent_tidptr;
/*
 * Entry point of a new guest thread (pthread created by do_fork with
 * CLONE_VM).  Registers the thread with RCU/TCG, publishes its tid to
 * the requested child/parent locations, unblocks signals, signals the
 * parent via info->cond, then waits on clone_lock until the parent has
 * finished TLS setup before entering the CPU loop.
 *
 * NOTE(review): inline line numbers are non-consecutive -- the final
 * cpu_loop() call and braces are missing from this excerpt.
 */
6302 static void *clone_func(void *arg)
6304 new_thread_info *info = arg;
6309 rcu_register_thread();
6310 tcg_register_thread();
6312 cpu = ENV_GET_CPU(env);
6314 ts = (TaskState *)cpu->opaque;
6315 info->tid = gettid();
6317 if (info->child_tidptr)
6318 put_user_u32(info->tid, info->child_tidptr);
6319 if (info->parent_tidptr)
6320 put_user_u32(info->tid, info->parent_tidptr);
6321 /* Enable signals. */
6322 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6323 /* Signal to the parent that we're ready. */
6324 pthread_mutex_lock(&info->mutex);
6325 pthread_cond_broadcast(&info->cond);
6326 pthread_mutex_unlock(&info->mutex);
6327 /* Wait until the parent has finished initializing the tls state. */
6328 pthread_mutex_lock(&clone_lock);
6329 pthread_mutex_unlock(&clone_lock);
6335 /* do_fork() Must return host values and target errnos (unlike most
6336 do_*() functions). */
6337 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6338 abi_ulong parent_tidptr, target_ulong newtls,
6339 abi_ulong child_tidptr)
6341 CPUState *cpu = ENV_GET_CPU(env);
6345 CPUArchState *new_env;
6348 flags &= ~CLONE_IGNORED_FLAGS;
6350 /* Emulate vfork() with fork() */
6351 if (flags & CLONE_VFORK)
6352 flags &= ~(CLONE_VFORK | CLONE_VM);
6354 if (flags & CLONE_VM) {
6355 TaskState *parent_ts = (TaskState *)cpu->opaque;
6356 new_thread_info info;
6357 pthread_attr_t attr;
6359 if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6360 (flags & CLONE_INVALID_THREAD_FLAGS)) {
6361 return -TARGET_EINVAL;
6364 ts = g_new0(TaskState, 1);
6365 init_task_state(ts);
6367 /* Grab a mutex so that thread setup appears atomic. */
6368 pthread_mutex_lock(&clone_lock);
6370 /* we create a new CPU instance. */
6371 new_env = cpu_copy(env);
6372 /* Init regs that differ from the parent. */
6373 cpu_clone_regs(new_env, newsp);
6374 new_cpu = ENV_GET_CPU(new_env);
6375 new_cpu->opaque = ts;
6376 ts->bprm = parent_ts->bprm;
6377 ts->info = parent_ts->info;
6378 ts->signal_mask = parent_ts->signal_mask;
6380 if (flags & CLONE_CHILD_CLEARTID) {
6381 ts->child_tidptr = child_tidptr;
6384 if (flags & CLONE_SETTLS) {
6385 cpu_set_tls (new_env, newtls);
6388 memset(&info, 0, sizeof(info));
6389 pthread_mutex_init(&info.mutex, NULL);
6390 pthread_mutex_lock(&info.mutex);
6391 pthread_cond_init(&info.cond, NULL);
6393 if (flags & CLONE_CHILD_SETTID) {
6394 info.child_tidptr = child_tidptr;
6396 if (flags & CLONE_PARENT_SETTID) {
6397 info.parent_tidptr = parent_tidptr;
6400 ret = pthread_attr_init(&attr);
6401 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6402 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6403 /* It is not safe to deliver signals until the child has finished
6404 initializing, so temporarily block all signals. */
6405 sigfillset(&sigmask);
6406 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6408 /* If this is our first additional thread, we need to ensure we
6409 * generate code for parallel execution and flush old translations.
6411 if (!parallel_cpus) {
6412 parallel_cpus = true;
6416 ret = pthread_create(&info.thread, &attr, clone_func, &info);
6417 /* TODO: Free new CPU state if thread creation failed. */
6419 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6420 pthread_attr_destroy(&attr);
6422 /* Wait for the child to initialize. */
6423 pthread_cond_wait(&info.cond, &info.mutex);
6428 pthread_mutex_unlock(&info.mutex);
6429 pthread_cond_destroy(&info.cond);
6430 pthread_mutex_destroy(&info.mutex);
6431 pthread_mutex_unlock(&clone_lock);
6433 /* if no CLONE_VM, we consider it is a fork */
6434 if (flags & CLONE_INVALID_FORK_FLAGS) {
6435 return -TARGET_EINVAL;
6438 /* We can't support custom termination signals */
6439 if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6440 return -TARGET_EINVAL;
6443 if (block_signals()) {
6444 return -TARGET_ERESTARTSYS;
6450 /* Child Process. */
6451 cpu_clone_regs(env, newsp);
6453 /* There is a race condition here. The parent process could
6454 theoretically read the TID in the child process before the child
6455 tid is set. This would require using either ptrace
6456 (not implemented) or having *_tidptr to point at a shared memory
6457 mapping. We can't repeat the spinlock hack used above because
6458 the child process gets its own copy of the lock. */
6459 if (flags & CLONE_CHILD_SETTID)
6460 put_user_u32(gettid(), child_tidptr);
6461 if (flags & CLONE_PARENT_SETTID)
6462 put_user_u32(gettid(), parent_tidptr);
6463 ts = (TaskState *)cpu->opaque;
6464 if (flags & CLONE_SETTLS)
6465 cpu_set_tls (env, newtls);
6466 if (flags & CLONE_CHILD_CLEARTID)
6467 ts->child_tidptr = child_tidptr;
6475 /* warning : doesn't handle linux specific flags... */
6476 static int target_to_host_fcntl_cmd(int cmd)
6479 case TARGET_F_DUPFD:
6480 case TARGET_F_GETFD:
6481 case TARGET_F_SETFD:
6482 case TARGET_F_GETFL:
6483 case TARGET_F_SETFL:
6485 case TARGET_F_GETLK:
6487 case TARGET_F_SETLK:
6489 case TARGET_F_SETLKW:
6491 case TARGET_F_GETOWN:
6493 case TARGET_F_SETOWN:
6495 case TARGET_F_GETSIG:
6497 case TARGET_F_SETSIG:
6499 #if TARGET_ABI_BITS == 32
6500 case TARGET_F_GETLK64:
6502 case TARGET_F_SETLK64:
6504 case TARGET_F_SETLKW64:
6507 case TARGET_F_SETLEASE:
6509 case TARGET_F_GETLEASE:
6511 #ifdef F_DUPFD_CLOEXEC
6512 case TARGET_F_DUPFD_CLOEXEC:
6513 return F_DUPFD_CLOEXEC;
6515 case TARGET_F_NOTIFY:
6518 case TARGET_F_GETOWN_EX:
6522 case TARGET_F_SETOWN_EX:
6526 case TARGET_F_SETPIPE_SZ:
6527 return F_SETPIPE_SZ;
6528 case TARGET_F_GETPIPE_SZ:
6529 return F_GETPIPE_SZ;
6532 return -TARGET_EINVAL;
6534 return -TARGET_EINVAL;
6537 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
6538 static const bitmask_transtbl flock_tbl[] = {
6539 TRANSTBL_CONVERT(F_RDLCK),
6540 TRANSTBL_CONVERT(F_WRLCK),
6541 TRANSTBL_CONVERT(F_UNLCK),
6542 TRANSTBL_CONVERT(F_EXLCK),
6543 TRANSTBL_CONVERT(F_SHLCK),
6547 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6548 abi_ulong target_flock_addr)
6550 struct target_flock *target_fl;
6553 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6554 return -TARGET_EFAULT;
6557 __get_user(l_type, &target_fl->l_type);
6558 fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
6559 __get_user(fl->l_whence, &target_fl->l_whence);
6560 __get_user(fl->l_start, &target_fl->l_start);
6561 __get_user(fl->l_len, &target_fl->l_len);
6562 __get_user(fl->l_pid, &target_fl->l_pid);
6563 unlock_user_struct(target_fl, target_flock_addr, 0);
6567 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6568 const struct flock64 *fl)
6570 struct target_flock *target_fl;
6573 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6574 return -TARGET_EFAULT;
6577 l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
6578 __put_user(l_type, &target_fl->l_type);
6579 __put_user(fl->l_whence, &target_fl->l_whence);
6580 __put_user(fl->l_start, &target_fl->l_start);
6581 __put_user(fl->l_len, &target_fl->l_len);
6582 __put_user(fl->l_pid, &target_fl->l_pid);
6583 unlock_user_struct(target_fl, target_flock_addr, 1);
6587 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6588 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6590 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6591 static inline abi_long copy_from_user_eabi_flock64(struct flock64 *fl,
6592 abi_ulong target_flock_addr)
6594 struct target_eabi_flock64 *target_fl;
6597 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6598 return -TARGET_EFAULT;
6601 __get_user(l_type, &target_fl->l_type);
6602 fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
6603 __get_user(fl->l_whence, &target_fl->l_whence);
6604 __get_user(fl->l_start, &target_fl->l_start);
6605 __get_user(fl->l_len, &target_fl->l_len);
6606 __get_user(fl->l_pid, &target_fl->l_pid);
6607 unlock_user_struct(target_fl, target_flock_addr, 0);
6611 static inline abi_long copy_to_user_eabi_flock64(abi_ulong target_flock_addr,
6612 const struct flock64 *fl)
6614 struct target_eabi_flock64 *target_fl;
6617 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6618 return -TARGET_EFAULT;
6621 l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
6622 __put_user(l_type, &target_fl->l_type);
6623 __put_user(fl->l_whence, &target_fl->l_whence);
6624 __put_user(fl->l_start, &target_fl->l_start);
6625 __put_user(fl->l_len, &target_fl->l_len);
6626 __put_user(fl->l_pid, &target_fl->l_pid);
6627 unlock_user_struct(target_fl, target_flock_addr, 1);
6632 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6633 abi_ulong target_flock_addr)
6635 struct target_flock64 *target_fl;
6638 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6639 return -TARGET_EFAULT;
6642 __get_user(l_type, &target_fl->l_type);
6643 fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
6644 __get_user(fl->l_whence, &target_fl->l_whence);
6645 __get_user(fl->l_start, &target_fl->l_start);
6646 __get_user(fl->l_len, &target_fl->l_len);
6647 __get_user(fl->l_pid, &target_fl->l_pid);
6648 unlock_user_struct(target_fl, target_flock_addr, 0);
6652 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6653 const struct flock64 *fl)
6655 struct target_flock64 *target_fl;
6658 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6659 return -TARGET_EFAULT;
6662 l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
6663 __put_user(l_type, &target_fl->l_type);
6664 __put_user(fl->l_whence, &target_fl->l_whence);
6665 __put_user(fl->l_start, &target_fl->l_start);
6666 __put_user(fl->l_len, &target_fl->l_len);
6667 __put_user(fl->l_pid, &target_fl->l_pid);
6668 unlock_user_struct(target_fl, target_flock_addr, 1);
6672 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6674 struct flock64 fl64;
6676 struct f_owner_ex fox;
6677 struct target_f_owner_ex *target_fox;
6680 int host_cmd = target_to_host_fcntl_cmd(cmd);
6682 if (host_cmd == -TARGET_EINVAL)
6686 case TARGET_F_GETLK:
6687 ret = copy_from_user_flock(&fl64, arg);
6691 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6693 ret = copy_to_user_flock(arg, &fl64);
6697 case TARGET_F_SETLK:
6698 case TARGET_F_SETLKW:
6699 ret = copy_from_user_flock(&fl64, arg);
6703 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6706 case TARGET_F_GETLK64:
6707 ret = copy_from_user_flock64(&fl64, arg);
6711 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6713 ret = copy_to_user_flock64(arg, &fl64);
6716 case TARGET_F_SETLK64:
6717 case TARGET_F_SETLKW64:
6718 ret = copy_from_user_flock64(&fl64, arg);
6722 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6725 case TARGET_F_GETFL:
6726 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6728 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6732 case TARGET_F_SETFL:
6733 ret = get_errno(safe_fcntl(fd, host_cmd,
6734 target_to_host_bitmask(arg,
6739 case TARGET_F_GETOWN_EX:
6740 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6742 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6743 return -TARGET_EFAULT;
6744 target_fox->type = tswap32(fox.type);
6745 target_fox->pid = tswap32(fox.pid);
6746 unlock_user_struct(target_fox, arg, 1);
6752 case TARGET_F_SETOWN_EX:
6753 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6754 return -TARGET_EFAULT;
6755 fox.type = tswap32(target_fox->type);
6756 fox.pid = tswap32(target_fox->pid);
6757 unlock_user_struct(target_fox, arg, 0);
6758 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6762 case TARGET_F_SETOWN:
6763 case TARGET_F_GETOWN:
6764 case TARGET_F_SETSIG:
6765 case TARGET_F_GETSIG:
6766 case TARGET_F_SETLEASE:
6767 case TARGET_F_GETLEASE:
6768 case TARGET_F_SETPIPE_SZ:
6769 case TARGET_F_GETPIPE_SZ:
6770 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6774 ret = get_errno(safe_fcntl(fd, cmd, arg));
6782 static inline int high2lowuid(int uid)
6790 static inline int high2lowgid(int gid)
6798 static inline int low2highuid(int uid)
6800 if ((int16_t)uid == -1)
6806 static inline int low2highgid(int gid)
6808 if ((int16_t)gid == -1)
6813 static inline int tswapid(int id)
6818 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6820 #else /* !USE_UID16 */
6821 static inline int high2lowuid(int uid)
6825 static inline int high2lowgid(int gid)
6829 static inline int low2highuid(int uid)
6833 static inline int low2highgid(int gid)
6837 static inline int tswapid(int id)
6842 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6844 #endif /* USE_UID16 */
6846 /* We must do direct syscalls for setting UID/GID, because we want to
6847 * implement the Linux system call semantics of "change only for this thread",
6848 * not the libc/POSIX semantics of "change for all threads in process".
6849 * (See http://ewontfix.com/17/ for more details.)
6850 * We use the 32-bit version of the syscalls if present; if it is not
6851 * then either the host architecture supports 32-bit UIDs natively with
6852 * the standard syscall, or the 16-bit UID is the best we can do.
6854 #ifdef __NR_setuid32
6855 #define __NR_sys_setuid __NR_setuid32
6857 #define __NR_sys_setuid __NR_setuid
6859 #ifdef __NR_setgid32
6860 #define __NR_sys_setgid __NR_setgid32
6862 #define __NR_sys_setgid __NR_setgid
6864 #ifdef __NR_setresuid32
6865 #define __NR_sys_setresuid __NR_setresuid32
6867 #define __NR_sys_setresuid __NR_setresuid
6869 #ifdef __NR_setresgid32
6870 #define __NR_sys_setresgid __NR_setresgid32
6872 #define __NR_sys_setresgid __NR_setresgid
6875 _syscall1(int, sys_setuid, uid_t, uid)
6876 _syscall1(int, sys_setgid, gid_t, gid)
6877 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6878 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
6880 void syscall_init(void)
6883 const argtype *arg_type;
6887 thunk_init(STRUCT_MAX);
6889 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6890 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6891 #include "syscall_types.h"
6893 #undef STRUCT_SPECIAL
6895 /* Build target_to_host_errno_table[] table from
6896 * host_to_target_errno_table[]. */
6897 for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6898 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6901 /* we patch the ioctl size if necessary. We rely on the fact that
6902 no ioctl has all the bits at '1' in the size field */
6904 while (ie->target_cmd != 0) {
6905 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6906 TARGET_IOC_SIZEMASK) {
6907 arg_type = ie->arg_type;
6908 if (arg_type[0] != TYPE_PTR) {
6909 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6914 size = thunk_type_size(arg_type, 0);
6915 ie->target_cmd = (ie->target_cmd &
6916 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6917 (size << TARGET_IOC_SIZESHIFT);
6920 /* automatic consistency check if same arch */
6921 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6922 (defined(__x86_64__) && defined(TARGET_X86_64))
6923 if (unlikely(ie->target_cmd != ie->host_cmd)) {
6924 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6925 ie->name, ie->target_cmd, ie->host_cmd);
6932 #if TARGET_ABI_BITS == 32
6933 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
6935 #ifdef TARGET_WORDS_BIGENDIAN
6936 return ((uint64_t)word0 << 32) | word1;
6938 return ((uint64_t)word1 << 32) | word0;
6941 #else /* TARGET_ABI_BITS == 32 */
6942 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
6946 #endif /* TARGET_ABI_BITS != 32 */
6948 #ifdef TARGET_NR_truncate64
6949 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6954 if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
6958 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6962 #ifdef TARGET_NR_ftruncate64
6963 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6968 if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
6972 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
6976 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
6977 abi_ulong target_addr)
6979 struct target_timespec *target_ts;
6981 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
6982 return -TARGET_EFAULT;
6983 __get_user(host_ts->tv_sec, &target_ts->tv_sec);
6984 __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6985 unlock_user_struct(target_ts, target_addr, 0);
6989 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
6990 struct timespec *host_ts)
6992 struct target_timespec *target_ts;
6994 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
6995 return -TARGET_EFAULT;
6996 __put_user(host_ts->tv_sec, &target_ts->tv_sec);
6997 __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6998 unlock_user_struct(target_ts, target_addr, 1);
7002 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
7003 abi_ulong target_addr)
7005 struct target_itimerspec *target_itspec;
7007 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
7008 return -TARGET_EFAULT;
7011 host_itspec->it_interval.tv_sec =
7012 tswapal(target_itspec->it_interval.tv_sec);
7013 host_itspec->it_interval.tv_nsec =
7014 tswapal(target_itspec->it_interval.tv_nsec);
7015 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
7016 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
7018 unlock_user_struct(target_itspec, target_addr, 1);
7022 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7023 struct itimerspec *host_its)
7025 struct target_itimerspec *target_itspec;
7027 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
7028 return -TARGET_EFAULT;
7031 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
7032 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
7034 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
7035 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
7037 unlock_user_struct(target_itspec, target_addr, 0);
7041 static inline abi_long target_to_host_timex(struct timex *host_tx,
7042 abi_long target_addr)
7044 struct target_timex *target_tx;
7046 if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7047 return -TARGET_EFAULT;
7050 __get_user(host_tx->modes, &target_tx->modes);
7051 __get_user(host_tx->offset, &target_tx->offset);
7052 __get_user(host_tx->freq, &target_tx->freq);
7053 __get_user(host_tx->maxerror, &target_tx->maxerror);
7054 __get_user(host_tx->esterror, &target_tx->esterror);
7055 __get_user(host_tx->status, &target_tx->status);
7056 __get_user(host_tx->constant, &target_tx->constant);
7057 __get_user(host_tx->precision, &target_tx->precision);
7058 __get_user(host_tx->tolerance, &target_tx->tolerance);
7059 __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7060 __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7061 __get_user(host_tx->tick, &target_tx->tick);
7062 __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7063 __get_user(host_tx->jitter, &target_tx->jitter);
7064 __get_user(host_tx->shift, &target_tx->shift);
7065 __get_user(host_tx->stabil, &target_tx->stabil);
7066 __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7067 __get_user(host_tx->calcnt, &target_tx->calcnt);
7068 __get_user(host_tx->errcnt, &target_tx->errcnt);
7069 __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7070 __get_user(host_tx->tai, &target_tx->tai);
7072 unlock_user_struct(target_tx, target_addr, 0);
7076 static inline abi_long host_to_target_timex(abi_long target_addr,
7077 struct timex *host_tx)
7079 struct target_timex *target_tx;
7081 if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7082 return -TARGET_EFAULT;
7085 __put_user(host_tx->modes, &target_tx->modes);
7086 __put_user(host_tx->offset, &target_tx->offset);
7087 __put_user(host_tx->freq, &target_tx->freq);
7088 __put_user(host_tx->maxerror, &target_tx->maxerror);
7089 __put_user(host_tx->esterror, &target_tx->esterror);
7090 __put_user(host_tx->status, &target_tx->status);
7091 __put_user(host_tx->constant, &target_tx->constant);
7092 __put_user(host_tx->precision, &target_tx->precision);
7093 __put_user(host_tx->tolerance, &target_tx->tolerance);
7094 __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7095 __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7096 __put_user(host_tx->tick, &target_tx->tick);
7097 __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7098 __put_user(host_tx->jitter, &target_tx->jitter);
7099 __put_user(host_tx->shift, &target_tx->shift);
7100 __put_user(host_tx->stabil, &target_tx->stabil);
7101 __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7102 __put_user(host_tx->calcnt, &target_tx->calcnt);
7103 __put_user(host_tx->errcnt, &target_tx->errcnt);
7104 __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7105 __put_user(host_tx->tai, &target_tx->tai);
7107 unlock_user_struct(target_tx, target_addr, 1);
7112 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7113 abi_ulong target_addr)
7115 struct target_sigevent *target_sevp;
7117 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7118 return -TARGET_EFAULT;
7121 /* This union is awkward on 64 bit systems because it has a 32 bit
7122 * integer and a pointer in it; we follow the conversion approach
7123 * used for handling sigval types in signal.c so the guest should get
7124 * the correct value back even if we did a 64 bit byteswap and it's
7125 * using the 32 bit integer.
7127 host_sevp->sigev_value.sival_ptr =
7128 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7129 host_sevp->sigev_signo =
7130 target_to_host_signal(tswap32(target_sevp->sigev_signo));
7131 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7132 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
7134 unlock_user_struct(target_sevp, target_addr, 1);
7138 #if defined(TARGET_NR_mlockall)
7139 static inline int target_to_host_mlockall_arg(int arg)
7143 if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
7144 result |= MCL_CURRENT;
7146 if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
7147 result |= MCL_FUTURE;
7153 static inline abi_long host_to_target_stat64(void *cpu_env,
7154 abi_ulong target_addr,
7155 struct stat *host_st)
7157 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7158 if (((CPUARMState *)cpu_env)->eabi) {
7159 struct target_eabi_stat64 *target_st;
7161 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7162 return -TARGET_EFAULT;
7163 memset(target_st, 0, sizeof(struct target_eabi_stat64));
7164 __put_user(host_st->st_dev, &target_st->st_dev);
7165 __put_user(host_st->st_ino, &target_st->st_ino);
7166 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7167 __put_user(host_st->st_ino, &target_st->__st_ino);
7169 __put_user(host_st->st_mode, &target_st->st_mode);
7170 __put_user(host_st->st_nlink, &target_st->st_nlink);
7171 __put_user(host_st->st_uid, &target_st->st_uid);
7172 __put_user(host_st->st_gid, &target_st->st_gid);
7173 __put_user(host_st->st_rdev, &target_st->st_rdev);
7174 __put_user(host_st->st_size, &target_st->st_size);
7175 __put_user(host_st->st_blksize, &target_st->st_blksize);
7176 __put_user(host_st->st_blocks, &target_st->st_blocks);
7177 __put_user(host_st->st_atime, &target_st->target_st_atime);
7178 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7179 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7180 unlock_user_struct(target_st, target_addr, 1);
7184 #if defined(TARGET_HAS_STRUCT_STAT64)
7185 struct target_stat64 *target_st;
7187 struct target_stat *target_st;
7190 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7191 return -TARGET_EFAULT;
7192 memset(target_st, 0, sizeof(*target_st));
7193 __put_user(host_st->st_dev, &target_st->st_dev);
7194 __put_user(host_st->st_ino, &target_st->st_ino);
7195 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7196 __put_user(host_st->st_ino, &target_st->__st_ino);
7198 __put_user(host_st->st_mode, &target_st->st_mode);
7199 __put_user(host_st->st_nlink, &target_st->st_nlink);
7200 __put_user(host_st->st_uid, &target_st->st_uid);
7201 __put_user(host_st->st_gid, &target_st->st_gid);
7202 __put_user(host_st->st_rdev, &target_st->st_rdev);
7203 /* XXX: better use of kernel struct */
7204 __put_user(host_st->st_size, &target_st->st_size);
7205 __put_user(host_st->st_blksize, &target_st->st_blksize);
7206 __put_user(host_st->st_blocks, &target_st->st_blocks);
7207 __put_user(host_st->st_atime, &target_st->target_st_atime);
7208 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7209 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7210 unlock_user_struct(target_st, target_addr, 1);
7216 /* ??? Using host futex calls even when target atomic operations
7217 are not really atomic probably breaks things. However implementing
7218 futexes locally would make futexes shared between multiple processes
7219 tricky. However they're probably useless because guest atomic
7220 operations won't work either. */
7221 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
7222 target_ulong uaddr2, int val3)
7224 struct timespec ts, *pts;
7227 /* ??? We assume FUTEX_* constants are the same on both host
7229 #ifdef FUTEX_CMD_MASK
7230 base_op = op & FUTEX_CMD_MASK;
7236 case FUTEX_WAIT_BITSET:
7239 target_to_host_timespec(pts, timeout);
7243 return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
7246 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
7248 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
7250 case FUTEX_CMP_REQUEUE:
7252 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7253 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7254 But the prototype takes a `struct timespec *'; insert casts
7255 to satisfy the compiler. We do not need to tswap TIMEOUT
7256 since it's not compared to guest memory. */
7257 pts = (struct timespec *)(uintptr_t) timeout;
7258 return get_errno(safe_futex(g2h(uaddr), op, val, pts,
7260 (base_op == FUTEX_CMP_REQUEUE
7264 return -TARGET_ENOSYS;
7267 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7268 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7269 abi_long handle, abi_long mount_id,
7272 struct file_handle *target_fh;
7273 struct file_handle *fh;
7277 unsigned int size, total_size;
7279 if (get_user_s32(size, handle)) {
7280 return -TARGET_EFAULT;
7283 name = lock_user_string(pathname);
7285 return -TARGET_EFAULT;
7288 total_size = sizeof(struct file_handle) + size;
7289 target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7291 unlock_user(name, pathname, 0);
7292 return -TARGET_EFAULT;
7295 fh = g_malloc0(total_size);
7296 fh->handle_bytes = size;
7298 ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7299 unlock_user(name, pathname, 0);
7301 /* man name_to_handle_at(2):
7302 * Other than the use of the handle_bytes field, the caller should treat
7303 * the file_handle structure as an opaque data type
7306 memcpy(target_fh, fh, total_size);
7307 target_fh->handle_bytes = tswap32(fh->handle_bytes);
7308 target_fh->handle_type = tswap32(fh->handle_type);
7310 unlock_user(target_fh, handle, total_size);
7312 if (put_user_s32(mid, mount_id)) {
7313 return -TARGET_EFAULT;
7321 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7322 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7325 struct file_handle *target_fh;
7326 struct file_handle *fh;
7327 unsigned int size, total_size;
7330 if (get_user_s32(size, handle)) {
7331 return -TARGET_EFAULT;
7334 total_size = sizeof(struct file_handle) + size;
7335 target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7337 return -TARGET_EFAULT;
7340 fh = g_memdup(target_fh, total_size);
7341 fh->handle_bytes = size;
7342 fh->handle_type = tswap32(target_fh->handle_type);
7344 ret = get_errno(open_by_handle_at(mount_fd, fh,
7345 target_to_host_bitmask(flags, fcntl_flags_tbl)));
7349 unlock_user(target_fh, handle, total_size);
7355 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7357 /* signalfd siginfo conversion */
7360 host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
7361 const struct signalfd_siginfo *info)
7363 int sig = host_to_target_signal(info->ssi_signo);
7365 /* linux/signalfd.h defines a ssi_addr_lsb
7366 * not defined in sys/signalfd.h but used by some kernels
7369 #ifdef BUS_MCEERR_AO
7370 if (tinfo->ssi_signo == SIGBUS &&
7371 (tinfo->ssi_code == BUS_MCEERR_AR ||
7372 tinfo->ssi_code == BUS_MCEERR_AO)) {
7373 uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
7374 uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
7375 *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
7379 tinfo->ssi_signo = tswap32(sig);
7380 tinfo->ssi_errno = tswap32(tinfo->ssi_errno);
7381 tinfo->ssi_code = tswap32(info->ssi_code);
7382 tinfo->ssi_pid = tswap32(info->ssi_pid);
7383 tinfo->ssi_uid = tswap32(info->ssi_uid);
7384 tinfo->ssi_fd = tswap32(info->ssi_fd);
7385 tinfo->ssi_tid = tswap32(info->ssi_tid);
7386 tinfo->ssi_band = tswap32(info->ssi_band);
7387 tinfo->ssi_overrun = tswap32(info->ssi_overrun);
7388 tinfo->ssi_trapno = tswap32(info->ssi_trapno);
7389 tinfo->ssi_status = tswap32(info->ssi_status);
7390 tinfo->ssi_int = tswap32(info->ssi_int);
7391 tinfo->ssi_ptr = tswap64(info->ssi_ptr);
7392 tinfo->ssi_utime = tswap64(info->ssi_utime);
7393 tinfo->ssi_stime = tswap64(info->ssi_stime);
7394 tinfo->ssi_addr = tswap64(info->ssi_addr);
7397 static abi_long host_to_target_data_signalfd(void *buf, size_t len)
7401 for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) {
7402 host_to_target_signalfd_siginfo(buf + i, buf + i);
7408 static TargetFdTrans target_signalfd_trans = {
7409 .host_to_target_data = host_to_target_data_signalfd,
7412 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7415 target_sigset_t *target_mask;
7419 if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
7420 return -TARGET_EINVAL;
7422 if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7423 return -TARGET_EFAULT;
7426 target_to_host_sigset(&host_mask, target_mask);
7428 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7430 ret = get_errno(signalfd(fd, &host_mask, host_flags));
7432 fd_trans_register(ret, &target_signalfd_trans);
7435 unlock_user_struct(target_mask, mask, 0);
7441 /* Map host to target signal numbers for the wait family of syscalls.
7442 Assume all other status bits are the same. */
7443 int host_to_target_waitstatus(int status)
7445 if (WIFSIGNALED(status)) {
7446 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7448 if (WIFSTOPPED(status)) {
7449 return (host_to_target_signal(WSTOPSIG(status)) << 8)
7455 static int open_self_cmdline(void *cpu_env, int fd)
7457 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7458 struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7461 for (i = 0; i < bprm->argc; i++) {
7462 size_t len = strlen(bprm->argv[i]) + 1;
7464 if (write(fd, bprm->argv[i], len) != len) {
7472 static int open_self_maps(void *cpu_env, int fd)
7474 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7475 TaskState *ts = cpu->opaque;
7481 fp = fopen("/proc/self/maps", "r");
7486 while ((read = getline(&line, &len, fp)) != -1) {
7487 int fields, dev_maj, dev_min, inode;
7488 uint64_t min, max, offset;
7489 char flag_r, flag_w, flag_x, flag_p;
7490 char path[512] = "";
7491 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
7492 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
7493 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
7495 if ((fields < 10) || (fields > 11)) {
7498 if (h2g_valid(min)) {
7499 int flags = page_get_flags(h2g(min));
7500 max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1;
7501 if (page_check_range(h2g(min), max - min, flags) == -1) {
7504 if (h2g(min) == ts->info->stack_limit) {
7505 pstrcpy(path, sizeof(path), " [stack]");
7507 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
7508 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
7509 h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
7510 flag_x, flag_p, offset, dev_maj, dev_min, inode,
7511 path[0] ? " " : "", path);
7521 static int open_self_stat(void *cpu_env, int fd)
7523 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7524 TaskState *ts = cpu->opaque;
7525 abi_ulong start_stack = ts->info->start_stack;
7528 for (i = 0; i < 44; i++) {
7536 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7537 } else if (i == 1) {
7539 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
7540 } else if (i == 27) {
7543 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7545 /* for the rest, there is MasterCard */
7546 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
7550 if (write(fd, buf, len) != len) {
7558 static int open_self_auxv(void *cpu_env, int fd)
7560 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7561 TaskState *ts = cpu->opaque;
7562 abi_ulong auxv = ts->info->saved_auxv;
7563 abi_ulong len = ts->info->auxv_len;
7567 * Auxiliary vector is stored in target process stack.
7568 * read in whole auxv vector and copy it to file
7570 ptr = lock_user(VERIFY_READ, auxv, len, 0);
7574 r = write(fd, ptr, len);
7581 lseek(fd, 0, SEEK_SET);
7582 unlock_user(ptr, auxv, len);
7588 static int is_proc_myself(const char *filename, const char *entry)
7590 if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
7591 filename += strlen("/proc/");
7592 if (!strncmp(filename, "self/", strlen("self/"))) {
7593 filename += strlen("self/");
7594 } else if (*filename >= '1' && *filename <= '9') {
7596 snprintf(myself, sizeof(myself), "%d/", getpid());
7597 if (!strncmp(filename, myself, strlen(myself))) {
7598 filename += strlen(myself);
7605 if (!strcmp(filename, entry)) {
7612 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/*
 * Exact-match path comparator used by the fake-/proc open table when
 * host and target endianness differ: unlike is_proc_myself() it
 * compares the complete path, not just the trailing entry.
 *
 * @filename: path the guest tried to open
 * @entry:    absolute /proc path to compare against
 *
 * Returns nonzero iff the two strings are identical.
 *
 * (Reconstructed: the extraction dropped the function braces and left
 * line-number artifacts on this block.)
 */
static int is_proc(const char *filename, const char *entry)
{
    return strcmp(filename, entry) == 0;
}
/*
 * Fake /proc/net/route for cross-endian guests: re-emit the host's
 * routing table with the 32-bit address columns (destination, gateway,
 * netmask) byte-swapped to target order; all other columns pass
 * through unchanged.
 * NOTE(review): elided excerpt -- the fopen NULL check, iface buffer
 * declaration, getline cleanup and return are not visible here.
 */
7618 static int open_net_route(void *cpu_env, int fd)
7625 fp = fopen("/proc/net/route", "r");
/* copy the header line through unchanged */
7632 read = getline(&line, &len, fp);
7633 dprintf(fd, "%s", line);
7637 while ((read = getline(&line, &len, fp)) != -1) {
7639 uint32_t dest, gw, mask;
7640 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
/* NOTE(review): unbounded "%s" into iface -- relies on /proc lines staying short; consider a width limit */
7641 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7642 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7643 &mask, &mtu, &window, &irtt);
/* swap only the address-valued columns to guest byte order */
7644 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7645 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7646 metric, tswap32(mask), mtu, window, irtt);
/*
 * do_openat: openat(2) with /proc interception.  Opens of selected
 * /proc files are answered with content synthesized into a temporary
 * file, because the host's versions would describe the wrong process
 * (pid, maps, auxv) or the wrong byte order (net/route).
 * NOTE(review): elided excerpt -- the TMPDIR fallback, mkstemp error
 * handling, unlink of the temp file and fill-failure cleanup are not
 * visible here.
 */
7656 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
/* fake_open entry: /proc name, content generator, and path matcher */
7659 const char *filename;
7660 int (*fill)(void *cpu_env, int fd);
7661 int (*cmp)(const char *s1, const char *s2);
7663 const struct fake_open *fake_open;
7664 static const struct fake_open fakes[] = {
7665 { "maps", open_self_maps, is_proc_myself },
7666 { "stat", open_self_stat, is_proc_myself },
7667 { "auxv", open_self_auxv, is_proc_myself },
7668 { "cmdline", open_self_cmdline, is_proc_myself },
7669 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7670 { "/proc/net/route", open_net_route, is_proc },
7672 { NULL, NULL, NULL }
/* /proc/self/exe: reuse the execfd handed over by the loader, if any */
7675 if (is_proc_myself(pathname, "exe")) {
7676 int execfd = qemu_getauxval(AT_EXECFD);
7677 return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
7680 for (fake_open = fakes; fake_open->filename; fake_open++) {
7681 if (fake_open->cmp(pathname, fake_open->filename)) {
/* a table entry matched: build the faked content in a temp file */
7686 if (fake_open->filename) {
7688 char filename[PATH_MAX];
7691 /* create temporary file to map stat to */
7692 tmpdir = getenv("TMPDIR");
7695 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
7696 fd = mkstemp(filename);
/* let the matched entry write the synthesized contents */
7702 if ((r = fake_open->fill(cpu_env, fd))) {
/* rewind so the guest reads from offset 0 */
7708 lseek(fd, 0, SEEK_SET);
/* not a faked file: forward to the host, remapping via path() */
7713 return safe_openat(dirfd, path(pathname), flags, mode);
/* Guest-visible POSIX timer ids carry a magic tag in the upper 16 bits
 * and the internal table index in the lower 16 bits. */
7716 #define TIMER_MAGIC 0x0caf0000
7717 #define TIMER_MAGIC_MASK 0xffff0000
7719 /* Convert QEMU provided timer ID back to internal 16bit index format */
/*
 * Validate a timer id from the guest and return the table index, or
 * -TARGET_EINVAL for a malformed or out-of-range id.
 * NOTE(review): elided excerpt -- the masking down to the low 16 bits
 * and the final return of the index are not visible here.
 */
7720 static target_timer_t get_timer_id(abi_long arg)
7722 target_timer_t timerid = arg;
/* reject ids that do not carry the magic tag */
7724 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7725 return -TARGET_EINVAL;
/* reject indices beyond the static timer table */
7730 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7731 return -TARGET_EINVAL;
/*
 * Byte-swap an eventfd payload between host and target order in
 * place.  eventfd transfers 64-bit counters, so buffers shorter than
 * 8 bytes are rejected and the data is swapped one uint64_t at a
 * time.
 * NOTE(review): elided excerpt -- the short-buffer error return, the
 * counter++ advance and the final return are not visible here.
 */
7737 static abi_long swap_data_eventfd(void *buf, size_t len)
7739 uint64_t *counter = buf;
7742 if (len < sizeof(uint64_t)) {
7746 for (i = 0; i < len; i += sizeof(uint64_t)) {
7747 *counter = tswap64(*counter);
/* byte-swapping is symmetric, so one routine serves both directions */
7754 static TargetFdTrans target_eventfd_trans = {
7755 .host_to_target_data = swap_data_eventfd,
7756 .target_to_host_data = swap_data_eventfd,
7759 #if (defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)) || \
7760 (defined(CONFIG_INOTIFY1) && defined(TARGET_NR_inotify_init1) && \
7761 defined(__NR_inotify_init1))
/*
 * Convert a buffer of struct inotify_event records read from the host
 * to target byte order in place.  Each record is followed by ev->len
 * bytes of name, so the stride is sizeof(struct inotify_event) +
 * name_len.
 * NOTE(review): elided excerpt -- the line that captures name_len
 * from ev->len *before* it is swapped, and the final return, are not
 * visible here.
 */
7762 static abi_long host_to_target_data_inotify(void *buf, size_t len)
7764 struct inotify_event *ev;
7768 for (i = 0; i < len; i += sizeof(struct inotify_event) + name_len) {
7769 ev = (struct inotify_event *)((char *)buf + i);
7772 ev->wd = tswap32(ev->wd);
7773 ev->mask = tswap32(ev->mask);
7774 ev->cookie = tswap32(ev->cookie);
7775 ev->len = tswap32(name_len);
/* inotify fds are read-only, so only the host->target hook is needed */
7781 static TargetFdTrans target_inotify_trans = {
7782 .host_to_target_data = host_to_target_data_inotify,
/*
 * Copy a guest sched_{get,set}affinity cpu mask into a host cpu mask.
 * The guest mask is an array of abi_ulong words; bits are transferred
 * one at a time so differences in word size and endianness between
 * guest and host do not matter.  Returns 0 or -TARGET_EFAULT.
 * NOTE(review): elided excerpt -- loop-variable declarations and the
 * final "return 0" are not visible here.
 */
7786 static int target_to_host_cpu_mask(unsigned long *host_mask,
7788 abi_ulong target_addr,
7791 unsigned target_bits = sizeof(abi_ulong) * 8;
7792 unsigned host_bits = sizeof(*host_mask) * 8;
7793 abi_ulong *target_mask;
/* callers guarantee the host buffer can hold every guest bit */
7796 assert(host_size >= target_size);
7798 target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
7800 return -TARGET_EFAULT;
/* start from an all-clear host mask, then set bits individually */
7802 memset(host_mask, 0, host_size);
7804 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7805 unsigned bit = i * target_bits;
7808 __get_user(val, &target_mask[i]);
7809 for (j = 0; j < target_bits; j++, bit++) {
7810 if (val & (1UL << j)) {
7811 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
/* read-only lock: nothing to copy back */
7816 unlock_user(target_mask, target_addr, 0);
/*
 * Inverse of target_to_host_cpu_mask(): copy a host cpu mask back
 * into guest memory, bit by bit, in guest word size and byte order.
 * Returns 0 or -TARGET_EFAULT.
 * NOTE(review): elided excerpt -- the per-word val accumulator
 * (initialization and bit-set) and the final "return 0" are not
 * visible here.
 */
7820 static int host_to_target_cpu_mask(const unsigned long *host_mask,
7822 abi_ulong target_addr,
7825 unsigned target_bits = sizeof(abi_ulong) * 8;
7826 unsigned host_bits = sizeof(*host_mask) * 8;
7827 abi_ulong *target_mask;
/* callers guarantee the host mask covers every guest bit */
7830 assert(host_size >= target_size);
7832 target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
7834 return -TARGET_EFAULT;
7837 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7838 unsigned bit = i * target_bits;
7841 for (j = 0; j < target_bits; j++, bit++) {
7842 if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
/* write the assembled guest word in target byte order */
7846 __put_user(val, &target_mask[i]);
/* write-back lock: flush target_size bytes to guest memory */
7849 unlock_user(target_mask, target_addr, target_size);
7853 /* do_syscall() should always have a single exit point at the end so
7854 that actions, such as logging of syscall results, can be performed.
7855 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
7856 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
7857 abi_long arg2, abi_long arg3, abi_long arg4,
7858 abi_long arg5, abi_long arg6, abi_long arg7,
7861 CPUState *cpu = ENV_GET_CPU(cpu_env);
7867 #if defined(DEBUG_ERESTARTSYS)
7868 /* Debug-only code for exercising the syscall-restart code paths
7869 * in the per-architecture cpu main loops: restart every syscall
7870 * the guest makes once before letting it through.
7877 return -TARGET_ERESTARTSYS;
7883 gemu_log("syscall %d", num);
7885 trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
7887 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
7890 case TARGET_NR_exit:
7891 /* In old applications this may be used to implement _exit(2).
7892 However in threaded applictions it is used for thread termination,
7893 and _exit_group is used for application termination.
7894 Do thread termination if we have more then one thread. */
7896 if (block_signals()) {
7897 ret = -TARGET_ERESTARTSYS;
7903 if (CPU_NEXT(first_cpu)) {
7906 /* Remove the CPU from the list. */
7907 QTAILQ_REMOVE(&cpus, cpu, node);
7912 if (ts->child_tidptr) {
7913 put_user_u32(0, ts->child_tidptr);
7914 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7918 object_unref(OBJECT(cpu));
7920 rcu_unregister_thread();
7928 gdb_exit(cpu_env, arg1);
7930 ret = 0; /* avoid warning */
7932 case TARGET_NR_read:
7936 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7938 ret = get_errno(safe_read(arg1, p, arg3));
7940 fd_trans_host_to_target_data(arg1)) {
7941 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7943 unlock_user(p, arg2, ret);
7946 case TARGET_NR_write:
7947 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7949 if (fd_trans_target_to_host_data(arg1)) {
7950 void *copy = g_malloc(arg3);
7951 memcpy(copy, p, arg3);
7952 ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
7954 ret = get_errno(safe_write(arg1, copy, ret));
7958 ret = get_errno(safe_write(arg1, p, arg3));
7960 unlock_user(p, arg2, 0);
7962 #ifdef TARGET_NR_open
7963 case TARGET_NR_open:
7964 if (!(p = lock_user_string(arg1)))
7966 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7967 target_to_host_bitmask(arg2, fcntl_flags_tbl),
7969 fd_trans_unregister(ret);
7970 unlock_user(p, arg1, 0);
7973 case TARGET_NR_openat:
7974 if (!(p = lock_user_string(arg2)))
7976 ret = get_errno(do_openat(cpu_env, arg1, p,
7977 target_to_host_bitmask(arg3, fcntl_flags_tbl),
7979 fd_trans_unregister(ret);
7980 unlock_user(p, arg2, 0);
7982 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7983 case TARGET_NR_name_to_handle_at:
7984 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7987 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7988 case TARGET_NR_open_by_handle_at:
7989 ret = do_open_by_handle_at(arg1, arg2, arg3);
7990 fd_trans_unregister(ret);
7993 case TARGET_NR_close:
7994 fd_trans_unregister(arg1);
7995 ret = get_errno(close(arg1));
8000 #ifdef TARGET_NR_fork
8001 case TARGET_NR_fork:
8002 ret = get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8005 #ifdef TARGET_NR_waitpid
8006 case TARGET_NR_waitpid:
8009 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8010 if (!is_error(ret) && arg2 && ret
8011 && put_user_s32(host_to_target_waitstatus(status), arg2))
8016 #ifdef TARGET_NR_waitid
8017 case TARGET_NR_waitid:
8021 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8022 if (!is_error(ret) && arg3 && info.si_pid != 0) {
8023 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8025 host_to_target_siginfo(p, &info);
8026 unlock_user(p, arg3, sizeof(target_siginfo_t));
8031 #ifdef TARGET_NR_creat /* not on alpha */
8032 case TARGET_NR_creat:
8033 if (!(p = lock_user_string(arg1)))
8035 ret = get_errno(creat(p, arg2));
8036 fd_trans_unregister(ret);
8037 unlock_user(p, arg1, 0);
8040 #ifdef TARGET_NR_link
8041 case TARGET_NR_link:
8044 p = lock_user_string(arg1);
8045 p2 = lock_user_string(arg2);
8047 ret = -TARGET_EFAULT;
8049 ret = get_errno(link(p, p2));
8050 unlock_user(p2, arg2, 0);
8051 unlock_user(p, arg1, 0);
8055 #if defined(TARGET_NR_linkat)
8056 case TARGET_NR_linkat:
8061 p = lock_user_string(arg2);
8062 p2 = lock_user_string(arg4);
8064 ret = -TARGET_EFAULT;
8066 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8067 unlock_user(p, arg2, 0);
8068 unlock_user(p2, arg4, 0);
8072 #ifdef TARGET_NR_unlink
8073 case TARGET_NR_unlink:
8074 if (!(p = lock_user_string(arg1)))
8076 ret = get_errno(unlink(p));
8077 unlock_user(p, arg1, 0);
8080 #if defined(TARGET_NR_unlinkat)
8081 case TARGET_NR_unlinkat:
8082 if (!(p = lock_user_string(arg2)))
8084 ret = get_errno(unlinkat(arg1, p, arg3));
8085 unlock_user(p, arg2, 0);
8088 case TARGET_NR_execve:
8090 char **argp, **envp;
8093 abi_ulong guest_argp;
8094 abi_ulong guest_envp;
8101 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8102 if (get_user_ual(addr, gp))
8110 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8111 if (get_user_ual(addr, gp))
8118 argp = g_new0(char *, argc + 1);
8119 envp = g_new0(char *, envc + 1);
8121 for (gp = guest_argp, q = argp; gp;
8122 gp += sizeof(abi_ulong), q++) {
8123 if (get_user_ual(addr, gp))
8127 if (!(*q = lock_user_string(addr)))
8129 total_size += strlen(*q) + 1;
8133 for (gp = guest_envp, q = envp; gp;
8134 gp += sizeof(abi_ulong), q++) {
8135 if (get_user_ual(addr, gp))
8139 if (!(*q = lock_user_string(addr)))
8141 total_size += strlen(*q) + 1;
8145 if (!(p = lock_user_string(arg1)))
8147 /* Although execve() is not an interruptible syscall it is
8148 * a special case where we must use the safe_syscall wrapper:
8149 * if we allow a signal to happen before we make the host
8150 * syscall then we will 'lose' it, because at the point of
8151 * execve the process leaves QEMU's control. So we use the
8152 * safe syscall wrapper to ensure that we either take the
8153 * signal as a guest signal, or else it does not happen
8154 * before the execve completes and makes it the other
8155 * program's problem.
8157 ret = get_errno(safe_execve(p, argp, envp));
8158 unlock_user(p, arg1, 0);
8163 ret = -TARGET_EFAULT;
8166 for (gp = guest_argp, q = argp; *q;
8167 gp += sizeof(abi_ulong), q++) {
8168 if (get_user_ual(addr, gp)
8171 unlock_user(*q, addr, 0);
8173 for (gp = guest_envp, q = envp; *q;
8174 gp += sizeof(abi_ulong), q++) {
8175 if (get_user_ual(addr, gp)
8178 unlock_user(*q, addr, 0);
8185 case TARGET_NR_chdir:
8186 if (!(p = lock_user_string(arg1)))
8188 ret = get_errno(chdir(p));
8189 unlock_user(p, arg1, 0);
8191 #ifdef TARGET_NR_time
8192 case TARGET_NR_time:
8195 ret = get_errno(time(&host_time));
8198 && put_user_sal(host_time, arg1))
8203 #ifdef TARGET_NR_mknod
8204 case TARGET_NR_mknod:
8205 if (!(p = lock_user_string(arg1)))
8207 ret = get_errno(mknod(p, arg2, arg3));
8208 unlock_user(p, arg1, 0);
8211 #if defined(TARGET_NR_mknodat)
8212 case TARGET_NR_mknodat:
8213 if (!(p = lock_user_string(arg2)))
8215 ret = get_errno(mknodat(arg1, p, arg3, arg4));
8216 unlock_user(p, arg2, 0);
8219 #ifdef TARGET_NR_chmod
8220 case TARGET_NR_chmod:
8221 if (!(p = lock_user_string(arg1)))
8223 ret = get_errno(chmod(p, arg2));
8224 unlock_user(p, arg1, 0);
8227 #ifdef TARGET_NR_break
8228 case TARGET_NR_break:
8231 #ifdef TARGET_NR_oldstat
8232 case TARGET_NR_oldstat:
8235 case TARGET_NR_lseek:
8236 ret = get_errno(lseek(arg1, arg2, arg3));
8238 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8239 /* Alpha specific */
8240 case TARGET_NR_getxpid:
8241 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8242 ret = get_errno(getpid());
8245 #ifdef TARGET_NR_getpid
8246 case TARGET_NR_getpid:
8247 ret = get_errno(getpid());
8250 case TARGET_NR_mount:
8252 /* need to look at the data field */
8256 p = lock_user_string(arg1);
8264 p2 = lock_user_string(arg2);
8267 unlock_user(p, arg1, 0);
8273 p3 = lock_user_string(arg3);
8276 unlock_user(p, arg1, 0);
8278 unlock_user(p2, arg2, 0);
8285 /* FIXME - arg5 should be locked, but it isn't clear how to
8286 * do that since it's not guaranteed to be a NULL-terminated
8290 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8292 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
8294 ret = get_errno(ret);
8297 unlock_user(p, arg1, 0);
8299 unlock_user(p2, arg2, 0);
8301 unlock_user(p3, arg3, 0);
8305 #ifdef TARGET_NR_umount
8306 case TARGET_NR_umount:
8307 if (!(p = lock_user_string(arg1)))
8309 ret = get_errno(umount(p));
8310 unlock_user(p, arg1, 0);
8313 #ifdef TARGET_NR_stime /* not on alpha */
8314 case TARGET_NR_stime:
8317 if (get_user_sal(host_time, arg1))
8319 ret = get_errno(stime(&host_time));
8323 case TARGET_NR_ptrace:
8325 #ifdef TARGET_NR_alarm /* not on alpha */
8326 case TARGET_NR_alarm:
8330 #ifdef TARGET_NR_oldfstat
8331 case TARGET_NR_oldfstat:
8334 #ifdef TARGET_NR_pause /* not on alpha */
8335 case TARGET_NR_pause:
8336 if (!block_signals()) {
8337 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8339 ret = -TARGET_EINTR;
8342 #ifdef TARGET_NR_utime
8343 case TARGET_NR_utime:
8345 struct utimbuf tbuf, *host_tbuf;
8346 struct target_utimbuf *target_tbuf;
8348 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8350 tbuf.actime = tswapal(target_tbuf->actime);
8351 tbuf.modtime = tswapal(target_tbuf->modtime);
8352 unlock_user_struct(target_tbuf, arg2, 0);
8357 if (!(p = lock_user_string(arg1)))
8359 ret = get_errno(utime(p, host_tbuf));
8360 unlock_user(p, arg1, 0);
8364 #ifdef TARGET_NR_utimes
8365 case TARGET_NR_utimes:
8367 struct timeval *tvp, tv[2];
8369 if (copy_from_user_timeval(&tv[0], arg2)
8370 || copy_from_user_timeval(&tv[1],
8371 arg2 + sizeof(struct target_timeval)))
8377 if (!(p = lock_user_string(arg1)))
8379 ret = get_errno(utimes(p, tvp));
8380 unlock_user(p, arg1, 0);
8384 #if defined(TARGET_NR_futimesat)
8385 case TARGET_NR_futimesat:
8387 struct timeval *tvp, tv[2];
8389 if (copy_from_user_timeval(&tv[0], arg3)
8390 || copy_from_user_timeval(&tv[1],
8391 arg3 + sizeof(struct target_timeval)))
8397 if (!(p = lock_user_string(arg2)))
8399 ret = get_errno(futimesat(arg1, path(p), tvp));
8400 unlock_user(p, arg2, 0);
8404 #ifdef TARGET_NR_stty
8405 case TARGET_NR_stty:
8408 #ifdef TARGET_NR_gtty
8409 case TARGET_NR_gtty:
8412 #ifdef TARGET_NR_access
8413 case TARGET_NR_access:
8414 if (!(p = lock_user_string(arg1)))
8416 ret = get_errno(access(path(p), arg2));
8417 unlock_user(p, arg1, 0);
8420 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8421 case TARGET_NR_faccessat:
8422 if (!(p = lock_user_string(arg2)))
8424 ret = get_errno(faccessat(arg1, p, arg3, 0));
8425 unlock_user(p, arg2, 0);
8428 #ifdef TARGET_NR_nice /* not on alpha */
8429 case TARGET_NR_nice:
8430 ret = get_errno(nice(arg1));
8433 #ifdef TARGET_NR_ftime
8434 case TARGET_NR_ftime:
8437 case TARGET_NR_sync:
8441 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8442 case TARGET_NR_syncfs:
8443 ret = get_errno(syncfs(arg1));
8446 case TARGET_NR_kill:
8447 ret = get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8449 #ifdef TARGET_NR_rename
8450 case TARGET_NR_rename:
8453 p = lock_user_string(arg1);
8454 p2 = lock_user_string(arg2);
8456 ret = -TARGET_EFAULT;
8458 ret = get_errno(rename(p, p2));
8459 unlock_user(p2, arg2, 0);
8460 unlock_user(p, arg1, 0);
8464 #if defined(TARGET_NR_renameat)
8465 case TARGET_NR_renameat:
8468 p = lock_user_string(arg2);
8469 p2 = lock_user_string(arg4);
8471 ret = -TARGET_EFAULT;
8473 ret = get_errno(renameat(arg1, p, arg3, p2));
8474 unlock_user(p2, arg4, 0);
8475 unlock_user(p, arg2, 0);
8479 #if defined(TARGET_NR_renameat2)
8480 case TARGET_NR_renameat2:
8483 p = lock_user_string(arg2);
8484 p2 = lock_user_string(arg4);
8486 ret = -TARGET_EFAULT;
8488 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
8490 unlock_user(p2, arg4, 0);
8491 unlock_user(p, arg2, 0);
8495 #ifdef TARGET_NR_mkdir
8496 case TARGET_NR_mkdir:
8497 if (!(p = lock_user_string(arg1)))
8499 ret = get_errno(mkdir(p, arg2));
8500 unlock_user(p, arg1, 0);
8503 #if defined(TARGET_NR_mkdirat)
8504 case TARGET_NR_mkdirat:
8505 if (!(p = lock_user_string(arg2)))
8507 ret = get_errno(mkdirat(arg1, p, arg3));
8508 unlock_user(p, arg2, 0);
8511 #ifdef TARGET_NR_rmdir
8512 case TARGET_NR_rmdir:
8513 if (!(p = lock_user_string(arg1)))
8515 ret = get_errno(rmdir(p));
8516 unlock_user(p, arg1, 0);
8520 ret = get_errno(dup(arg1));
8522 fd_trans_dup(arg1, ret);
8525 #ifdef TARGET_NR_pipe
8526 case TARGET_NR_pipe:
8527 ret = do_pipe(cpu_env, arg1, 0, 0);
8530 #ifdef TARGET_NR_pipe2
8531 case TARGET_NR_pipe2:
8532 ret = do_pipe(cpu_env, arg1,
8533 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8536 case TARGET_NR_times:
8538 struct target_tms *tmsp;
8540 ret = get_errno(times(&tms));
8542 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8545 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8546 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8547 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8548 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8551 ret = host_to_target_clock_t(ret);
8554 #ifdef TARGET_NR_prof
8555 case TARGET_NR_prof:
8558 #ifdef TARGET_NR_signal
8559 case TARGET_NR_signal:
8562 case TARGET_NR_acct:
8564 ret = get_errno(acct(NULL));
8566 if (!(p = lock_user_string(arg1)))
8568 ret = get_errno(acct(path(p)));
8569 unlock_user(p, arg1, 0);
8572 #ifdef TARGET_NR_umount2
8573 case TARGET_NR_umount2:
8574 if (!(p = lock_user_string(arg1)))
8576 ret = get_errno(umount2(p, arg2));
8577 unlock_user(p, arg1, 0);
8580 #ifdef TARGET_NR_lock
8581 case TARGET_NR_lock:
8584 case TARGET_NR_ioctl:
8585 ret = do_ioctl(arg1, arg2, arg3);
8587 #ifdef TARGET_NR_fcntl
8588 case TARGET_NR_fcntl:
8589 ret = do_fcntl(arg1, arg2, arg3);
8592 #ifdef TARGET_NR_mpx
8596 case TARGET_NR_setpgid:
8597 ret = get_errno(setpgid(arg1, arg2));
8599 #ifdef TARGET_NR_ulimit
8600 case TARGET_NR_ulimit:
8603 #ifdef TARGET_NR_oldolduname
8604 case TARGET_NR_oldolduname:
8607 case TARGET_NR_umask:
8608 ret = get_errno(umask(arg1));
8610 case TARGET_NR_chroot:
8611 if (!(p = lock_user_string(arg1)))
8613 ret = get_errno(chroot(p));
8614 unlock_user(p, arg1, 0);
8616 #ifdef TARGET_NR_ustat
8617 case TARGET_NR_ustat:
8620 #ifdef TARGET_NR_dup2
8621 case TARGET_NR_dup2:
8622 ret = get_errno(dup2(arg1, arg2));
8624 fd_trans_dup(arg1, arg2);
8628 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8629 case TARGET_NR_dup3:
8633 if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8636 host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8637 ret = get_errno(dup3(arg1, arg2, host_flags));
8639 fd_trans_dup(arg1, arg2);
8644 #ifdef TARGET_NR_getppid /* not on alpha */
8645 case TARGET_NR_getppid:
8646 ret = get_errno(getppid());
8649 #ifdef TARGET_NR_getpgrp
8650 case TARGET_NR_getpgrp:
8651 ret = get_errno(getpgrp());
8654 case TARGET_NR_setsid:
8655 ret = get_errno(setsid());
8657 #ifdef TARGET_NR_sigaction
8658 case TARGET_NR_sigaction:
8660 #if defined(TARGET_ALPHA)
8661 struct target_sigaction act, oact, *pact = 0;
8662 struct target_old_sigaction *old_act;
8664 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8666 act._sa_handler = old_act->_sa_handler;
8667 target_siginitset(&act.sa_mask, old_act->sa_mask);
8668 act.sa_flags = old_act->sa_flags;
8669 act.sa_restorer = 0;
8670 unlock_user_struct(old_act, arg2, 0);
8673 ret = get_errno(do_sigaction(arg1, pact, &oact));
8674 if (!is_error(ret) && arg3) {
8675 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8677 old_act->_sa_handler = oact._sa_handler;
8678 old_act->sa_mask = oact.sa_mask.sig[0];
8679 old_act->sa_flags = oact.sa_flags;
8680 unlock_user_struct(old_act, arg3, 1);
8682 #elif defined(TARGET_MIPS)
8683 struct target_sigaction act, oact, *pact, *old_act;
8686 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8688 act._sa_handler = old_act->_sa_handler;
8689 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8690 act.sa_flags = old_act->sa_flags;
8691 unlock_user_struct(old_act, arg2, 0);
8697 ret = get_errno(do_sigaction(arg1, pact, &oact));
8699 if (!is_error(ret) && arg3) {
8700 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8702 old_act->_sa_handler = oact._sa_handler;
8703 old_act->sa_flags = oact.sa_flags;
8704 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8705 old_act->sa_mask.sig[1] = 0;
8706 old_act->sa_mask.sig[2] = 0;
8707 old_act->sa_mask.sig[3] = 0;
8708 unlock_user_struct(old_act, arg3, 1);
8711 struct target_old_sigaction *old_act;
8712 struct target_sigaction act, oact, *pact;
8714 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8716 act._sa_handler = old_act->_sa_handler;
8717 target_siginitset(&act.sa_mask, old_act->sa_mask);
8718 act.sa_flags = old_act->sa_flags;
8719 act.sa_restorer = old_act->sa_restorer;
8720 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8721 act.ka_restorer = 0;
8723 unlock_user_struct(old_act, arg2, 0);
8728 ret = get_errno(do_sigaction(arg1, pact, &oact));
8729 if (!is_error(ret) && arg3) {
8730 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8732 old_act->_sa_handler = oact._sa_handler;
8733 old_act->sa_mask = oact.sa_mask.sig[0];
8734 old_act->sa_flags = oact.sa_flags;
8735 old_act->sa_restorer = oact.sa_restorer;
8736 unlock_user_struct(old_act, arg3, 1);
8742 case TARGET_NR_rt_sigaction:
8744 #if defined(TARGET_ALPHA)
8745 /* For Alpha and SPARC this is a 5 argument syscall, with
8746 * a 'restorer' parameter which must be copied into the
8747 * sa_restorer field of the sigaction struct.
8748 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8749 * and arg5 is the sigsetsize.
8750 * Alpha also has a separate rt_sigaction struct that it uses
8751 * here; SPARC uses the usual sigaction struct.
8753 struct target_rt_sigaction *rt_act;
8754 struct target_sigaction act, oact, *pact = 0;
8756 if (arg4 != sizeof(target_sigset_t)) {
8757 ret = -TARGET_EINVAL;
8761 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8763 act._sa_handler = rt_act->_sa_handler;
8764 act.sa_mask = rt_act->sa_mask;
8765 act.sa_flags = rt_act->sa_flags;
8766 act.sa_restorer = arg5;
8767 unlock_user_struct(rt_act, arg2, 0);
8770 ret = get_errno(do_sigaction(arg1, pact, &oact));
8771 if (!is_error(ret) && arg3) {
8772 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8774 rt_act->_sa_handler = oact._sa_handler;
8775 rt_act->sa_mask = oact.sa_mask;
8776 rt_act->sa_flags = oact.sa_flags;
8777 unlock_user_struct(rt_act, arg3, 1);
8781 target_ulong restorer = arg4;
8782 target_ulong sigsetsize = arg5;
8784 target_ulong sigsetsize = arg4;
8786 struct target_sigaction *act;
8787 struct target_sigaction *oact;
8789 if (sigsetsize != sizeof(target_sigset_t)) {
8790 ret = -TARGET_EINVAL;
8794 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
8797 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8798 act->ka_restorer = restorer;
8804 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8805 ret = -TARGET_EFAULT;
8806 goto rt_sigaction_fail;
8810 ret = get_errno(do_sigaction(arg1, act, oact));
8813 unlock_user_struct(act, arg2, 0);
8815 unlock_user_struct(oact, arg3, 1);
8819 #ifdef TARGET_NR_sgetmask /* not on alpha */
8820 case TARGET_NR_sgetmask:
8823 abi_ulong target_set;
8824 ret = do_sigprocmask(0, NULL, &cur_set);
8826 host_to_target_old_sigset(&target_set, &cur_set);
8832 #ifdef TARGET_NR_ssetmask /* not on alpha */
8833 case TARGET_NR_ssetmask:
8836 abi_ulong target_set = arg1;
8837 target_to_host_old_sigset(&set, &target_set);
8838 ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8840 host_to_target_old_sigset(&target_set, &oset);
8846 #ifdef TARGET_NR_sigprocmask
8847 case TARGET_NR_sigprocmask:
8849 #if defined(TARGET_ALPHA)
8850 sigset_t set, oldset;
8855 case TARGET_SIG_BLOCK:
8858 case TARGET_SIG_UNBLOCK:
8861 case TARGET_SIG_SETMASK:
8865 ret = -TARGET_EINVAL;
8869 target_to_host_old_sigset(&set, &mask);
8871 ret = do_sigprocmask(how, &set, &oldset);
8872 if (!is_error(ret)) {
8873 host_to_target_old_sigset(&mask, &oldset);
8875 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8878 sigset_t set, oldset, *set_ptr;
8883 case TARGET_SIG_BLOCK:
8886 case TARGET_SIG_UNBLOCK:
8889 case TARGET_SIG_SETMASK:
8893 ret = -TARGET_EINVAL;
8896 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8898 target_to_host_old_sigset(&set, p);
8899 unlock_user(p, arg2, 0);
8905 ret = do_sigprocmask(how, set_ptr, &oldset);
8906 if (!is_error(ret) && arg3) {
8907 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8909 host_to_target_old_sigset(p, &oldset);
8910 unlock_user(p, arg3, sizeof(target_sigset_t));
8916 case TARGET_NR_rt_sigprocmask:
8919 sigset_t set, oldset, *set_ptr;
8921 if (arg4 != sizeof(target_sigset_t)) {
8922 ret = -TARGET_EINVAL;
8928 case TARGET_SIG_BLOCK:
8931 case TARGET_SIG_UNBLOCK:
8934 case TARGET_SIG_SETMASK:
8938 ret = -TARGET_EINVAL;
8941 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8943 target_to_host_sigset(&set, p);
8944 unlock_user(p, arg2, 0);
8950 ret = do_sigprocmask(how, set_ptr, &oldset);
8951 if (!is_error(ret) && arg3) {
8952 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8954 host_to_target_sigset(p, &oldset);
8955 unlock_user(p, arg3, sizeof(target_sigset_t));
8959 #ifdef TARGET_NR_sigpending
8960 case TARGET_NR_sigpending:
8963 ret = get_errno(sigpending(&set));
8964 if (!is_error(ret)) {
8965 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8967 host_to_target_old_sigset(p, &set);
8968 unlock_user(p, arg1, sizeof(target_sigset_t));
8973 case TARGET_NR_rt_sigpending:
8977 /* Yes, this check is >, not != like most. We follow the kernel's
8978 * logic and it does it like this because it implements
8979 * NR_sigpending through the same code path, and in that case
8980 * the old_sigset_t is smaller in size.
8982 if (arg2 > sizeof(target_sigset_t)) {
8983 ret = -TARGET_EINVAL;
8987 ret = get_errno(sigpending(&set));
8988 if (!is_error(ret)) {
8989 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8991 host_to_target_sigset(p, &set);
8992 unlock_user(p, arg1, sizeof(target_sigset_t));
8996 #ifdef TARGET_NR_sigsuspend
8997 case TARGET_NR_sigsuspend:
8999 TaskState *ts = cpu->opaque;
9000 #if defined(TARGET_ALPHA)
9001 abi_ulong mask = arg1;
9002 target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
9004 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9006 target_to_host_old_sigset(&ts->sigsuspend_mask, p);
9007 unlock_user(p, arg1, 0);
9009 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9011 if (ret != -TARGET_ERESTARTSYS) {
9012 ts->in_sigsuspend = 1;
9017 case TARGET_NR_rt_sigsuspend:
9019 TaskState *ts = cpu->opaque;
9021 if (arg2 != sizeof(target_sigset_t)) {
9022 ret = -TARGET_EINVAL;
9025 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9027 target_to_host_sigset(&ts->sigsuspend_mask, p);
9028 unlock_user(p, arg1, 0);
9029 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9031 if (ret != -TARGET_ERESTARTSYS) {
9032 ts->in_sigsuspend = 1;
9036 case TARGET_NR_rt_sigtimedwait:
9039 struct timespec uts, *puts;
9042 if (arg4 != sizeof(target_sigset_t)) {
9043 ret = -TARGET_EINVAL;
9047 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9049 target_to_host_sigset(&set, p);
9050 unlock_user(p, arg1, 0);
9053 target_to_host_timespec(puts, arg3);
9057 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9059 if (!is_error(ret)) {
9061 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9066 host_to_target_siginfo(p, &uinfo);
9067 unlock_user(p, arg2, sizeof(target_siginfo_t));
9069 ret = host_to_target_signal(ret);
9073 case TARGET_NR_rt_sigqueueinfo:
9077 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9081 target_to_host_siginfo(&uinfo, p);
9082 unlock_user(p, arg3, 0);
9083 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
9086 case TARGET_NR_rt_tgsigqueueinfo:
9090 p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9094 target_to_host_siginfo(&uinfo, p);
9095 unlock_user(p, arg4, 0);
9096 ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
9099 #ifdef TARGET_NR_sigreturn
9100 case TARGET_NR_sigreturn:
9101 if (block_signals()) {
9102 ret = -TARGET_ERESTARTSYS;
9104 ret = do_sigreturn(cpu_env);
9108 case TARGET_NR_rt_sigreturn:
9109 if (block_signals()) {
9110 ret = -TARGET_ERESTARTSYS;
9112 ret = do_rt_sigreturn(cpu_env);
9115 case TARGET_NR_sethostname:
9116 if (!(p = lock_user_string(arg1)))
9118 ret = get_errno(sethostname(p, arg2));
9119 unlock_user(p, arg1, 0);
9121 case TARGET_NR_setrlimit:
9123 int resource = target_to_host_resource(arg1);
9124 struct target_rlimit *target_rlim;
9126 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9128 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9129 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9130 unlock_user_struct(target_rlim, arg2, 0);
9131 ret = get_errno(setrlimit(resource, &rlim));
9134 case TARGET_NR_getrlimit:
9136 int resource = target_to_host_resource(arg1);
9137 struct target_rlimit *target_rlim;
9140 ret = get_errno(getrlimit(resource, &rlim));
9141 if (!is_error(ret)) {
9142 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9144 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9145 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9146 unlock_user_struct(target_rlim, arg2, 1);
9150 case TARGET_NR_getrusage:
9152 struct rusage rusage;
9153 ret = get_errno(getrusage(arg1, &rusage));
9154 if (!is_error(ret)) {
9155 ret = host_to_target_rusage(arg2, &rusage);
9159 case TARGET_NR_gettimeofday:
9162 ret = get_errno(gettimeofday(&tv, NULL));
9163 if (!is_error(ret)) {
9164 if (copy_to_user_timeval(arg1, &tv))
9169 case TARGET_NR_settimeofday:
9171 struct timeval tv, *ptv = NULL;
9172 struct timezone tz, *ptz = NULL;
9175 if (copy_from_user_timeval(&tv, arg1)) {
9182 if (copy_from_user_timezone(&tz, arg2)) {
9188 ret = get_errno(settimeofday(ptv, ptz));
9191 #if defined(TARGET_NR_select)
9192 case TARGET_NR_select:
9193 #if defined(TARGET_WANT_NI_OLD_SELECT)
9194 /* some architectures used to have old_select here
9195 * but now ENOSYS it.
9197 ret = -TARGET_ENOSYS;
9198 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9199 ret = do_old_select(arg1);
9201 ret = do_select(arg1, arg2, arg3, arg4, arg5);
9205 #ifdef TARGET_NR_pselect6
9206 case TARGET_NR_pselect6:
9208 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
9209 fd_set rfds, wfds, efds;
9210 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
9211 struct timespec ts, *ts_ptr;
9214 * The 6th arg is actually two args smashed together,
9215 * so we cannot use the C library.
9223 abi_ulong arg_sigset, arg_sigsize, *arg7;
9224 target_sigset_t *target_sigset;
9232 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
9236 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
9240 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
9246 * This takes a timespec, and not a timeval, so we cannot
9247 * use the do_select() helper ...
9250 if (target_to_host_timespec(&ts, ts_addr)) {
9258 /* Extract the two packed args for the sigset */
9261 sig.size = SIGSET_T_SIZE;
9263 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
9267 arg_sigset = tswapal(arg7[0]);
9268 arg_sigsize = tswapal(arg7[1]);
9269 unlock_user(arg7, arg6, 0);
9273 if (arg_sigsize != sizeof(*target_sigset)) {
9274 /* Like the kernel, we enforce correct size sigsets */
9275 ret = -TARGET_EINVAL;
9278 target_sigset = lock_user(VERIFY_READ, arg_sigset,
9279 sizeof(*target_sigset), 1);
9280 if (!target_sigset) {
9283 target_to_host_sigset(&set, target_sigset);
9284 unlock_user(target_sigset, arg_sigset, 0);
9292 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
9295 if (!is_error(ret)) {
9296 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
9298 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
9300 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
9303 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
9309 #ifdef TARGET_NR_symlink
9310 case TARGET_NR_symlink:
9313 p = lock_user_string(arg1);
9314 p2 = lock_user_string(arg2);
9316 ret = -TARGET_EFAULT;
9318 ret = get_errno(symlink(p, p2));
9319 unlock_user(p2, arg2, 0);
9320 unlock_user(p, arg1, 0);
9324 #if defined(TARGET_NR_symlinkat)
9325 case TARGET_NR_symlinkat:
9328 p = lock_user_string(arg1);
9329 p2 = lock_user_string(arg3);
9331 ret = -TARGET_EFAULT;
9333 ret = get_errno(symlinkat(p, arg2, p2));
9334 unlock_user(p2, arg3, 0);
9335 unlock_user(p, arg1, 0);
9339 #ifdef TARGET_NR_oldlstat
9340 case TARGET_NR_oldlstat:
9343 #ifdef TARGET_NR_readlink
9344 case TARGET_NR_readlink:
9347 p = lock_user_string(arg1);
9348 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9350 ret = -TARGET_EFAULT;
9352 /* Short circuit this for the magic exe check. */
9353 ret = -TARGET_EINVAL;
9354 } else if (is_proc_myself((const char *)p, "exe")) {
9355 char real[PATH_MAX], *temp;
9356 temp = realpath(exec_path, real);
9357 /* Return value is # of bytes that we wrote to the buffer. */
9359 ret = get_errno(-1);
9361 /* Don't worry about sign mismatch as earlier mapping
9362 * logic would have thrown a bad address error. */
9363 ret = MIN(strlen(real), arg3);
9364 /* We cannot NUL terminate the string. */
9365 memcpy(p2, real, ret);
9368 ret = get_errno(readlink(path(p), p2, arg3));
9370 unlock_user(p2, arg2, ret);
9371 unlock_user(p, arg1, 0);
9375 #if defined(TARGET_NR_readlinkat)
9376 case TARGET_NR_readlinkat:
9379 p = lock_user_string(arg2);
9380 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9382 ret = -TARGET_EFAULT;
9383 } else if (is_proc_myself((const char *)p, "exe")) {
9384 char real[PATH_MAX], *temp;
9385 temp = realpath(exec_path, real);
9386 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
9387 snprintf((char *)p2, arg4, "%s", real);
9389 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9391 unlock_user(p2, arg3, ret);
9392 unlock_user(p, arg2, 0);
9396 #ifdef TARGET_NR_uselib
9397 case TARGET_NR_uselib:
9400 #ifdef TARGET_NR_swapon
9401 case TARGET_NR_swapon:
9402 if (!(p = lock_user_string(arg1)))
9404 ret = get_errno(swapon(p, arg2));
9405 unlock_user(p, arg1, 0);
9408 case TARGET_NR_reboot:
9409 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9410 /* arg4 must be ignored in all other cases */
9411 p = lock_user_string(arg4);
9415 ret = get_errno(reboot(arg1, arg2, arg3, p));
9416 unlock_user(p, arg4, 0);
9418 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9421 #ifdef TARGET_NR_readdir
9422 case TARGET_NR_readdir:
9425 #ifdef TARGET_NR_mmap
9426 case TARGET_NR_mmap:
9427 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9428 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9429 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9430 || defined(TARGET_S390X)
9433 abi_ulong v1, v2, v3, v4, v5, v6;
9434 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9442 unlock_user(v, arg1, 0);
9443 ret = get_errno(target_mmap(v1, v2, v3,
9444 target_to_host_bitmask(v4, mmap_flags_tbl),
9448 ret = get_errno(target_mmap(arg1, arg2, arg3,
9449 target_to_host_bitmask(arg4, mmap_flags_tbl),
9455 #ifdef TARGET_NR_mmap2
9456 case TARGET_NR_mmap2:
9458 #define MMAP_SHIFT 12
9460 ret = get_errno(target_mmap(arg1, arg2, arg3,
9461 target_to_host_bitmask(arg4, mmap_flags_tbl),
9463 arg6 << MMAP_SHIFT));
9466 case TARGET_NR_munmap:
9467 ret = get_errno(target_munmap(arg1, arg2));
9469 case TARGET_NR_mprotect:
9471 TaskState *ts = cpu->opaque;
9472 /* Special hack to detect libc making the stack executable. */
9473 if ((arg3 & PROT_GROWSDOWN)
9474 && arg1 >= ts->info->stack_limit
9475 && arg1 <= ts->info->start_stack) {
9476 arg3 &= ~PROT_GROWSDOWN;
9477 arg2 = arg2 + arg1 - ts->info->stack_limit;
9478 arg1 = ts->info->stack_limit;
9481 ret = get_errno(target_mprotect(arg1, arg2, arg3));
9483 #ifdef TARGET_NR_mremap
9484 case TARGET_NR_mremap:
9485 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9488 /* ??? msync/mlock/munlock are broken for softmmu. */
9489 #ifdef TARGET_NR_msync
9490 case TARGET_NR_msync:
9491 ret = get_errno(msync(g2h(arg1), arg2, arg3));
9494 #ifdef TARGET_NR_mlock
9495 case TARGET_NR_mlock:
9496 ret = get_errno(mlock(g2h(arg1), arg2));
9499 #ifdef TARGET_NR_munlock
9500 case TARGET_NR_munlock:
9501 ret = get_errno(munlock(g2h(arg1), arg2));
9504 #ifdef TARGET_NR_mlockall
9505 case TARGET_NR_mlockall:
9506 ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9509 #ifdef TARGET_NR_munlockall
9510 case TARGET_NR_munlockall:
9511 ret = get_errno(munlockall());
9514 case TARGET_NR_truncate:
9515 if (!(p = lock_user_string(arg1)))
9517 ret = get_errno(truncate(p, arg2));
9518 unlock_user(p, arg1, 0);
9520 case TARGET_NR_ftruncate:
9521 ret = get_errno(ftruncate(arg1, arg2));
9523 case TARGET_NR_fchmod:
9524 ret = get_errno(fchmod(arg1, arg2));
9526 #if defined(TARGET_NR_fchmodat)
9527 case TARGET_NR_fchmodat:
9528 if (!(p = lock_user_string(arg2)))
9530 ret = get_errno(fchmodat(arg1, p, arg3, 0));
9531 unlock_user(p, arg2, 0);
9534 case TARGET_NR_getpriority:
9535 /* Note that negative values are valid for getpriority, so we must
9536 differentiate based on errno settings. */
9538 ret = getpriority(arg1, arg2);
9539 if (ret == -1 && errno != 0) {
9540 ret = -host_to_target_errno(errno);
9544 /* Return value is the unbiased priority. Signal no error. */
9545 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9547 /* Return value is a biased priority to avoid negative numbers. */
9551 case TARGET_NR_setpriority:
9552 ret = get_errno(setpriority(arg1, arg2, arg3));
9554 #ifdef TARGET_NR_profil
9555 case TARGET_NR_profil:
9558 case TARGET_NR_statfs:
9559 if (!(p = lock_user_string(arg1)))
9561 ret = get_errno(statfs(path(p), &stfs));
9562 unlock_user(p, arg1, 0);
9564 if (!is_error(ret)) {
9565 struct target_statfs *target_stfs;
9567 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9569 __put_user(stfs.f_type, &target_stfs->f_type);
9570 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9571 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9572 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9573 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9574 __put_user(stfs.f_files, &target_stfs->f_files);
9575 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9576 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9577 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9578 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9579 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9580 #ifdef _STATFS_F_FLAGS
9581 __put_user(stfs.f_flags, &target_stfs->f_flags);
9583 __put_user(0, &target_stfs->f_flags);
9585 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9586 unlock_user_struct(target_stfs, arg2, 1);
9589 case TARGET_NR_fstatfs:
9590 ret = get_errno(fstatfs(arg1, &stfs));
9591 goto convert_statfs;
9592 #ifdef TARGET_NR_statfs64
9593 case TARGET_NR_statfs64:
9594 if (!(p = lock_user_string(arg1)))
9596 ret = get_errno(statfs(path(p), &stfs));
9597 unlock_user(p, arg1, 0);
9599 if (!is_error(ret)) {
9600 struct target_statfs64 *target_stfs;
9602 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9604 __put_user(stfs.f_type, &target_stfs->f_type);
9605 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9606 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9607 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9608 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9609 __put_user(stfs.f_files, &target_stfs->f_files);
9610 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9611 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9612 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9613 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9614 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9615 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9616 unlock_user_struct(target_stfs, arg3, 1);
9619 case TARGET_NR_fstatfs64:
9620 ret = get_errno(fstatfs(arg1, &stfs));
9621 goto convert_statfs64;
9623 #ifdef TARGET_NR_ioperm
9624 case TARGET_NR_ioperm:
9627 #ifdef TARGET_NR_socketcall
9628 case TARGET_NR_socketcall:
9629 ret = do_socketcall(arg1, arg2);
9632 #ifdef TARGET_NR_accept
9633 case TARGET_NR_accept:
9634 ret = do_accept4(arg1, arg2, arg3, 0);
9637 #ifdef TARGET_NR_accept4
9638 case TARGET_NR_accept4:
9639 ret = do_accept4(arg1, arg2, arg3, arg4);
9642 #ifdef TARGET_NR_bind
9643 case TARGET_NR_bind:
9644 ret = do_bind(arg1, arg2, arg3);
9647 #ifdef TARGET_NR_connect
9648 case TARGET_NR_connect:
9649 ret = do_connect(arg1, arg2, arg3);
9652 #ifdef TARGET_NR_getpeername
9653 case TARGET_NR_getpeername:
9654 ret = do_getpeername(arg1, arg2, arg3);
9657 #ifdef TARGET_NR_getsockname
9658 case TARGET_NR_getsockname:
9659 ret = do_getsockname(arg1, arg2, arg3);
9662 #ifdef TARGET_NR_getsockopt
9663 case TARGET_NR_getsockopt:
9664 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9667 #ifdef TARGET_NR_listen
9668 case TARGET_NR_listen:
9669 ret = get_errno(listen(arg1, arg2));
9672 #ifdef TARGET_NR_recv
9673 case TARGET_NR_recv:
9674 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9677 #ifdef TARGET_NR_recvfrom
9678 case TARGET_NR_recvfrom:
9679 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9682 #ifdef TARGET_NR_recvmsg
9683 case TARGET_NR_recvmsg:
9684 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
9687 #ifdef TARGET_NR_send
9688 case TARGET_NR_send:
9689 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9692 #ifdef TARGET_NR_sendmsg
9693 case TARGET_NR_sendmsg:
9694 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
9697 #ifdef TARGET_NR_sendmmsg
9698 case TARGET_NR_sendmmsg:
9699 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9701 case TARGET_NR_recvmmsg:
9702 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9705 #ifdef TARGET_NR_sendto
9706 case TARGET_NR_sendto:
9707 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9710 #ifdef TARGET_NR_shutdown
9711 case TARGET_NR_shutdown:
9712 ret = get_errno(shutdown(arg1, arg2));
9715 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9716 case TARGET_NR_getrandom:
9717 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9721 ret = get_errno(getrandom(p, arg2, arg3));
9722 unlock_user(p, arg1, ret);
9725 #ifdef TARGET_NR_socket
9726 case TARGET_NR_socket:
9727 ret = do_socket(arg1, arg2, arg3);
9730 #ifdef TARGET_NR_socketpair
9731 case TARGET_NR_socketpair:
9732 ret = do_socketpair(arg1, arg2, arg3, arg4);
9735 #ifdef TARGET_NR_setsockopt
9736 case TARGET_NR_setsockopt:
9737 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9740 #if defined(TARGET_NR_syslog)
9741 case TARGET_NR_syslog:
9746 case TARGET_SYSLOG_ACTION_CLOSE: /* Close log */
9747 case TARGET_SYSLOG_ACTION_OPEN: /* Open log */
9748 case TARGET_SYSLOG_ACTION_CLEAR: /* Clear ring buffer */
9749 case TARGET_SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging */
9750 case TARGET_SYSLOG_ACTION_CONSOLE_ON: /* Enable logging */
9751 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9752 case TARGET_SYSLOG_ACTION_SIZE_UNREAD: /* Number of chars */
9753 case TARGET_SYSLOG_ACTION_SIZE_BUFFER: /* Size of the buffer */
9755 ret = get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9758 case TARGET_SYSLOG_ACTION_READ: /* Read from log */
9759 case TARGET_SYSLOG_ACTION_READ_CLEAR: /* Read/clear msgs */
9760 case TARGET_SYSLOG_ACTION_READ_ALL: /* Read last messages */
9762 ret = -TARGET_EINVAL;
9770 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9772 ret = -TARGET_EFAULT;
9775 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9776 unlock_user(p, arg2, arg3);
9786 case TARGET_NR_setitimer:
9788 struct itimerval value, ovalue, *pvalue;
9792 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9793 || copy_from_user_timeval(&pvalue->it_value,
9794 arg2 + sizeof(struct target_timeval)))
9799 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9800 if (!is_error(ret) && arg3) {
9801 if (copy_to_user_timeval(arg3,
9802 &ovalue.it_interval)
9803 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9809 case TARGET_NR_getitimer:
9811 struct itimerval value;
9813 ret = get_errno(getitimer(arg1, &value));
9814 if (!is_error(ret) && arg2) {
9815 if (copy_to_user_timeval(arg2,
9817 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9823 #ifdef TARGET_NR_stat
9824 case TARGET_NR_stat:
9825 if (!(p = lock_user_string(arg1)))
9827 ret = get_errno(stat(path(p), &st));
9828 unlock_user(p, arg1, 0);
9831 #ifdef TARGET_NR_lstat
9832 case TARGET_NR_lstat:
9833 if (!(p = lock_user_string(arg1)))
9835 ret = get_errno(lstat(path(p), &st));
9836 unlock_user(p, arg1, 0);
9839 case TARGET_NR_fstat:
9841 ret = get_errno(fstat(arg1, &st));
9842 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9845 if (!is_error(ret)) {
9846 struct target_stat *target_st;
9848 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9850 memset(target_st, 0, sizeof(*target_st));
9851 __put_user(st.st_dev, &target_st->st_dev);
9852 __put_user(st.st_ino, &target_st->st_ino);
9853 __put_user(st.st_mode, &target_st->st_mode);
9854 __put_user(st.st_uid, &target_st->st_uid);
9855 __put_user(st.st_gid, &target_st->st_gid);
9856 __put_user(st.st_nlink, &target_st->st_nlink);
9857 __put_user(st.st_rdev, &target_st->st_rdev);
9858 __put_user(st.st_size, &target_st->st_size);
9859 __put_user(st.st_blksize, &target_st->st_blksize);
9860 __put_user(st.st_blocks, &target_st->st_blocks);
9861 __put_user(st.st_atime, &target_st->target_st_atime);
9862 __put_user(st.st_mtime, &target_st->target_st_mtime);
9863 __put_user(st.st_ctime, &target_st->target_st_ctime);
9864 unlock_user_struct(target_st, arg2, 1);
9868 #ifdef TARGET_NR_olduname
9869 case TARGET_NR_olduname:
9872 #ifdef TARGET_NR_iopl
9873 case TARGET_NR_iopl:
9876 case TARGET_NR_vhangup:
9877 ret = get_errno(vhangup());
9879 #ifdef TARGET_NR_idle
9880 case TARGET_NR_idle:
9883 #ifdef TARGET_NR_syscall
9884 case TARGET_NR_syscall:
9885 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9886 arg6, arg7, arg8, 0);
9889 case TARGET_NR_wait4:
9892 abi_long status_ptr = arg2;
9893 struct rusage rusage, *rusage_ptr;
9894 abi_ulong target_rusage = arg4;
9895 abi_long rusage_err;
9897 rusage_ptr = &rusage;
9900 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9901 if (!is_error(ret)) {
9902 if (status_ptr && ret) {
9903 status = host_to_target_waitstatus(status);
9904 if (put_user_s32(status, status_ptr))
9907 if (target_rusage) {
9908 rusage_err = host_to_target_rusage(target_rusage, &rusage);
9916 #ifdef TARGET_NR_swapoff
9917 case TARGET_NR_swapoff:
9918 if (!(p = lock_user_string(arg1)))
9920 ret = get_errno(swapoff(p));
9921 unlock_user(p, arg1, 0);
9924 case TARGET_NR_sysinfo:
9926 struct target_sysinfo *target_value;
9927 struct sysinfo value;
9928 ret = get_errno(sysinfo(&value));
9929 if (!is_error(ret) && arg1)
9931 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
9933 __put_user(value.uptime, &target_value->uptime);
9934 __put_user(value.loads[0], &target_value->loads[0]);
9935 __put_user(value.loads[1], &target_value->loads[1]);
9936 __put_user(value.loads[2], &target_value->loads[2]);
9937 __put_user(value.totalram, &target_value->totalram);
9938 __put_user(value.freeram, &target_value->freeram);
9939 __put_user(value.sharedram, &target_value->sharedram);
9940 __put_user(value.bufferram, &target_value->bufferram);
9941 __put_user(value.totalswap, &target_value->totalswap);
9942 __put_user(value.freeswap, &target_value->freeswap);
9943 __put_user(value.procs, &target_value->procs);
9944 __put_user(value.totalhigh, &target_value->totalhigh);
9945 __put_user(value.freehigh, &target_value->freehigh);
9946 __put_user(value.mem_unit, &target_value->mem_unit);
9947 unlock_user_struct(target_value, arg1, 1);
9951 #ifdef TARGET_NR_ipc
9953 ret = do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
9956 #ifdef TARGET_NR_semget
9957 case TARGET_NR_semget:
9958 ret = get_errno(semget(arg1, arg2, arg3));
9961 #ifdef TARGET_NR_semop
9962 case TARGET_NR_semop:
9963 ret = do_semop(arg1, arg2, arg3);
9966 #ifdef TARGET_NR_semctl
9967 case TARGET_NR_semctl:
9968 ret = do_semctl(arg1, arg2, arg3, arg4);
9971 #ifdef TARGET_NR_msgctl
9972 case TARGET_NR_msgctl:
9973 ret = do_msgctl(arg1, arg2, arg3);
9976 #ifdef TARGET_NR_msgget
9977 case TARGET_NR_msgget:
9978 ret = get_errno(msgget(arg1, arg2));
9981 #ifdef TARGET_NR_msgrcv
9982 case TARGET_NR_msgrcv:
9983 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
9986 #ifdef TARGET_NR_msgsnd
9987 case TARGET_NR_msgsnd:
9988 ret = do_msgsnd(arg1, arg2, arg3, arg4);
9991 #ifdef TARGET_NR_shmget
9992 case TARGET_NR_shmget:
9993 ret = get_errno(shmget(arg1, arg2, arg3));
9996 #ifdef TARGET_NR_shmctl
9997 case TARGET_NR_shmctl:
9998 ret = do_shmctl(arg1, arg2, arg3);
10001 #ifdef TARGET_NR_shmat
10002 case TARGET_NR_shmat:
10003 ret = do_shmat(cpu_env, arg1, arg2, arg3);
10006 #ifdef TARGET_NR_shmdt
10007 case TARGET_NR_shmdt:
10008 ret = do_shmdt(arg1);
10011 case TARGET_NR_fsync:
10012 ret = get_errno(fsync(arg1));
10014 case TARGET_NR_clone:
10015 /* Linux manages to have three different orderings for its
10016 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10017 * match the kernel's CONFIG_CLONE_* settings.
10018 * Microblaze is further special in that it uses a sixth
10019 * implicit argument to clone for the TLS pointer.
10021 #if defined(TARGET_MICROBLAZE)
10022 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10023 #elif defined(TARGET_CLONE_BACKWARDS)
10024 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10025 #elif defined(TARGET_CLONE_BACKWARDS2)
10026 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10028 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10031 #ifdef __NR_exit_group
10032 /* new thread calls */
10033 case TARGET_NR_exit_group:
10034 #ifdef TARGET_GPROF
10037 gdb_exit(cpu_env, arg1);
10038 ret = get_errno(exit_group(arg1));
10041 case TARGET_NR_setdomainname:
10042 if (!(p = lock_user_string(arg1)))
10044 ret = get_errno(setdomainname(p, arg2));
10045 unlock_user(p, arg1, 0);
10047 case TARGET_NR_uname:
10048 /* no need to transcode because we use the linux syscall */
10050 struct new_utsname * buf;
10052 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10054 ret = get_errno(sys_uname(buf));
10055 if (!is_error(ret)) {
10056 /* Overwrite the native machine name with whatever is being
10058 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
10059 /* Allow the user to override the reported release. */
10060 if (qemu_uname_release && *qemu_uname_release) {
10061 g_strlcpy(buf->release, qemu_uname_release,
10062 sizeof(buf->release));
10065 unlock_user_struct(buf, arg1, 1);
10069 case TARGET_NR_modify_ldt:
10070 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
10072 #if !defined(TARGET_X86_64)
10073 case TARGET_NR_vm86old:
10074 goto unimplemented;
10075 case TARGET_NR_vm86:
10076 ret = do_vm86(cpu_env, arg1, arg2);
10080 case TARGET_NR_adjtimex:
10082 struct timex host_buf;
10084 if (target_to_host_timex(&host_buf, arg1) != 0) {
10087 ret = get_errno(adjtimex(&host_buf));
10088 if (!is_error(ret)) {
10089 if (host_to_target_timex(arg1, &host_buf) != 0) {
10095 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10096 case TARGET_NR_clock_adjtime:
10098 struct timex htx, *phtx = &htx;
10100 if (target_to_host_timex(phtx, arg2) != 0) {
10103 ret = get_errno(clock_adjtime(arg1, phtx));
10104 if (!is_error(ret) && phtx) {
10105 if (host_to_target_timex(arg2, phtx) != 0) {
10112 #ifdef TARGET_NR_create_module
10113 case TARGET_NR_create_module:
10115 case TARGET_NR_init_module:
10116 case TARGET_NR_delete_module:
10117 #ifdef TARGET_NR_get_kernel_syms
10118 case TARGET_NR_get_kernel_syms:
10120 goto unimplemented;
10121 case TARGET_NR_quotactl:
10122 goto unimplemented;
10123 case TARGET_NR_getpgid:
10124 ret = get_errno(getpgid(arg1));
10126 case TARGET_NR_fchdir:
10127 ret = get_errno(fchdir(arg1));
10129 #ifdef TARGET_NR_bdflush /* not on x86_64 */
10130 case TARGET_NR_bdflush:
10131 goto unimplemented;
10133 #ifdef TARGET_NR_sysfs
10134 case TARGET_NR_sysfs:
10135 goto unimplemented;
10137 case TARGET_NR_personality:
10138 ret = get_errno(personality(arg1));
10140 #ifdef TARGET_NR_afs_syscall
10141 case TARGET_NR_afs_syscall:
10142 goto unimplemented;
10144 #ifdef TARGET_NR__llseek /* Not on alpha */
10145 case TARGET_NR__llseek:
10148 #if !defined(__NR_llseek)
10149 res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10151 ret = get_errno(res);
10156 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10158 if ((ret == 0) && put_user_s64(res, arg4)) {
10164 #ifdef TARGET_NR_getdents
10165 case TARGET_NR_getdents:
10166 #ifdef __NR_getdents
10167 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
10169 struct target_dirent *target_dirp;
10170 struct linux_dirent *dirp;
10171 abi_long count = arg3;
10173 dirp = g_try_malloc(count);
10175 ret = -TARGET_ENOMEM;
10179 ret = get_errno(sys_getdents(arg1, dirp, count));
10180 if (!is_error(ret)) {
10181 struct linux_dirent *de;
10182 struct target_dirent *tde;
10184 int reclen, treclen;
10185 int count1, tnamelen;
10189 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10193 reclen = de->d_reclen;
10194 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
10195 assert(tnamelen >= 0);
10196 treclen = tnamelen + offsetof(struct target_dirent, d_name);
10197 assert(count1 + treclen <= count);
10198 tde->d_reclen = tswap16(treclen);
10199 tde->d_ino = tswapal(de->d_ino);
10200 tde->d_off = tswapal(de->d_off);
10201 memcpy(tde->d_name, de->d_name, tnamelen);
10202 de = (struct linux_dirent *)((char *)de + reclen);
10204 tde = (struct target_dirent *)((char *)tde + treclen);
10208 unlock_user(target_dirp, arg2, ret);
10214 struct linux_dirent *dirp;
10215 abi_long count = arg3;
10217 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10219 ret = get_errno(sys_getdents(arg1, dirp, count));
10220 if (!is_error(ret)) {
10221 struct linux_dirent *de;
10226 reclen = de->d_reclen;
10229 de->d_reclen = tswap16(reclen);
10230 tswapls(&de->d_ino);
10231 tswapls(&de->d_off);
10232 de = (struct linux_dirent *)((char *)de + reclen);
10236 unlock_user(dirp, arg2, ret);
10240 /* Implement getdents in terms of getdents64 */
10242 struct linux_dirent64 *dirp;
10243 abi_long count = arg3;
10245 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
10249 ret = get_errno(sys_getdents64(arg1, dirp, count));
10250 if (!is_error(ret)) {
10251 /* Convert the dirent64 structs to target dirent. We do this
10252 * in-place, since we can guarantee that a target_dirent is no
10253 * larger than a dirent64; however this means we have to be
10254 * careful to read everything before writing in the new format.
10256 struct linux_dirent64 *de;
10257 struct target_dirent *tde;
10262 tde = (struct target_dirent *)dirp;
10264 int namelen, treclen;
10265 int reclen = de->d_reclen;
10266 uint64_t ino = de->d_ino;
10267 int64_t off = de->d_off;
10268 uint8_t type = de->d_type;
10270 namelen = strlen(de->d_name);
10271 treclen = offsetof(struct target_dirent, d_name)
10273 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
10275 memmove(tde->d_name, de->d_name, namelen + 1);
10276 tde->d_ino = tswapal(ino);
10277 tde->d_off = tswapal(off);
10278 tde->d_reclen = tswap16(treclen);
10279 /* The target_dirent type is in what was formerly a padding
10280 * byte at the end of the structure:
10282 *(((char *)tde) + treclen - 1) = type;
10284 de = (struct linux_dirent64 *)((char *)de + reclen);
10285 tde = (struct target_dirent *)((char *)tde + treclen);
10291 unlock_user(dirp, arg2, ret);
10295 #endif /* TARGET_NR_getdents */
10296 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10297 case TARGET_NR_getdents64:
10299 struct linux_dirent64 *dirp;
10300 abi_long count = arg3;
10301 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10303 ret = get_errno(sys_getdents64(arg1, dirp, count));
10304 if (!is_error(ret)) {
10305 struct linux_dirent64 *de;
10310 reclen = de->d_reclen;
10313 de->d_reclen = tswap16(reclen);
10314 tswap64s((uint64_t *)&de->d_ino);
10315 tswap64s((uint64_t *)&de->d_off);
10316 de = (struct linux_dirent64 *)((char *)de + reclen);
10320 unlock_user(dirp, arg2, ret);
10323 #endif /* TARGET_NR_getdents64 */
10324 #if defined(TARGET_NR__newselect)
10325 case TARGET_NR__newselect:
10326 ret = do_select(arg1, arg2, arg3, arg4, arg5);
10329 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
10330 # ifdef TARGET_NR_poll
10331 case TARGET_NR_poll:
10333 # ifdef TARGET_NR_ppoll
10334 case TARGET_NR_ppoll:
10337 struct target_pollfd *target_pfd;
10338 unsigned int nfds = arg2;
10339 struct pollfd *pfd;
10345 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
10346 ret = -TARGET_EINVAL;
10350 target_pfd = lock_user(VERIFY_WRITE, arg1,
10351 sizeof(struct target_pollfd) * nfds, 1);
10356 pfd = alloca(sizeof(struct pollfd) * nfds);
10357 for (i = 0; i < nfds; i++) {
10358 pfd[i].fd = tswap32(target_pfd[i].fd);
10359 pfd[i].events = tswap16(target_pfd[i].events);
10364 # ifdef TARGET_NR_ppoll
10365 case TARGET_NR_ppoll:
10367 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
10368 target_sigset_t *target_set;
10369 sigset_t _set, *set = &_set;
10372 if (target_to_host_timespec(timeout_ts, arg3)) {
10373 unlock_user(target_pfd, arg1, 0);
10381 if (arg5 != sizeof(target_sigset_t)) {
10382 unlock_user(target_pfd, arg1, 0);
10383 ret = -TARGET_EINVAL;
10387 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
10389 unlock_user(target_pfd, arg1, 0);
10392 target_to_host_sigset(set, target_set);
10397 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
10398 set, SIGSET_T_SIZE));
10400 if (!is_error(ret) && arg3) {
10401 host_to_target_timespec(arg3, timeout_ts);
10404 unlock_user(target_set, arg4, 0);
10409 # ifdef TARGET_NR_poll
10410 case TARGET_NR_poll:
10412 struct timespec ts, *pts;
10415 /* Convert ms to secs, ns */
10416 ts.tv_sec = arg3 / 1000;
10417 ts.tv_nsec = (arg3 % 1000) * 1000000LL;
10420 /* -ve poll() timeout means "infinite" */
10423 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
10428 g_assert_not_reached();
10431 if (!is_error(ret)) {
10432 for(i = 0; i < nfds; i++) {
10433 target_pfd[i].revents = tswap16(pfd[i].revents);
10436 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
10440 case TARGET_NR_flock:
10441 /* NOTE: the flock constant seems to be the same for every
10443 ret = get_errno(safe_flock(arg1, arg2));
10445 case TARGET_NR_readv:
10447 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10449 ret = get_errno(safe_readv(arg1, vec, arg3));
10450 unlock_iovec(vec, arg2, arg3, 1);
10452 ret = -host_to_target_errno(errno);
10456 case TARGET_NR_writev:
10458 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10460 ret = get_errno(safe_writev(arg1, vec, arg3));
10461 unlock_iovec(vec, arg2, arg3, 0);
10463 ret = -host_to_target_errno(errno);
10467 #if defined(TARGET_NR_preadv)
10468 case TARGET_NR_preadv:
10470 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10472 unsigned long low, high;
10474 target_to_host_low_high(arg4, arg5, &low, &high);
10475 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10476 unlock_iovec(vec, arg2, arg3, 1);
10478 ret = -host_to_target_errno(errno);
10483 #if defined(TARGET_NR_pwritev)
10484 case TARGET_NR_pwritev:
10486 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10488 unsigned long low, high;
10490 target_to_host_low_high(arg4, arg5, &low, &high);
10491 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10492 unlock_iovec(vec, arg2, arg3, 0);
10494 ret = -host_to_target_errno(errno);
10499 case TARGET_NR_getsid:
10500 ret = get_errno(getsid(arg1));
10502 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10503 case TARGET_NR_fdatasync:
10504 ret = get_errno(fdatasync(arg1));
10507 #ifdef TARGET_NR__sysctl
10508 case TARGET_NR__sysctl:
10509 /* We don't implement this, but ENOTDIR is always a safe
10511 ret = -TARGET_ENOTDIR;
10514 case TARGET_NR_sched_getaffinity:
10516 unsigned int mask_size;
10517 unsigned long *mask;
10520 * sched_getaffinity needs multiples of ulong, so need to take
10521 * care of mismatches between target ulong and host ulong sizes.
10523 if (arg2 & (sizeof(abi_ulong) - 1)) {
10524 ret = -TARGET_EINVAL;
10527 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10529 mask = alloca(mask_size);
10530 memset(mask, 0, mask_size);
10531 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10533 if (!is_error(ret)) {
10535 /* More data returned than the caller's buffer will fit.
10536 * This only happens if sizeof(abi_long) < sizeof(long)
10537 * and the caller passed us a buffer holding an odd number
10538 * of abi_longs. If the host kernel is actually using the
10539 * extra 4 bytes then fail EINVAL; otherwise we can just
10540 * ignore them and only copy the interesting part.
10542 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10543 if (numcpus > arg2 * 8) {
10544 ret = -TARGET_EINVAL;
10550 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10556 case TARGET_NR_sched_setaffinity:
10558 unsigned int mask_size;
10559 unsigned long *mask;
10562 * sched_setaffinity needs multiples of ulong, so need to take
10563 * care of mismatches between target ulong and host ulong sizes.
10565 if (arg2 & (sizeof(abi_ulong) - 1)) {
10566 ret = -TARGET_EINVAL;
10569 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10570 mask = alloca(mask_size);
10572 ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10577 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10580 case TARGET_NR_getcpu:
10582 unsigned cpu, node;
10583 ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10584 arg2 ? &node : NULL,
10586 if (is_error(ret)) {
10589 if (arg1 && put_user_u32(cpu, arg1)) {
10592 if (arg2 && put_user_u32(node, arg2)) {
10597 case TARGET_NR_sched_setparam:
10599 struct sched_param *target_schp;
10600 struct sched_param schp;
10603 return -TARGET_EINVAL;
10605 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10607 schp.sched_priority = tswap32(target_schp->sched_priority);
10608 unlock_user_struct(target_schp, arg2, 0);
10609 ret = get_errno(sched_setparam(arg1, &schp));
10612 case TARGET_NR_sched_getparam:
10614 struct sched_param *target_schp;
10615 struct sched_param schp;
10618 return -TARGET_EINVAL;
10620 ret = get_errno(sched_getparam(arg1, &schp));
10621 if (!is_error(ret)) {
10622 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10624 target_schp->sched_priority = tswap32(schp.sched_priority);
10625 unlock_user_struct(target_schp, arg2, 1);
10629 case TARGET_NR_sched_setscheduler:
10631 struct sched_param *target_schp;
10632 struct sched_param schp;
10634 return -TARGET_EINVAL;
10636 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10638 schp.sched_priority = tswap32(target_schp->sched_priority);
10639 unlock_user_struct(target_schp, arg3, 0);
10640 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
10643 case TARGET_NR_sched_getscheduler:
10644 ret = get_errno(sched_getscheduler(arg1));
10646 case TARGET_NR_sched_yield:
10647 ret = get_errno(sched_yield());
10649 case TARGET_NR_sched_get_priority_max:
10650 ret = get_errno(sched_get_priority_max(arg1));
10652 case TARGET_NR_sched_get_priority_min:
10653 ret = get_errno(sched_get_priority_min(arg1));
10655 case TARGET_NR_sched_rr_get_interval:
10657 struct timespec ts;
10658 ret = get_errno(sched_rr_get_interval(arg1, &ts));
10659 if (!is_error(ret)) {
10660 ret = host_to_target_timespec(arg2, &ts);
10664 case TARGET_NR_nanosleep:
10666 struct timespec req, rem;
10667 target_to_host_timespec(&req, arg1);
10668 ret = get_errno(safe_nanosleep(&req, &rem));
10669 if (is_error(ret) && arg2) {
10670 host_to_target_timespec(arg2, &rem);
10674 #ifdef TARGET_NR_query_module
10675 case TARGET_NR_query_module:
10676 goto unimplemented;
10678 #ifdef TARGET_NR_nfsservctl
10679 case TARGET_NR_nfsservctl:
10680 goto unimplemented;
10682 case TARGET_NR_prctl:
10684 case PR_GET_PDEATHSIG:
10687 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10688 if (!is_error(ret) && arg2
10689 && put_user_ual(deathsig, arg2)) {
10697 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10701 ret = get_errno(prctl(arg1, (unsigned long)name,
10702 arg3, arg4, arg5));
10703 unlock_user(name, arg2, 16);
10708 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10712 ret = get_errno(prctl(arg1, (unsigned long)name,
10713 arg3, arg4, arg5));
10714 unlock_user(name, arg2, 0);
10718 #ifdef TARGET_AARCH64
10719 case TARGET_PR_SVE_SET_VL:
10720 /* We cannot support either PR_SVE_SET_VL_ONEXEC
10721 or PR_SVE_VL_INHERIT. Therefore, anything above
10722 ARM_MAX_VQ results in EINVAL. */
10723 ret = -TARGET_EINVAL;
10724 if (arm_feature(cpu_env, ARM_FEATURE_SVE)
10725 && arg2 >= 0 && arg2 <= ARM_MAX_VQ * 16 && !(arg2 & 15)) {
10726 CPUARMState *env = cpu_env;
10727 int old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10728 int vq = MAX(arg2 / 16, 1);
10731 aarch64_sve_narrow_vq(env, vq);
10733 env->vfp.zcr_el[1] = vq - 1;
10737 case TARGET_PR_SVE_GET_VL:
10738 ret = -TARGET_EINVAL;
10739 if (arm_feature(cpu_env, ARM_FEATURE_SVE)) {
10740 CPUARMState *env = cpu_env;
10741 ret = ((env->vfp.zcr_el[1] & 0xf) + 1) * 16;
10744 #endif /* AARCH64 */
10745 case PR_GET_SECCOMP:
10746 case PR_SET_SECCOMP:
10747 /* Disable seccomp to prevent the target disabling syscalls we
10749 ret = -TARGET_EINVAL;
10752 /* Most prctl options have no pointer arguments */
10753 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10757 #ifdef TARGET_NR_arch_prctl
10758 case TARGET_NR_arch_prctl:
10759 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10760 ret = do_arch_prctl(cpu_env, arg1, arg2);
10763 goto unimplemented;
10766 #ifdef TARGET_NR_pread64
10767 case TARGET_NR_pread64:
10768 if (regpairs_aligned(cpu_env, num)) {
10772 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
10774 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10775 unlock_user(p, arg2, ret);
10777 case TARGET_NR_pwrite64:
10778 if (regpairs_aligned(cpu_env, num)) {
10782 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
10784 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10785 unlock_user(p, arg2, 0);
10788 case TARGET_NR_getcwd:
10789 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10791 ret = get_errno(sys_getcwd1(p, arg2));
10792 unlock_user(p, arg1, ret);
10794 case TARGET_NR_capget:
10795 case TARGET_NR_capset:
10797 struct target_user_cap_header *target_header;
10798 struct target_user_cap_data *target_data = NULL;
10799 struct __user_cap_header_struct header;
10800 struct __user_cap_data_struct data[2];
10801 struct __user_cap_data_struct *dataptr = NULL;
10802 int i, target_datalen;
10803 int data_items = 1;
10805 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10808 header.version = tswap32(target_header->version);
10809 header.pid = tswap32(target_header->pid);
10811 if (header.version != _LINUX_CAPABILITY_VERSION) {
10812 /* Version 2 and up takes pointer to two user_data structs */
10816 target_datalen = sizeof(*target_data) * data_items;
10819 if (num == TARGET_NR_capget) {
10820 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10822 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10824 if (!target_data) {
10825 unlock_user_struct(target_header, arg1, 0);
10829 if (num == TARGET_NR_capset) {
10830 for (i = 0; i < data_items; i++) {
10831 data[i].effective = tswap32(target_data[i].effective);
10832 data[i].permitted = tswap32(target_data[i].permitted);
10833 data[i].inheritable = tswap32(target_data[i].inheritable);
10840 if (num == TARGET_NR_capget) {
10841 ret = get_errno(capget(&header, dataptr));
10843 ret = get_errno(capset(&header, dataptr));
10846 /* The kernel always updates version for both capget and capset */
10847 target_header->version = tswap32(header.version);
10848 unlock_user_struct(target_header, arg1, 1);
10851 if (num == TARGET_NR_capget) {
10852 for (i = 0; i < data_items; i++) {
10853 target_data[i].effective = tswap32(data[i].effective);
10854 target_data[i].permitted = tswap32(data[i].permitted);
10855 target_data[i].inheritable = tswap32(data[i].inheritable);
10857 unlock_user(target_data, arg2, target_datalen);
10859 unlock_user(target_data, arg2, 0);
10864 case TARGET_NR_sigaltstack:
10865 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
10868 #ifdef CONFIG_SENDFILE
10869 case TARGET_NR_sendfile:
10871 off_t *offp = NULL;
10874 ret = get_user_sal(off, arg3);
10875 if (is_error(ret)) {
10880 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10881 if (!is_error(ret) && arg3) {
10882 abi_long ret2 = put_user_sal(off, arg3);
10883 if (is_error(ret2)) {
10889 #ifdef TARGET_NR_sendfile64
10890 case TARGET_NR_sendfile64:
10892 off_t *offp = NULL;
10895 ret = get_user_s64(off, arg3);
10896 if (is_error(ret)) {
10901 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10902 if (!is_error(ret) && arg3) {
10903 abi_long ret2 = put_user_s64(off, arg3);
10904 if (is_error(ret2)) {
10912 case TARGET_NR_sendfile:
10913 #ifdef TARGET_NR_sendfile64
10914 case TARGET_NR_sendfile64:
10916 goto unimplemented;
10919 #ifdef TARGET_NR_getpmsg
10920 case TARGET_NR_getpmsg:
10921 goto unimplemented;
10923 #ifdef TARGET_NR_putpmsg
10924 case TARGET_NR_putpmsg:
10925 goto unimplemented;
10927 #ifdef TARGET_NR_vfork
10928 case TARGET_NR_vfork:
10929 ret = get_errno(do_fork(cpu_env,
10930 CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
10934 #ifdef TARGET_NR_ugetrlimit
10935 case TARGET_NR_ugetrlimit:
10937 struct rlimit rlim;
10938 int resource = target_to_host_resource(arg1);
10939 ret = get_errno(getrlimit(resource, &rlim));
10940 if (!is_error(ret)) {
10941 struct target_rlimit *target_rlim;
10942 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10944 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10945 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10946 unlock_user_struct(target_rlim, arg2, 1);
10951 #ifdef TARGET_NR_truncate64
10952 case TARGET_NR_truncate64:
10953 if (!(p = lock_user_string(arg1)))
10955 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
10956 unlock_user(p, arg1, 0);
10959 #ifdef TARGET_NR_ftruncate64
10960 case TARGET_NR_ftruncate64:
10961 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
10964 #ifdef TARGET_NR_stat64
10965 case TARGET_NR_stat64:
10966 if (!(p = lock_user_string(arg1)))
10968 ret = get_errno(stat(path(p), &st));
10969 unlock_user(p, arg1, 0);
10970 if (!is_error(ret))
10971 ret = host_to_target_stat64(cpu_env, arg2, &st);
10974 #ifdef TARGET_NR_lstat64
10975 case TARGET_NR_lstat64:
10976 if (!(p = lock_user_string(arg1)))
10978 ret = get_errno(lstat(path(p), &st));
10979 unlock_user(p, arg1, 0);
10980 if (!is_error(ret))
10981 ret = host_to_target_stat64(cpu_env, arg2, &st);
10984 #ifdef TARGET_NR_fstat64
10985 case TARGET_NR_fstat64:
10986 ret = get_errno(fstat(arg1, &st));
10987 if (!is_error(ret))
10988 ret = host_to_target_stat64(cpu_env, arg2, &st);
10991 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10992 #ifdef TARGET_NR_fstatat64
10993 case TARGET_NR_fstatat64:
10995 #ifdef TARGET_NR_newfstatat
10996 case TARGET_NR_newfstatat:
10998 if (!(p = lock_user_string(arg2)))
11000 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11001 if (!is_error(ret))
11002 ret = host_to_target_stat64(cpu_env, arg3, &st);
11005 #ifdef TARGET_NR_lchown
11006 case TARGET_NR_lchown:
11007 if (!(p = lock_user_string(arg1)))
11009 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11010 unlock_user(p, arg1, 0);
11013 #ifdef TARGET_NR_getuid
11014 case TARGET_NR_getuid:
11015 ret = get_errno(high2lowuid(getuid()));
11018 #ifdef TARGET_NR_getgid
11019 case TARGET_NR_getgid:
11020 ret = get_errno(high2lowgid(getgid()));
11023 #ifdef TARGET_NR_geteuid
11024 case TARGET_NR_geteuid:
11025 ret = get_errno(high2lowuid(geteuid()));
11028 #ifdef TARGET_NR_getegid
11029 case TARGET_NR_getegid:
11030 ret = get_errno(high2lowgid(getegid()));
11033 case TARGET_NR_setreuid:
11034 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11036 case TARGET_NR_setregid:
11037 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11039 case TARGET_NR_getgroups:
11041 int gidsetsize = arg1;
11042 target_id *target_grouplist;
11046 grouplist = alloca(gidsetsize * sizeof(gid_t));
11047 ret = get_errno(getgroups(gidsetsize, grouplist));
11048 if (gidsetsize == 0)
11050 if (!is_error(ret)) {
11051 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11052 if (!target_grouplist)
11054 for(i = 0;i < ret; i++)
11055 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11056 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11060 case TARGET_NR_setgroups:
11062 int gidsetsize = arg1;
11063 target_id *target_grouplist;
11064 gid_t *grouplist = NULL;
11067 grouplist = alloca(gidsetsize * sizeof(gid_t));
11068 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11069 if (!target_grouplist) {
11070 ret = -TARGET_EFAULT;
11073 for (i = 0; i < gidsetsize; i++) {
11074 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11076 unlock_user(target_grouplist, arg2, 0);
11078 ret = get_errno(setgroups(gidsetsize, grouplist));
11081 case TARGET_NR_fchown:
11082 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11084 #if defined(TARGET_NR_fchownat)
11085 case TARGET_NR_fchownat:
11086 if (!(p = lock_user_string(arg2)))
11088 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11089 low2highgid(arg4), arg5));
11090 unlock_user(p, arg2, 0);
11093 #ifdef TARGET_NR_setresuid
11094 case TARGET_NR_setresuid:
11095 ret = get_errno(sys_setresuid(low2highuid(arg1),
11097 low2highuid(arg3)));
11100 #ifdef TARGET_NR_getresuid
11101 case TARGET_NR_getresuid:
11103 uid_t ruid, euid, suid;
11104 ret = get_errno(getresuid(&ruid, &euid, &suid));
11105 if (!is_error(ret)) {
11106 if (put_user_id(high2lowuid(ruid), arg1)
11107 || put_user_id(high2lowuid(euid), arg2)
11108 || put_user_id(high2lowuid(suid), arg3))
11114 #ifdef TARGET_NR_getresgid
11115 case TARGET_NR_setresgid:
11116 ret = get_errno(sys_setresgid(low2highgid(arg1),
11118 low2highgid(arg3)));
11121 #ifdef TARGET_NR_getresgid
11122 case TARGET_NR_getresgid:
11124 gid_t rgid, egid, sgid;
11125 ret = get_errno(getresgid(&rgid, &egid, &sgid));
11126 if (!is_error(ret)) {
11127 if (put_user_id(high2lowgid(rgid), arg1)
11128 || put_user_id(high2lowgid(egid), arg2)
11129 || put_user_id(high2lowgid(sgid), arg3))
11135 #ifdef TARGET_NR_chown
11136 case TARGET_NR_chown:
11137 if (!(p = lock_user_string(arg1)))
11139 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11140 unlock_user(p, arg1, 0);
11143 case TARGET_NR_setuid:
11144 ret = get_errno(sys_setuid(low2highuid(arg1)));
11146 case TARGET_NR_setgid:
11147 ret = get_errno(sys_setgid(low2highgid(arg1)));
11149 case TARGET_NR_setfsuid:
11150 ret = get_errno(setfsuid(arg1));
11152 case TARGET_NR_setfsgid:
11153 ret = get_errno(setfsgid(arg1));
11156 #ifdef TARGET_NR_lchown32
11157 case TARGET_NR_lchown32:
11158 if (!(p = lock_user_string(arg1)))
11160 ret = get_errno(lchown(p, arg2, arg3));
11161 unlock_user(p, arg1, 0);
11164 #ifdef TARGET_NR_getuid32
11165 case TARGET_NR_getuid32:
11166 ret = get_errno(getuid());
11170 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11171 /* Alpha specific */
11172 case TARGET_NR_getxuid:
11176 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
11178 ret = get_errno(getuid());
11181 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11182 /* Alpha specific */
11183 case TARGET_NR_getxgid:
11187 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
11189 ret = get_errno(getgid());
11192 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11193 /* Alpha specific */
11194 case TARGET_NR_osf_getsysinfo:
11195 ret = -TARGET_EOPNOTSUPP;
11197 case TARGET_GSI_IEEE_FP_CONTROL:
11199 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
11201 /* Copied from linux ieee_fpcr_to_swcr. */
11202 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
11203 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
11204 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
11205 | SWCR_TRAP_ENABLE_DZE
11206 | SWCR_TRAP_ENABLE_OVF);
11207 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
11208 | SWCR_TRAP_ENABLE_INE);
11209 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
11210 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
11212 if (put_user_u64 (swcr, arg2))
11218 /* case GSI_IEEE_STATE_AT_SIGNAL:
11219 -- Not implemented in linux kernel.
11221 -- Retrieves current unaligned access state; not much used.
11222 case GSI_PROC_TYPE:
11223 -- Retrieves implver information; surely not used.
11224 case GSI_GET_HWRPB:
11225 -- Grabs a copy of the HWRPB; surely not used.
11230 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11231 /* Alpha specific */
11232 case TARGET_NR_osf_setsysinfo:
11233 ret = -TARGET_EOPNOTSUPP;
11235 case TARGET_SSI_IEEE_FP_CONTROL:
11237 uint64_t swcr, fpcr, orig_fpcr;
11239 if (get_user_u64 (swcr, arg2)) {
11242 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
11243 fpcr = orig_fpcr & FPCR_DYN_MASK;
11245 /* Copied from linux ieee_swcr_to_fpcr. */
11246 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
11247 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
11248 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
11249 | SWCR_TRAP_ENABLE_DZE
11250 | SWCR_TRAP_ENABLE_OVF)) << 48;
11251 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
11252 | SWCR_TRAP_ENABLE_INE)) << 57;
11253 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
11254 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
11256 cpu_alpha_store_fpcr(cpu_env, fpcr);
11261 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11263 uint64_t exc, fpcr, orig_fpcr;
11266 if (get_user_u64(exc, arg2)) {
11270 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
11272 /* We only add to the exception status here. */
11273 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
11275 cpu_alpha_store_fpcr(cpu_env, fpcr);
11278 /* Old exceptions are not signaled. */
11279 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
11281 /* If any exceptions set by this call,
11282 and are unmasked, send a signal. */
11284 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
11285 si_code = TARGET_FPE_FLTRES;
11287 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
11288 si_code = TARGET_FPE_FLTUND;
11290 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
11291 si_code = TARGET_FPE_FLTOVF;
11293 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
11294 si_code = TARGET_FPE_FLTDIV;
11296 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
11297 si_code = TARGET_FPE_FLTINV;
11299 if (si_code != 0) {
11300 target_siginfo_t info;
11301 info.si_signo = SIGFPE;
11303 info.si_code = si_code;
11304 info._sifields._sigfault._addr
11305 = ((CPUArchState *)cpu_env)->pc;
11306 queue_signal((CPUArchState *)cpu_env, info.si_signo,
11307 QEMU_SI_FAULT, &info);
11312 /* case SSI_NVPAIRS:
11313 -- Used with SSIN_UACPROC to enable unaligned accesses.
11314 case SSI_IEEE_STATE_AT_SIGNAL:
11315 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11316 -- Not implemented in linux kernel
11321 #ifdef TARGET_NR_osf_sigprocmask
11322 /* Alpha specific. */
11323 case TARGET_NR_osf_sigprocmask:
11327 sigset_t set, oldset;
11330 case TARGET_SIG_BLOCK:
11333 case TARGET_SIG_UNBLOCK:
11336 case TARGET_SIG_SETMASK:
11340 ret = -TARGET_EINVAL;
11344 target_to_host_old_sigset(&set, &mask);
11345 ret = do_sigprocmask(how, &set, &oldset);
11347 host_to_target_old_sigset(&mask, &oldset);
11354 #ifdef TARGET_NR_getgid32
11355 case TARGET_NR_getgid32:
11356 ret = get_errno(getgid());
11359 #ifdef TARGET_NR_geteuid32
11360 case TARGET_NR_geteuid32:
11361 ret = get_errno(geteuid());
11364 #ifdef TARGET_NR_getegid32
11365 case TARGET_NR_getegid32:
11366 ret = get_errno(getegid());
11369 #ifdef TARGET_NR_setreuid32
11370 case TARGET_NR_setreuid32:
11371 ret = get_errno(setreuid(arg1, arg2));
11374 #ifdef TARGET_NR_setregid32
11375 case TARGET_NR_setregid32:
11376 ret = get_errno(setregid(arg1, arg2));
11379 #ifdef TARGET_NR_getgroups32
11380 case TARGET_NR_getgroups32:
11382 int gidsetsize = arg1;
11383 uint32_t *target_grouplist;
11387 grouplist = alloca(gidsetsize * sizeof(gid_t));
11388 ret = get_errno(getgroups(gidsetsize, grouplist));
11389 if (gidsetsize == 0)
11391 if (!is_error(ret)) {
11392 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11393 if (!target_grouplist) {
11394 ret = -TARGET_EFAULT;
11397 for(i = 0;i < ret; i++)
11398 target_grouplist[i] = tswap32(grouplist[i]);
11399 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11404 #ifdef TARGET_NR_setgroups32
11405 case TARGET_NR_setgroups32:
11407 int gidsetsize = arg1;
11408 uint32_t *target_grouplist;
11412 grouplist = alloca(gidsetsize * sizeof(gid_t));
11413 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11414 if (!target_grouplist) {
11415 ret = -TARGET_EFAULT;
11418 for(i = 0;i < gidsetsize; i++)
11419 grouplist[i] = tswap32(target_grouplist[i]);
11420 unlock_user(target_grouplist, arg2, 0);
11421 ret = get_errno(setgroups(gidsetsize, grouplist));
11425 #ifdef TARGET_NR_fchown32
11426 case TARGET_NR_fchown32:
11427 ret = get_errno(fchown(arg1, arg2, arg3));
11430 #ifdef TARGET_NR_setresuid32
11431 case TARGET_NR_setresuid32:
11432 ret = get_errno(sys_setresuid(arg1, arg2, arg3));
11435 #ifdef TARGET_NR_getresuid32
11436 case TARGET_NR_getresuid32:
11438 uid_t ruid, euid, suid;
11439 ret = get_errno(getresuid(&ruid, &euid, &suid));
11440 if (!is_error(ret)) {
11441 if (put_user_u32(ruid, arg1)
11442 || put_user_u32(euid, arg2)
11443 || put_user_u32(suid, arg3))
11449 #ifdef TARGET_NR_setresgid32
11450 case TARGET_NR_setresgid32:
11451 ret = get_errno(sys_setresgid(arg1, arg2, arg3));
11454 #ifdef TARGET_NR_getresgid32
11455 case TARGET_NR_getresgid32:
11457 gid_t rgid, egid, sgid;
11458 ret = get_errno(getresgid(&rgid, &egid, &sgid));
11459 if (!is_error(ret)) {
11460 if (put_user_u32(rgid, arg1)
11461 || put_user_u32(egid, arg2)
11462 || put_user_u32(sgid, arg3))
11468 #ifdef TARGET_NR_chown32
11469 case TARGET_NR_chown32:
11470 if (!(p = lock_user_string(arg1)))
11472 ret = get_errno(chown(p, arg2, arg3));
11473 unlock_user(p, arg1, 0);
11476 #ifdef TARGET_NR_setuid32
11477 case TARGET_NR_setuid32:
11478 ret = get_errno(sys_setuid(arg1));
11481 #ifdef TARGET_NR_setgid32
11482 case TARGET_NR_setgid32:
11483 ret = get_errno(sys_setgid(arg1));
11486 #ifdef TARGET_NR_setfsuid32
11487 case TARGET_NR_setfsuid32:
11488 ret = get_errno(setfsuid(arg1));
11491 #ifdef TARGET_NR_setfsgid32
11492 case TARGET_NR_setfsgid32:
11493 ret = get_errno(setfsgid(arg1));
11497 case TARGET_NR_pivot_root:
11498 goto unimplemented;
11499 #ifdef TARGET_NR_mincore
11500 case TARGET_NR_mincore:
11503 ret = -TARGET_ENOMEM;
11504 a = lock_user(VERIFY_READ, arg1, arg2, 0);
11508 ret = -TARGET_EFAULT;
11509 p = lock_user_string(arg3);
11513 ret = get_errno(mincore(a, arg2, p));
11514 unlock_user(p, arg3, ret);
11516 unlock_user(a, arg1, 0);
11520 #ifdef TARGET_NR_arm_fadvise64_64
11521 case TARGET_NR_arm_fadvise64_64:
11522 /* arm_fadvise64_64 looks like fadvise64_64 but
11523 * with different argument order: fd, advice, offset, len
11524 * rather than the usual fd, offset, len, advice.
11525 * Note that offset and len are both 64-bit so appear as
11526 * pairs of 32-bit registers.
11528 ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11529 target_offset64(arg5, arg6), arg2);
11530 ret = -host_to_target_errno(ret);
11534 #if TARGET_ABI_BITS == 32
11536 #ifdef TARGET_NR_fadvise64_64
11537 case TARGET_NR_fadvise64_64:
11538 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11539 /* 6 args: fd, advice, offset (high, low), len (high, low) */
11547 /* 6 args: fd, offset (high, low), len (high, low), advice */
11548 if (regpairs_aligned(cpu_env, num)) {
11549 /* offset is in (3,4), len in (5,6) and advice in 7 */
11557 ret = -host_to_target_errno(posix_fadvise(arg1,
11558 target_offset64(arg2, arg3),
11559 target_offset64(arg4, arg5),
11564 #ifdef TARGET_NR_fadvise64
11565 case TARGET_NR_fadvise64:
11566 /* 5 args: fd, offset (high, low), len, advice */
11567 if (regpairs_aligned(cpu_env, num)) {
11568 /* offset is in (3,4), len in 5 and advice in 6 */
11574 ret = -host_to_target_errno(posix_fadvise(arg1,
11575 target_offset64(arg2, arg3),
11580 #else /* not a 32-bit ABI */
11581 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11582 #ifdef TARGET_NR_fadvise64_64
11583 case TARGET_NR_fadvise64_64:
11585 #ifdef TARGET_NR_fadvise64
11586 case TARGET_NR_fadvise64:
11588 #ifdef TARGET_S390X
11590 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11591 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11592 case 6: arg4 = POSIX_FADV_DONTNEED; break;
11593 case 7: arg4 = POSIX_FADV_NOREUSE; break;
11597 ret = -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11600 #endif /* end of 64-bit ABI fadvise handling */
11602 #ifdef TARGET_NR_madvise
11603 case TARGET_NR_madvise:
11604 /* A straight passthrough may not be safe because qemu sometimes
11605 turns private file-backed mappings into anonymous mappings.
11606 This will break MADV_DONTNEED.
11607 This is a hint, so ignoring and returning success is ok. */
11608 ret = get_errno(0);
11611 #if TARGET_ABI_BITS == 32
11612 case TARGET_NR_fcntl64:
11616 from_flock64_fn *copyfrom = copy_from_user_flock64;
11617 to_flock64_fn *copyto = copy_to_user_flock64;
11620 if (((CPUARMState *)cpu_env)->eabi) {
11621 copyfrom = copy_from_user_eabi_flock64;
11622 copyto = copy_to_user_eabi_flock64;
11626 cmd = target_to_host_fcntl_cmd(arg2);
11627 if (cmd == -TARGET_EINVAL) {
11633 case TARGET_F_GETLK64:
11634 ret = copyfrom(&fl, arg3);
11638 ret = get_errno(fcntl(arg1, cmd, &fl));
11640 ret = copyto(arg3, &fl);
11644 case TARGET_F_SETLK64:
11645 case TARGET_F_SETLKW64:
11646 ret = copyfrom(&fl, arg3);
11650 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11653 ret = do_fcntl(arg1, arg2, arg3);
11659 #ifdef TARGET_NR_cacheflush
11660 case TARGET_NR_cacheflush:
11661 /* self-modifying code is handled automatically, so nothing needed */
11665 #ifdef TARGET_NR_security
11666 case TARGET_NR_security:
11667 goto unimplemented;
11669 #ifdef TARGET_NR_getpagesize
11670 case TARGET_NR_getpagesize:
11671 ret = TARGET_PAGE_SIZE;
11674 case TARGET_NR_gettid:
11675 ret = get_errno(gettid());
11677 #ifdef TARGET_NR_readahead
11678 case TARGET_NR_readahead:
11679 #if TARGET_ABI_BITS == 32
11680 if (regpairs_aligned(cpu_env, num)) {
11685 ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
11687 ret = get_errno(readahead(arg1, arg2, arg3));
11692 #ifdef TARGET_NR_setxattr
11693 case TARGET_NR_listxattr:
11694 case TARGET_NR_llistxattr:
11698 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11700 ret = -TARGET_EFAULT;
11704 p = lock_user_string(arg1);
11706 if (num == TARGET_NR_listxattr) {
11707 ret = get_errno(listxattr(p, b, arg3));
11709 ret = get_errno(llistxattr(p, b, arg3));
11712 ret = -TARGET_EFAULT;
11714 unlock_user(p, arg1, 0);
11715 unlock_user(b, arg2, arg3);
11718 case TARGET_NR_flistxattr:
11722 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11724 ret = -TARGET_EFAULT;
11728 ret = get_errno(flistxattr(arg1, b, arg3));
11729 unlock_user(b, arg2, arg3);
11732 case TARGET_NR_setxattr:
11733 case TARGET_NR_lsetxattr:
11735 void *p, *n, *v = 0;
11737 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11739 ret = -TARGET_EFAULT;
11743 p = lock_user_string(arg1);
11744 n = lock_user_string(arg2);
11746 if (num == TARGET_NR_setxattr) {
11747 ret = get_errno(setxattr(p, n, v, arg4, arg5));
11749 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11752 ret = -TARGET_EFAULT;
11754 unlock_user(p, arg1, 0);
11755 unlock_user(n, arg2, 0);
11756 unlock_user(v, arg3, 0);
11759 case TARGET_NR_fsetxattr:
11763 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11765 ret = -TARGET_EFAULT;
11769 n = lock_user_string(arg2);
11771 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11773 ret = -TARGET_EFAULT;
11775 unlock_user(n, arg2, 0);
11776 unlock_user(v, arg3, 0);
11779 case TARGET_NR_getxattr:
11780 case TARGET_NR_lgetxattr:
11782 void *p, *n, *v = 0;
11784 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11786 ret = -TARGET_EFAULT;
11790 p = lock_user_string(arg1);
11791 n = lock_user_string(arg2);
11793 if (num == TARGET_NR_getxattr) {
11794 ret = get_errno(getxattr(p, n, v, arg4));
11796 ret = get_errno(lgetxattr(p, n, v, arg4));
11799 ret = -TARGET_EFAULT;
11801 unlock_user(p, arg1, 0);
11802 unlock_user(n, arg2, 0);
11803 unlock_user(v, arg3, arg4);
11806 case TARGET_NR_fgetxattr:
11810 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11812 ret = -TARGET_EFAULT;
11816 n = lock_user_string(arg2);
11818 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11820 ret = -TARGET_EFAULT;
11822 unlock_user(n, arg2, 0);
11823 unlock_user(v, arg3, arg4);
11826 case TARGET_NR_removexattr:
11827 case TARGET_NR_lremovexattr:
11830 p = lock_user_string(arg1);
11831 n = lock_user_string(arg2);
11833 if (num == TARGET_NR_removexattr) {
11834 ret = get_errno(removexattr(p, n));
11836 ret = get_errno(lremovexattr(p, n));
11839 ret = -TARGET_EFAULT;
11841 unlock_user(p, arg1, 0);
11842 unlock_user(n, arg2, 0);
11845 case TARGET_NR_fremovexattr:
11848 n = lock_user_string(arg2);
11850 ret = get_errno(fremovexattr(arg1, n));
11852 ret = -TARGET_EFAULT;
11854 unlock_user(n, arg2, 0);
11858 #endif /* CONFIG_ATTR */
11859 #ifdef TARGET_NR_set_thread_area
11860 case TARGET_NR_set_thread_area:
11861 #if defined(TARGET_MIPS)
11862 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11865 #elif defined(TARGET_CRIS)
11867 ret = -TARGET_EINVAL;
11869 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11873 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11874 ret = do_set_thread_area(cpu_env, arg1);
11876 #elif defined(TARGET_M68K)
11878 TaskState *ts = cpu->opaque;
11879 ts->tp_value = arg1;
11884 goto unimplemented_nowarn;
11887 #ifdef TARGET_NR_get_thread_area
11888 case TARGET_NR_get_thread_area:
11889 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11890 ret = do_get_thread_area(cpu_env, arg1);
11892 #elif defined(TARGET_M68K)
11894 TaskState *ts = cpu->opaque;
11895 ret = ts->tp_value;
11899 goto unimplemented_nowarn;
11902 #ifdef TARGET_NR_getdomainname
11903 case TARGET_NR_getdomainname:
11904 goto unimplemented_nowarn;
11907 #ifdef TARGET_NR_clock_settime
11908 case TARGET_NR_clock_settime:
11910 struct timespec ts;
11912 ret = target_to_host_timespec(&ts, arg2);
11913 if (!is_error(ret)) {
11914 ret = get_errno(clock_settime(arg1, &ts));
11919 #ifdef TARGET_NR_clock_gettime
11920 case TARGET_NR_clock_gettime:
11922 struct timespec ts;
11923 ret = get_errno(clock_gettime(arg1, &ts));
11924 if (!is_error(ret)) {
11925 ret = host_to_target_timespec(arg2, &ts);
11930 #ifdef TARGET_NR_clock_getres
11931 case TARGET_NR_clock_getres:
11933 struct timespec ts;
11934 ret = get_errno(clock_getres(arg1, &ts));
11935 if (!is_error(ret)) {
11936 host_to_target_timespec(arg2, &ts);
11941 #ifdef TARGET_NR_clock_nanosleep
11942 case TARGET_NR_clock_nanosleep:
11944 struct timespec ts;
11945 target_to_host_timespec(&ts, arg3);
11946 ret = get_errno(safe_clock_nanosleep(arg1, arg2,
11947 &ts, arg4 ? &ts : NULL));
11949 host_to_target_timespec(arg4, &ts);
11951 #if defined(TARGET_PPC)
11952 /* clock_nanosleep is odd in that it returns positive errno values.
11953 * On PPC, CR0 bit 3 should be set in such a situation. */
11954 if (ret && ret != -TARGET_ERESTARTSYS) {
11955 ((CPUPPCState *)cpu_env)->crf[0] |= 1;
11962 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11963 case TARGET_NR_set_tid_address:
11964 ret = get_errno(set_tid_address((int *)g2h(arg1)));
11968 case TARGET_NR_tkill:
11969 ret = get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
11972 case TARGET_NR_tgkill:
11973 ret = get_errno(safe_tgkill((int)arg1, (int)arg2,
11974 target_to_host_signal(arg3)));
11977 #ifdef TARGET_NR_set_robust_list
11978 case TARGET_NR_set_robust_list:
11979 case TARGET_NR_get_robust_list:
11980 /* The ABI for supporting robust futexes has userspace pass
11981 * the kernel a pointer to a linked list which is updated by
11982 * userspace after the syscall; the list is walked by the kernel
11983 * when the thread exits. Since the linked list in QEMU guest
11984 * memory isn't a valid linked list for the host and we have
11985 * no way to reliably intercept the thread-death event, we can't
11986 * support these. Silently return ENOSYS so that guest userspace
11987 * falls back to a non-robust futex implementation (which should
11988 * be OK except in the corner case of the guest crashing while
11989 * holding a mutex that is shared with another process via
11992 goto unimplemented_nowarn;
11995 #if defined(TARGET_NR_utimensat)
11996 case TARGET_NR_utimensat:
11998 struct timespec *tsp, ts[2];
12002 target_to_host_timespec(ts, arg3);
12003 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
12007 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12009 if (!(p = lock_user_string(arg2))) {
12010 ret = -TARGET_EFAULT;
12013 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12014 unlock_user(p, arg2, 0);
12019 case TARGET_NR_futex:
12020 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
12022 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
12023 case TARGET_NR_inotify_init:
12024 ret = get_errno(sys_inotify_init());
12026 fd_trans_register(ret, &target_inotify_trans);
12030 #ifdef CONFIG_INOTIFY1
12031 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
12032 case TARGET_NR_inotify_init1:
12033 ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
12034 fcntl_flags_tbl)));
12036 fd_trans_register(ret, &target_inotify_trans);
12041 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
12042 case TARGET_NR_inotify_add_watch:
12043 p = lock_user_string(arg2);
12044 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
12045 unlock_user(p, arg2, 0);
12048 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
12049 case TARGET_NR_inotify_rm_watch:
12050 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
12054 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12055 case TARGET_NR_mq_open:
12057 struct mq_attr posix_mq_attr;
12058 struct mq_attr *pposix_mq_attr;
12061 host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12062 pposix_mq_attr = NULL;
12064 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12067 pposix_mq_attr = &posix_mq_attr;
12069 p = lock_user_string(arg1 - 1);
12073 ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12074 unlock_user (p, arg1, 0);
12078 case TARGET_NR_mq_unlink:
12079 p = lock_user_string(arg1 - 1);
12081 ret = -TARGET_EFAULT;
12084 ret = get_errno(mq_unlink(p));
12085 unlock_user (p, arg1, 0);
12088 case TARGET_NR_mq_timedsend:
12090 struct timespec ts;
12092 p = lock_user (VERIFY_READ, arg2, arg3, 1);
12094 target_to_host_timespec(&ts, arg5);
12095 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12096 host_to_target_timespec(arg5, &ts);
12098 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12100 unlock_user (p, arg2, arg3);
12104 case TARGET_NR_mq_timedreceive:
12106 struct timespec ts;
12109 p = lock_user (VERIFY_READ, arg2, arg3, 1);
12111 target_to_host_timespec(&ts, arg5);
12112 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12114 host_to_target_timespec(arg5, &ts);
12116 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12119 unlock_user (p, arg2, arg3);
12121 put_user_u32(prio, arg4);
12125 /* Not implemented for now... */
12126 /* case TARGET_NR_mq_notify: */
12129 case TARGET_NR_mq_getsetattr:
12131 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12134 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12135 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12136 &posix_mq_attr_out));
12137 } else if (arg3 != 0) {
12138 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12140 if (ret == 0 && arg3 != 0) {
12141 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12147 #ifdef CONFIG_SPLICE
12148 #ifdef TARGET_NR_tee
12149 case TARGET_NR_tee:
12151 ret = get_errno(tee(arg1,arg2,arg3,arg4));
12155 #ifdef TARGET_NR_splice
12156 case TARGET_NR_splice:
12158 loff_t loff_in, loff_out;
12159 loff_t *ploff_in = NULL, *ploff_out = NULL;
12161 if (get_user_u64(loff_in, arg2)) {
12164 ploff_in = &loff_in;
12167 if (get_user_u64(loff_out, arg4)) {
12170 ploff_out = &loff_out;
12172 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12174 if (put_user_u64(loff_in, arg2)) {
12179 if (put_user_u64(loff_out, arg4)) {
12186 #ifdef TARGET_NR_vmsplice
12187 case TARGET_NR_vmsplice:
12189 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12191 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12192 unlock_iovec(vec, arg2, arg3, 0);
12194 ret = -host_to_target_errno(errno);
12199 #endif /* CONFIG_SPLICE */
12200 #ifdef CONFIG_EVENTFD
12201 #if defined(TARGET_NR_eventfd)
12202 case TARGET_NR_eventfd:
12203 ret = get_errno(eventfd(arg1, 0));
12205 fd_trans_register(ret, &target_eventfd_trans);
12209 #if defined(TARGET_NR_eventfd2)
12210 case TARGET_NR_eventfd2:
12212 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
12213 if (arg2 & TARGET_O_NONBLOCK) {
12214 host_flags |= O_NONBLOCK;
12216 if (arg2 & TARGET_O_CLOEXEC) {
12217 host_flags |= O_CLOEXEC;
12219 ret = get_errno(eventfd(arg1, host_flags));
12221 fd_trans_register(ret, &target_eventfd_trans);
12226 #endif /* CONFIG_EVENTFD */
12227 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12228 case TARGET_NR_fallocate:
12229 #if TARGET_ABI_BITS == 32
12230 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12231 target_offset64(arg5, arg6)));
12233 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12237 #if defined(CONFIG_SYNC_FILE_RANGE)
12238 #if defined(TARGET_NR_sync_file_range)
12239 case TARGET_NR_sync_file_range:
12240 #if TARGET_ABI_BITS == 32
12241 #if defined(TARGET_MIPS)
12242 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12243 target_offset64(arg5, arg6), arg7));
12245 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12246 target_offset64(arg4, arg5), arg6));
12247 #endif /* !TARGET_MIPS */
12249 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12253 #if defined(TARGET_NR_sync_file_range2)
12254 case TARGET_NR_sync_file_range2:
12255 /* This is like sync_file_range but the arguments are reordered */
12256 #if TARGET_ABI_BITS == 32
12257 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12258 target_offset64(arg5, arg6), arg2));
12260 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12265 #if defined(TARGET_NR_signalfd4)
12266 case TARGET_NR_signalfd4:
12267 ret = do_signalfd4(arg1, arg2, arg4);
12270 #if defined(TARGET_NR_signalfd)
12271 case TARGET_NR_signalfd:
12272 ret = do_signalfd4(arg1, arg2, 0);
12275 #if defined(CONFIG_EPOLL)
12276 #if defined(TARGET_NR_epoll_create)
12277 case TARGET_NR_epoll_create:
12278 ret = get_errno(epoll_create(arg1));
12281 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12282 case TARGET_NR_epoll_create1:
12283 ret = get_errno(epoll_create1(arg1));
12286 #if defined(TARGET_NR_epoll_ctl)
12287 case TARGET_NR_epoll_ctl:
12289 struct epoll_event ep;
12290 struct epoll_event *epp = 0;
12292 struct target_epoll_event *target_ep;
12293 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12296 ep.events = tswap32(target_ep->events);
12297 /* The epoll_data_t union is just opaque data to the kernel,
12298 * so we transfer all 64 bits across and need not worry what
12299 * actual data type it is.
12301 ep.data.u64 = tswap64(target_ep->data.u64);
12302 unlock_user_struct(target_ep, arg4, 0);
12305 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12310 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12311 #if defined(TARGET_NR_epoll_wait)
12312 case TARGET_NR_epoll_wait:
12314 #if defined(TARGET_NR_epoll_pwait)
12315 case TARGET_NR_epoll_pwait:
12318 struct target_epoll_event *target_ep;
12319 struct epoll_event *ep;
12321 int maxevents = arg3;
12322 int timeout = arg4;
12324 if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12325 ret = -TARGET_EINVAL;
12329 target_ep = lock_user(VERIFY_WRITE, arg2,
12330 maxevents * sizeof(struct target_epoll_event), 1);
12335 ep = g_try_new(struct epoll_event, maxevents);
12337 unlock_user(target_ep, arg2, 0);
12338 ret = -TARGET_ENOMEM;
12343 #if defined(TARGET_NR_epoll_pwait)
12344 case TARGET_NR_epoll_pwait:
12346 target_sigset_t *target_set;
12347 sigset_t _set, *set = &_set;
12350 if (arg6 != sizeof(target_sigset_t)) {
12351 ret = -TARGET_EINVAL;
12355 target_set = lock_user(VERIFY_READ, arg5,
12356 sizeof(target_sigset_t), 1);
12358 ret = -TARGET_EFAULT;
12361 target_to_host_sigset(set, target_set);
12362 unlock_user(target_set, arg5, 0);
12367 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12368 set, SIGSET_T_SIZE));
12372 #if defined(TARGET_NR_epoll_wait)
12373 case TARGET_NR_epoll_wait:
12374 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12379 ret = -TARGET_ENOSYS;
12381 if (!is_error(ret)) {
12383 for (i = 0; i < ret; i++) {
12384 target_ep[i].events = tswap32(ep[i].events);
12385 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12387 unlock_user(target_ep, arg2,
12388 ret * sizeof(struct target_epoll_event));
12390 unlock_user(target_ep, arg2, 0);
12397 #ifdef TARGET_NR_prlimit64
12398 case TARGET_NR_prlimit64:
12400 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12401 struct target_rlimit64 *target_rnew, *target_rold;
12402 struct host_rlimit64 rnew, rold, *rnewp = 0;
12403 int resource = target_to_host_resource(arg2);
12405 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12408 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12409 rnew.rlim_max = tswap64(target_rnew->rlim_max);
12410 unlock_user_struct(target_rnew, arg3, 0);
12414 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12415 if (!is_error(ret) && arg4) {
12416 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12419 target_rold->rlim_cur = tswap64(rold.rlim_cur);
12420 target_rold->rlim_max = tswap64(rold.rlim_max);
12421 unlock_user_struct(target_rold, arg4, 1);
12426 #ifdef TARGET_NR_gethostname
12427 case TARGET_NR_gethostname:
12429 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12431 ret = get_errno(gethostname(name, arg2));
12432 unlock_user(name, arg1, arg2);
12434 ret = -TARGET_EFAULT;
12439 #ifdef TARGET_NR_atomic_cmpxchg_32
12440 case TARGET_NR_atomic_cmpxchg_32:
12442 /* should use start_exclusive from main.c */
12443 abi_ulong mem_value;
12444 if (get_user_u32(mem_value, arg6)) {
12445 target_siginfo_t info;
12446 info.si_signo = SIGSEGV;
12448 info.si_code = TARGET_SEGV_MAPERR;
12449 info._sifields._sigfault._addr = arg6;
12450 queue_signal((CPUArchState *)cpu_env, info.si_signo,
12451 QEMU_SI_FAULT, &info);
12455 if (mem_value == arg2)
12456 put_user_u32(arg1, arg6);
12461 #ifdef TARGET_NR_atomic_barrier
12462 case TARGET_NR_atomic_barrier:
12464 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
12470 #ifdef TARGET_NR_timer_create
12471 case TARGET_NR_timer_create:
12473 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12475 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12478 int timer_index = next_free_host_timer();
12480 if (timer_index < 0) {
12481 ret = -TARGET_EAGAIN;
12483 timer_t *phtimer = g_posix_timers + timer_index;
12486 phost_sevp = &host_sevp;
12487 ret = target_to_host_sigevent(phost_sevp, arg2);
12493 ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12497 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12506 #ifdef TARGET_NR_timer_settime
12507 case TARGET_NR_timer_settime:
12509 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12510 * struct itimerspec * old_value */
12511 target_timer_t timerid = get_timer_id(arg1);
12515 } else if (arg3 == 0) {
12516 ret = -TARGET_EINVAL;
12518 timer_t htimer = g_posix_timers[timerid];
12519 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12521 if (target_to_host_itimerspec(&hspec_new, arg3)) {
12525 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12526 if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12534 #ifdef TARGET_NR_timer_gettime
12535 case TARGET_NR_timer_gettime:
12537 /* args: timer_t timerid, struct itimerspec *curr_value */
12538 target_timer_t timerid = get_timer_id(arg1);
12542 } else if (!arg2) {
12543 ret = -TARGET_EFAULT;
12545 timer_t htimer = g_posix_timers[timerid];
12546 struct itimerspec hspec;
12547 ret = get_errno(timer_gettime(htimer, &hspec));
12549 if (host_to_target_itimerspec(arg2, &hspec)) {
12550 ret = -TARGET_EFAULT;
12557 #ifdef TARGET_NR_timer_getoverrun
12558 case TARGET_NR_timer_getoverrun:
12560 /* args: timer_t timerid */
12561 target_timer_t timerid = get_timer_id(arg1);
12566 timer_t htimer = g_posix_timers[timerid];
12567 ret = get_errno(timer_getoverrun(htimer));
12569 fd_trans_unregister(ret);
12574 #ifdef TARGET_NR_timer_delete
12575 case TARGET_NR_timer_delete:
12577 /* args: timer_t timerid */
12578 target_timer_t timerid = get_timer_id(arg1);
12583 timer_t htimer = g_posix_timers[timerid];
12584 ret = get_errno(timer_delete(htimer));
12585 g_posix_timers[timerid] = 0;
12591 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12592 case TARGET_NR_timerfd_create:
12593 ret = get_errno(timerfd_create(arg1,
12594 target_to_host_bitmask(arg2, fcntl_flags_tbl)));
12598 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12599 case TARGET_NR_timerfd_gettime:
12601 struct itimerspec its_curr;
12603 ret = get_errno(timerfd_gettime(arg1, &its_curr));
12605 if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
12612 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12613 case TARGET_NR_timerfd_settime:
12615 struct itimerspec its_new, its_old, *p_new;
12618 if (target_to_host_itimerspec(&its_new, arg3)) {
12626 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12628 if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
12635 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12636 case TARGET_NR_ioprio_get:
12637 ret = get_errno(ioprio_get(arg1, arg2));
12641 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12642 case TARGET_NR_ioprio_set:
12643 ret = get_errno(ioprio_set(arg1, arg2, arg3));
12647 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12648 case TARGET_NR_setns:
12649 ret = get_errno(setns(arg1, arg2));
12652 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12653 case TARGET_NR_unshare:
12654 ret = get_errno(unshare(arg1));
12657 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12658 case TARGET_NR_kcmp:
12659 ret = get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
12665 gemu_log("qemu: Unsupported syscall: %d\n", num);
12666 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
12667 unimplemented_nowarn:
12669 ret = -TARGET_ENOSYS;
12674 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
12677 print_syscall_ret(num, ret);
12678 trace_guest_user_syscall_ret(cpu, num, ret);
12681 ret = -TARGET_EFAULT;